Mirror of https://github.com/project-zot/zot.git (synced 2024-12-16 21:56:37 -05:00)
Commit fac1d1d05d
1. chore(trivy): update trivy library version

   The trivy team switched from github.com/urfave/cli to viper, so there are some other code changes as well. Since we don't use github.com/urfave/cli directly in our software, we needed to add a tools.go so that "go mod tidy" does not delete it. See this pattern explained in:
   - https://github.com/99designs/gqlgen#quick-start
   - https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
   - https://github.com/go-modules-by-example/index/blob/master/010_tools/README.md#walk-through

   The jobs using "go get -u" have been updated to use "go install", since "go get" modifies go.mod by upgrading some of the packages while downgrading trivy to an older version with broken dependencies.

2. fix(storage): update local storage to ignore folder names not compliant with the dist spec. Also update trivy to download the DB and cache results under the rootDir/_trivy folder.

3. fix(s3): one of the s3 tests was missing the skipIt call, which caused a failure when running locally without s3 being available.

4. Make sure offline scanning is enabled, so that zot downloads the trivy DB only on the regular schedule and not on every image scan.

ci: increase build and test timeout, as tests are reaching the limit more often

Signed-off-by: Andrei Aaron <aaaron@luxoft.com>
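The tools.go pattern referenced above looks roughly like the sketch below (illustrative only; the exact import path and file contents in the zot tree may differ):

//go:build tools
// +build tools

// Package tools pins build-time tool dependencies. The build tag excludes
// this file from normal compilation, but the blank import keeps the module
// listed in go.mod across "go mod tidy".
package tools

import (
	_ "github.com/urfave/cli/v2" // assumed path; kept so trivy's dependency is not pruned
)

With the dependency tracked this way, CI jobs can install pinned tools via "go install" instead of "go get -u", which would otherwise rewrite go.mod.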
178 lines
6.3 KiB
YAML
name: "Clustering test"
|
|
on:
|
|
push:
|
|
branches:
|
|
- main
|
|
pull_request:
|
|
branches: [main]
|
|
release:
|
|
types:
|
|
- published
|
|
|
|
permissions: read-all
|
|
|
|
jobs:
|
|
client-tools:
|
|
name: Stateless zot with shared reliable storage
|
|
runs-on: ubuntu-22.04
|
|
# services:
|
|
# minio:
|
|
# image: minio/minio:edge-cicd
|
|
# env:
|
|
# MINIO_ROOT_USER: minioadmin
|
|
# MINIO_ROOT_PASSWORD: minioadmin
|
|
# ports:
|
|
# - 9000:9000
|
|
# volumes:
|
|
# - /tmp/data:/data
|
|
# options: --name=minio --health-cmd "curl http://localhost:9000/minio/health/live"
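    # NOTE: the services block above is kept for reference; service containers
    # cannot be passed a command such as "server /data", which is presumably
    # why minio is started manually in the "Setup minio service" step below.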
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v3
        with:
          go-version: 1.19.x
      - name: Install dependencies
        run: |
          cd $GITHUB_WORKSPACE
          go install github.com/swaggo/swag/cmd/swag
          go mod download
          sudo apt-get update
          sudo apt-get -y install rpm uidmap
          # install skopeo
          sudo apt-get -y install skopeo

          # install haproxy
          sudo apt-get -y install haproxy

      - name: Setup minio service
        run: |
          docker run -d -p 9000:9000 --name minio \
            -e "MINIO_ACCESS_KEY=minioadmin" \
            -e "MINIO_SECRET_KEY=minioadmin" \
            -v /tmp/data:/data \
            -v /tmp/config:/root/.minio \
            --health-cmd "curl http://localhost:9000/minio/health/live" \
            minio/minio:edge-cicd server /data

      - name: Install py minio
        run: pip3 install minio

      - name: Wait for minio to come up
        run: |
          curl --connect-timeout 5 \
            --max-time 10 \
            --retry 12 \
            --retry-max-time 120 \
            'http://localhost:9000/minio/health/live'

      - name: Create minio bucket
        run: |
          python3 - <<'EOF'
          from minio import Minio

          minio = Minio(
              'localhost:9000',
              access_key='minioadmin',
              secret_key='minioadmin',
              secure=False
          )

          minio.make_bucket('zot-storage')
          print(f'{minio.list_buckets()}')
          EOF
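          # note: test/cluster/config-minio*.json is expected to reference this
          # 'zot-storage' bucket as the shared s3 storage backend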

      - name: Run haproxy
        run: |
          sudo haproxy -d -f examples/cluster/haproxy.cfg -D
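          # note: examples/cluster/haproxy.cfg presumably binds port 8080 and
          # balances requests across the three zot instances started below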
          sleep 10

      - name: Prepare configuration files
        run: |
          cp test/cluster/config-minio.json test/cluster/config-minio1.json
          sed -i 's/8081/8082/g' test/cluster/config-minio.json
          cp test/cluster/config-minio.json test/cluster/config-minio2.json
          sed -i 's/8082/8083/g' test/cluster/config-minio.json
          cp test/cluster/config-minio.json test/cluster/config-minio3.json
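          # the copy/sed sequence above yields three configs that differ only in
          # listening port: 8081 (minio1), 8082 (minio2) and 8083 (minio3)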

      - name: Run push-pull tests
        run: |
          make binary
          ./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
          sleep 10
          # run tests
          skopeo --debug copy --format=oci --dest-tls-verify=false docker://ghcr.io/project-zot/golang:1.19 docker://localhost:8080/golang:1.19
          skopeo --debug copy --src-tls-verify=false docker://localhost:8080/golang:1.19 oci:golang:1.19
          echo "{\"name\":\"foo\",\"value\":\"bar\"}" > config.json
          echo "hello world" > artifact.txt
          oras push --plain-http localhost:8080/hello-artifact:v2 \
            --config config.json:application/vnd.acme.rocket.config.v1+json \
            artifact.txt:text/plain -d -v
          rm -f artifact.txt # first delete the file
          oras pull --plain-http localhost:8080/hello-artifact:v2 -d -v
          # the pulled artifact should contain "hello world"
          if ! grep -q "hello world" artifact.txt; then
            killall -r zot-*
            exit 1
          fi

          killall -r zot-*
        env:
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin
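      # note: the AWS_* variables are presumably read by zot's s3 storage
      # driver to authenticate against the local minio instance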

      - name: Run benchmark with --src-cidr arg
        run: |
          make bench
          ./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
          sleep 10
          # run zb with --src-cidr
          bin/zb-linux-amd64 -c 10 -n 50 -o ci-cd --src-cidr 127.0.0.0/8 http://localhost:8080
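          # note: --src-cidr presumably makes zb draw client source addresses
          # from the given range, so haproxy sees many distinct clients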

          killall -r zot-*

          # clean zot storage
          sudo rm -rf /tmp/data/zot-storage/zot
        env:
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      - name: Run benchmark with --src-ips arg
        run: |
          make bench
          ./bin/zot-linux-amd64 serve test/cluster/config-minio1.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio2.json &
          ./bin/zot-linux-amd64 serve test/cluster/config-minio3.json &
          sleep 10
          # run zb with --src-ips
          bin/zb-linux-amd64 -c 10 -n 50 -o ci-cd --src-ips 127.0.0.2,127.0.0.3,127.0.0.4,127.0.0.5,127.0.0.6,127.0.12.5,127.0.12.6 http://localhost:8080
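          # note: --src-ips appears to be the explicit-list variant of
          # --src-cidr, cycling through the given loopback addresses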

          killall -r zot-*
        env:
          AWS_ACCESS_KEY_ID: minioadmin
          AWS_SECRET_ACCESS_KEY: minioadmin

      # Download previous benchmark result from cache (if exists)
      - name: Download previous benchmark data
        uses: actions/cache@v3
        with:
          path: ./cache
          key: ${{ runner.os }}-gen1-benchmark-stateless-cluster
      # Run `github-action-benchmark` action
      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1.15.0
        with:
          # What benchmark tool the output.txt came from
          tool: 'customBiggerIsBetter'
          # Where the output from the benchmark tool is stored
          output-file-path: ci-cd.json
          # Where the previous data file is stored
          external-data-json-path: ./cache/benchmark-data.json
          # Workflow will fail when an alert happens
          fail-on-alert: true
          # Upload the updated cache file for the next job by actions/cache