
fix(ci): fix nightly builds and print zot log on failure (#1799)

GC stress on S3 storage now uses minio for CI/CD builds,
and localstack for nightly builds.

fix(gc): make sure we don't remove a repo while blobs are still
being uploaded, and only remove it when the number of blobs GC'ed is non-zero

Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>
peusebiu 2023-09-20 19:25:06 +03:00 committed by GitHub
parent a11fe2d195
commit f164fb9e03
10 changed files with 331 additions and 39 deletions
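
The core of the GC fix, restated as a minimal runnable Go sketch. The helper name and parameters below are simplified stand-ins for illustration, not zot's actual API; the condition itself mirrors the guard added to garbageCollectBlobs in the Go diff further down:

package main

import "fmt"

// shouldRemoveRepo mirrors the guard added to garbageCollectBlobs: remove
// the repo only if it had blobs at all, every blob was reaped by this GC
// pass, and no blob upload sessions are still in flight.
func shouldRemoveRepo(allBlobs, reaped int, pendingUploads []string) bool {
	return allBlobs > 0 && reaped == allBlobs && len(pendingUploads) == 0
}

func main() {
	// All blobs GC'ed, but an upload is in flight: keep the repo.
	fmt.Println(shouldRemoveRepo(3, 3, []string{"session-abc"})) // false

	// All blobs GC'ed and nothing uploading: safe to remove the repo.
	fmt.Println(shouldRemoveRepo(3, 3, nil)) // true

	// Nothing was GC'ed (empty blob list): leave the repo alone.
	fmt.Println(shouldRemoveRepo(0, 0, nil)) // false
}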

View file

@@ -24,6 +24,7 @@ jobs:
           go-version: 1.20.x
       - name: Run zb
+        id: bench
         run: |
           make binary
           make bench
@@ -35,6 +36,13 @@ jobs:
           # clean zot storage
           sudo rm -rf /tmp/zot
+        continue-on-error: true
+      - name: Check on failures
+        if: steps.bench.outcome != 'success'
+        run: |
+          cat /tmp/gc-referrers-bench-local.log
+          exit 1

   gc-stress-local:
     name: GC(without referrers) on filesystem with short interval
@@ -48,6 +56,7 @@ jobs:
           go-version: 1.20.x
       - name: Run zb
+        id: bench
         run: |
           make binary
           make bench
@@ -59,3 +68,188 @@ jobs:
           # clean zot storage
           sudo rm -rf /tmp/zot
+        continue-on-error: true
+      - name: Check on failures
+        if: steps.bench.outcome != 'success'
+        run: |
+          cat /tmp/gc-bench-local.log
+          exit 1
+
+  gc-referrers-stress-s3:
+    name: GC(with referrers) on S3(minio) with short interval
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/clean-runner
+      - uses: actions/setup-go@v4
+        with:
+          cache: false
+          go-version: 1.20.x
+      - name: Setup localstack service
+        run: |
+          pip install localstack                   # Install LocalStack CLI
+          docker pull localstack/localstack:1.3    # Make sure to pull the latest version of the image
+          localstack start -d                      # Start LocalStack in the background
+
+          echo "Waiting for LocalStack startup..." # Wait 30 seconds for the LocalStack container
+          localstack wait -t 30                    # to become ready before timing out
+          echo "Startup complete"
+
+          # aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket zot-storage --region us-east-2 --create-bucket-configuration="{\"LocationConstraint\": \"us-east-2\"}"
+          aws dynamodb --endpoint-url http://localhost:4566 --region "us-east-2" create-table --table-name BlobTable --attribute-definitions AttributeName=Digest,AttributeType=S --key-schema AttributeName=Digest,KeyType=HASH --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=5
+        env:
+          AWS_ACCESS_KEY_ID: fake
+          AWS_SECRET_ACCESS_KEY: fake
+      - name: Setup minio service
+        run: |
+          docker run -d -p 9000:9000 --name minio \
+            -e "MINIO_ACCESS_KEY=minioadmin" \
+            -e "MINIO_SECRET_KEY=minioadmin" \
+            -v /tmp/data:/data \
+            -v /tmp/config:/root/.minio \
+            --health-cmd "curl http://localhost:9000/minio/health/live" \
+            minio/minio:edge-cicd server /data
+      - name: Install py minio
+        run: pip3 install minio
+      - name: Wait for minio to come up
+        run: |
+          sleep 10
+          curl --connect-timeout 5 \
+            --max-time 120 \
+            --retry 12 \
+            --retry-max-time 120 \
+            'http://localhost:9000/minio/health/live'
+      - name: Create minio bucket
+        run: |
+          python3 - <<'EOF'
+          from minio import Minio
+
+          try:
+              minio = Minio(
+                  'localhost:9000',
+                  access_key='minioadmin',
+                  secret_key='minioadmin',
+                  secure=False
+              )
+          except Exception as ex:
+              raise
+
+          minio.make_bucket('zot-storage')
+          print(f'{minio.list_buckets()}')
+          EOF
+      - name: Run zb
+        id: bench
+        run: |
+          make binary
+          make bench
+          ./bin/zot-linux-amd64 serve test/gc-stress/config-gc-referrers-bench-s3-minio.json &
+          sleep 10
+          bin/zb-linux-amd64 -c 10 -n 100 -o ci-cd http://localhost:8080
+
+          killall -r zot-*
+
+          # clean zot storage
+          sudo rm -rf /tmp/zot
+        env:
+          AWS_ACCESS_KEY_ID: fake
+          AWS_SECRET_ACCESS_KEY: fake
+        continue-on-error: true
+      - name: Check on failures
+        if: steps.bench.outcome != 'success'
+        run: |
+          cat /tmp/gc-referrers-bench-s3.log
+          exit 1
+
+  gc-stress-s3:
+    name: GC(without referrers) on S3(minio) with short interval
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/clean-runner
+      - uses: actions/setup-go@v4
+        with:
+          cache: false
+          go-version: 1.20.x
+      - name: Setup localstack service
+        run: |
+          pip install localstack                   # Install LocalStack CLI
+          docker pull localstack/localstack:1.3    # Make sure to pull the latest version of the image
+          localstack start -d                      # Start LocalStack in the background
+
+          echo "Waiting for LocalStack startup..." # Wait 30 seconds for the LocalStack container
+          localstack wait -t 30                    # to become ready before timing out
+          echo "Startup complete"
+
+          # aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket zot-storage --region us-east-2 --create-bucket-configuration="{\"LocationConstraint\": \"us-east-2\"}"
+          aws dynamodb --endpoint-url http://localhost:4566 --region "us-east-2" create-table --table-name BlobTable --attribute-definitions AttributeName=Digest,AttributeType=S --key-schema AttributeName=Digest,KeyType=HASH --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=5
+        env:
+          AWS_ACCESS_KEY_ID: fake
+          AWS_SECRET_ACCESS_KEY: fake
+      - name: Setup minio service
+        run: |
+          docker run -d -p 9000:9000 --name minio \
+            -e "MINIO_ACCESS_KEY=minioadmin" \
+            -e "MINIO_SECRET_KEY=minioadmin" \
+            -v /tmp/data:/data \
+            -v /tmp/config:/root/.minio \
+            --health-cmd "curl http://localhost:9000/minio/health/live" \
+            minio/minio:edge-cicd server /data
+      - name: Install py minio
+        run: pip3 install minio
+      - name: Wait for minio to come up
+        run: |
+          sleep 10
+          curl --connect-timeout 5 \
+            --max-time 120 \
+            --retry 12 \
+            --retry-max-time 120 \
+            'http://localhost:9000/minio/health/live'
+      - name: Create minio bucket
+        run: |
+          python3 - <<'EOF'
+          from minio import Minio
+
+          try:
+              minio = Minio(
+                  'localhost:9000',
+                  access_key='minioadmin',
+                  secret_key='minioadmin',
+                  secure=False
+              )
+          except Exception as ex:
+              raise
+
+          minio.make_bucket('zot-storage')
+          print(f'{minio.list_buckets()}')
+          EOF
+      - name: Run zb
+        id: bench
+        run: |
+          make binary
+          make bench
+          ./bin/zot-linux-amd64 serve test/gc-stress/config-gc-bench-s3-minio.json &
+          sleep 10
+          bin/zb-linux-amd64 -c 10 -n 100 -o ci-cd http://localhost:8080
+
+          killall -r zot-*
+
+          # clean zot storage
+          sudo rm -rf /tmp/zot
+        env:
+          AWS_ACCESS_KEY_ID: fake
+          AWS_SECRET_ACCESS_KEY: fake
+        continue-on-error: true
+      - name: Check on failures
+        if: steps.bench.outcome != 'success'
+        run: |
+          cat /tmp/gc-bench-s3.log
+          exit 1

View file

@@ -72,7 +72,7 @@ jobs:
           make run-blackbox-sync-nightly

   gc-referrers-stress-s3:
-    name: GC(with referrers) on S3 with short interval
+    name: GC(with referrers) on S3(localstack) with short interval
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
@@ -96,11 +96,13 @@ jobs:
         env:
           AWS_ACCESS_KEY_ID: fake
           AWS_SECRET_ACCESS_KEY: fake
       - name: Run zb
+        id: bench
         run: |
           make binary
           make bench
-          ./bin/zot-linux-amd64 serve test/gc-stress/config-gc-referrers-bench-s3.json &
+          ./bin/zot-linux-amd64 serve test/gc-stress/config-gc-referrers-bench-s3-localstack.json &
           sleep 10
           bin/zb-linux-amd64 -c 10 -n 100 -o ci-cd http://localhost:8080
@@ -111,9 +113,16 @@ jobs:
         env:
           AWS_ACCESS_KEY_ID: fake
           AWS_SECRET_ACCESS_KEY: fake
+        continue-on-error: true
+      - name: Check on failures
+        if: steps.bench.outcome != 'success'
+        run: |
+          cat /tmp/gc-referrers-bench-s3.log
+          exit 1
+
   gc-stress-s3:
-    name: GC(without referrers) on S3 with short interval
+    name: GC(without referrers) on S3(localstack) with short interval
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
@@ -137,11 +146,13 @@ jobs:
         env:
           AWS_ACCESS_KEY_ID: fake
           AWS_SECRET_ACCESS_KEY: fake
       - name: Run zb
+        id: bench
         run: |
           make binary
           make bench
-          ./bin/zot-linux-amd64 serve test/gc-stress/config-gc-bench-s3.json &
+          ./bin/zot-linux-amd64 serve test/gc-stress/config-gc-bench-s3-localstack.json &
           sleep 10
           bin/zb-linux-amd64 -c 10 -n 100 -o ci-cd http://localhost:8080
@@ -152,3 +163,10 @@ jobs:
         env:
           AWS_ACCESS_KEY_ID: fake
           AWS_SECRET_ACCESS_KEY: fake
+        continue-on-error: true
+      - name: Check on failures
+        if: steps.bench.outcome != 'success'
+        run: |
+          cat /tmp/gc-bench-s3.log
+          exit 1

View file

@@ -672,7 +672,7 @@ func (is *ImageStore) deleteImageManifest(repo, reference string, detectCollisio
 	}

 	if _, err := is.storeDriver.WriteFile(file, buf); err != nil {
-		is.log.Debug().Str("deleting reference", reference).Msg("")
+		is.log.Debug().Str("reference", reference).Str("repo", repo).Msg("error while updating index.json")

 		return err
 	}
@@ -1660,7 +1660,6 @@ func (is *ImageStore) garbageCollectIndexReferrers(repo string, rootIndex ispec.
 			if gced {
 				count++
 			}
-
 		case ispec.MediaTypeImageManifest, artifactspec.MediaTypeArtifactManifest:
 			image, err := common.GetImageManifest(is, repo, desc.Digest, is.log)
 			if err != nil {
@@ -1890,8 +1889,13 @@ func (is *ImageStore) garbageCollectBlobs(imgStore *ImageStore, repo string,
 		}
 	}

+	blobUploads, err := is.storeDriver.List(path.Join(is.RootDir(), repo, storageConstants.BlobUploadDir))
+	if err != nil {
+		is.log.Debug().Str("repository", repo).Msg("unable to list .uploads/ dir")
+	}
+
 	// if we cleaned all blobs let's also remove the repo so that it won't be returned by catalog
-	if reaped == len(allBlobs) {
+	if len(allBlobs) > 0 && reaped == len(allBlobs) && len(blobUploads) == 0 {
 		log.Info().Str("repository", repo).Msg("garbage collected all blobs, cleaning repo...")

 		if err := is.storeDriver.Delete(path.Join(is.rootDir, repo)); err != nil {
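
To make the upload half of that guard concrete: zot stages in-flight blob uploads under <repo>/.uploads/ (storageConstants.BlobUploadDir above), and GC now backs off from deleting a repo while that directory has entries. Below is a small self-contained sketch, with a plain filesystem listing standing in for is.storeDriver.List; pendingUploads is a hypothetical helper for illustration, not zot's API:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// pendingUploads lists the entries under <rootDir>/<repo>/.uploads, standing
// in for is.storeDriver.List(...) in the real code. A listing error (e.g. the
// directory does not exist) is treated as "no uploads in flight", matching
// the debug-log-and-continue behavior in the diff above.
func pendingUploads(rootDir, repo string) []string {
	entries, err := os.ReadDir(filepath.Join(rootDir, repo, ".uploads"))
	if err != nil {
		return nil
	}

	names := make([]string, 0, len(entries))
	for _, e := range entries {
		names = append(names, e.Name())
	}

	return names
}

func main() {
	root, _ := os.MkdirTemp("", "zot-gc-*")
	defer os.RemoveAll(root)

	// Simulate an in-flight blob upload session.
	uploads := filepath.Join(root, "myrepo", ".uploads")
	_ = os.MkdirAll(uploads, 0o755)
	_ = os.WriteFile(filepath.Join(uploads, "session-123"), nil, 0o644)

	// A non-empty result means the repo must not be removed yet.
	fmt.Println(pendingUploads(root, "myrepo")) // [session-123]
}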

View file

@@ -17,7 +17,9 @@ function setup_file() {
     local zot_root_dir=${BATS_FILE_TMPDIR}/zot
     local zot_config_file_dedupe=${BATS_FILE_TMPDIR}/zot_config_dedupe.json
     local zot_config_file_nodedupe=${BATS_FILE_TMPDIR}/zot_config_nodedupe.json
-    local ZOT_LOG_FILE=${zot_root_dir}/zot-log.json
+    local ZOT_LOG_FILE_DEDUPE=${BATS_FILE_TMPDIR}/zot-log-dedupe.json
+    local ZOT_LOG_FILE_NODEDUPE=${BATS_FILE_TMPDIR}/zot-log-nodedupe.json

     mkdir -p ${zot_root_dir}

     cat > ${zot_config_file_dedupe}<<EOF
@@ -48,7 +50,8 @@ function setup_file() {
     "port": "8080"
   },
   "log": {
-    "level": "debug"
+    "level": "debug",
+    "output": "${ZOT_LOG_FILE_DEDUPE}"
   }
 }
 EOF
@@ -75,7 +78,7 @@ EOF
   },
   "log": {
     "level": "debug",
-    "output": "${ZOT_LOG_FILE}"
+    "output": "${ZOT_LOG_FILE_NODEDUPE}"
   }
 }
 EOF
@@ -87,7 +90,8 @@ EOF
 function teardown() {
     # conditionally printing on failure is possible from teardown but not from teardown_file
-    cat ${BATS_FILE_TMPDIR}/zot/zot-log.json
+    cat ${BATS_FILE_TMPDIR}/zot-log-dedupe.json
+    cat ${BATS_FILE_TMPDIR}/zot-log-nodedupe.json || true
 }

 function teardown_file() {
@@ -111,7 +115,7 @@ function teardown_file() {
 @test "restart zot with dedupe false and wait for restore blobs task to finish" {
     local zot_config_file_nodedupe=${BATS_FILE_TMPDIR}/zot_config_nodedupe.json
     local zot_root_dir=${BATS_FILE_TMPDIR}/zot
-    local ZOT_LOG_FILE=${zot_root_dir}/zot-log.json
+    local ZOT_LOG_FILE=${BATS_FILE_TMPDIR}/zot-log-nodedupe.json

     # stop server
     zot_stop

View file

@@ -14,6 +14,6 @@
   },
   "log": {
     "level": "debug",
-    "output": "/dev/null"
+    "output": "/tmp/gc-bench-local.log"
   }
 }

View file

@@ -4,8 +4,8 @@
   "rootDirectory": "/tmp/zot/s3",
   "gc": true,
   "gcReferrers": false,
-  "gcDelay": "40m",
-  "untaggedImageRetentionDelay": "40m",
+  "gcDelay": "50m",
+  "untaggedImageRetentionDelay": "50m",
   "gcInterval": "2m",
   "storageDriver": {
     "name": "s3",
@@ -29,6 +29,6 @@
   },
   "log": {
     "level": "debug",
-    "output": "/dev/null"
+    "output": "/tmp/gc-bench-s3.log"
   }
 }

View file

@@ -0,0 +1,36 @@
+{
+  "distSpecVersion": "1.1.0-dev",
+  "storage": {
+    "rootDirectory": "/tmp/zot/s3",
+    "gc": true,
+    "gcReferrers": false,
+    "gcDelay": "3m",
+    "untaggedImageRetentionDelay": "3m",
+    "gcInterval": "1s",
+    "storageDriver": {
+      "name": "s3",
+      "rootdirectory": "/zot",
+      "region": "us-east-2",
+      "bucket": "zot-storage",
+      "accesskey": "minioadmin",
+      "secretkey": "minioadmin",
+      "regionendpoint": "http://localhost:9000",
+      "secure": false,
+      "skipverify": false
+    },
+    "cacheDriver": {
+      "name": "dynamodb",
+      "endpoint": "http://localhost:4566",
+      "region": "us-east-2",
+      "cacheTablename": "BlobTable"
+    }
+  },
+  "http": {
+    "address": "127.0.0.1",
+    "port": "8080"
+  },
+  "log": {
+    "level": "debug",
+    "output": "/tmp/gc-bench-s3.log"
+  }
+}

View file

@@ -14,6 +14,6 @@
   },
   "log": {
     "level": "debug",
-    "output": "/dev/null"
+    "output": "/tmp/gc-referrers-bench-local.log"
   }
 }

View file

@@ -4,8 +4,8 @@
   "rootDirectory": "/tmp/zot/s3",
   "gc": true,
   "gcReferrers": true,
-  "gcDelay": "40m",
-  "untaggedImageRetentionDelay": "40m",
+  "gcDelay": "50m",
+  "untaggedImageRetentionDelay": "50m",
   "gcInterval": "2m",
   "storageDriver": {
     "name": "s3",
@@ -29,6 +29,6 @@
   },
   "log": {
     "level": "debug",
-    "output": "/dev/null"
+    "output": "/tmp/gc-referrers-bench-s3.log"
   }
 }

View file

@@ -0,0 +1,36 @@
+{
+  "distSpecVersion": "1.1.0-dev",
+  "storage": {
+    "rootDirectory": "/tmp/zot/s3",
+    "gc": true,
+    "gcReferrers": true,
+    "gcDelay": "3m",
+    "untaggedImageRetentionDelay": "3m",
+    "gcInterval": "1s",
+    "storageDriver": {
+      "name": "s3",
+      "rootdirectory": "/zot",
+      "region": "us-east-2",
+      "bucket": "zot-storage",
+      "accesskey": "minioadmin",
+      "secretkey": "minioadmin",
+      "regionendpoint": "http://localhost:9000",
+      "secure": false,
+      "skipverify": false
+    },
+    "cacheDriver": {
+      "name": "dynamodb",
+      "endpoint": "http://localhost:4566",
+      "region": "us-east-2",
+      "cacheTablename": "BlobTable"
+    }
+  },
+  "http": {
+    "address": "127.0.0.1",
+    "port": "8080"
+  },
+  "log": {
+    "level": "debug",
+    "output": "/tmp/gc-referrers-bench-s3.log"
+  }
+}