0
Fork 0
mirror of https://github.com/project-zot/zot.git synced 2024-12-16 21:56:37 -05:00

refactor(storage): refactor storage into a single ImageStore (#1656)

unified both local and s3 ImageStore logic into a single ImageStore
added a new driver interface for common file/dirs manipulations
to be implemented by different storage types

refactor(gc): drop umoci dependency, implemented internal gc

added retentionDelay config option that specifies
the garbage collect delay for images without tags

this will also clean manifests which are part of an index image
(multiarch) that no longer exist.

fix(dedupe): skip blobs under .sync/ directory

if startup dedupe is running while also syncing is running
ignore blobs under sync's temporary storage

fix(storage): do not allow image indexes modifications

when deleting a manifest verify that it is not part of a multiarch image
and throw a MethodNotAllowed error to the client if it is.
we don't want to modify multiarch images

Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>
This commit is contained in:
peusebiu 2023-09-01 20:54:39 +03:00 committed by GitHub
parent 72a5968437
commit b80deb9927
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
60 changed files with 6052 additions and 4666 deletions

View file

@ -17,6 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: ./.github/actions/clean-runner
- uses: actions/setup-go@v4
with:
cache: false
@ -98,6 +99,9 @@ jobs:
- name: Run annotations tests
run: |
make test-annotations
- name: Run garbage collect tests
run: |
make test-garbage-collect
- name: Install localstack
run: |
pip install --upgrade pyopenssl
@ -129,4 +133,4 @@ jobs:
sudo du -sh /var/
sudo du -sh /var/lib/docker/
du -sh /home/runner/work/
set +x
set +x

View file

@ -12,8 +12,8 @@ on:
permissions: read-all
jobs:
client-tools:
name: GC with short interval
gc-referrers-stress-local:
name: GC(with referrers) on filesystem with short interval
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
@ -27,7 +27,31 @@ jobs:
run: |
make binary
make bench
./bin/zot-linux-amd64 serve examples/config-gc-bench.json &
./bin/zot-linux-amd64 serve test/gc-stress/config-gc-referrers-bench-local.json &
sleep 10
bin/zb-linux-amd64 -c 10 -n 100 -o ci-cd http://localhost:8080
killall -r zot-*
# clean zot storage
sudo rm -rf /tmp/zot
gc-stress-local:
name: GC(without referrers) on filesystem with short interval
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: ./.github/actions/clean-runner
- uses: actions/setup-go@v4
with:
cache: false
go-version: 1.20.x
- name: Run zb
run: |
make binary
make bench
./bin/zot-linux-amd64 serve test/gc-stress/config-gc-bench-local.json &
sleep 10
bin/zb-linux-amd64 -c 10 -n 100 -o ci-cd http://localhost:8080

View file

@ -74,3 +74,84 @@ jobs:
- name: Run sync harness
run: |
make test-sync-harness
gc-referrers-stress-s3:
name: GC(with referrers) on S3 with short interval
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: ./.github/actions/clean-runner
- uses: actions/setup-go@v4
with:
cache: false
go-version: 1.20.x
- name: Setup localstack service
run: |
pip install localstack # Install LocalStack cli
docker pull localstack/localstack:1.3 # Make sure to pull the latest version of the image
localstack start -d # Start LocalStack in the background
echo "Waiting for LocalStack startup..." # Wait 30 seconds for the LocalStack container
localstack wait -t 30 # to become ready before timing out
echo "Startup complete"
aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket zot-storage --region us-east-2 --create-bucket-configuration="{\"LocationConstraint\": \"us-east-2\"}"
aws dynamodb --endpoint-url http://localhost:4566 --region "us-east-2" create-table --table-name BlobTable --attribute-definitions AttributeName=Digest,AttributeType=S --key-schema AttributeName=Digest,KeyType=HASH --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=5
env:
AWS_ACCESS_KEY_ID: fake
AWS_SECRET_ACCESS_KEY: fake
- name: Run zb
run: |
make binary
make bench
./bin/zot-linux-amd64 serve test/gc-stress/config-gc-referrers-bench-s3.json &
sleep 10
bin/zb-linux-amd64 -c 10 -n 100 -o ci-cd http://localhost:8080
killall -r zot-*
# clean zot storage
sudo rm -rf /tmp/zot
env:
AWS_ACCESS_KEY_ID: fake
AWS_SECRET_ACCESS_KEY: fake
gc-stress-s3:
name: GC(without referrers) on S3 with short interval
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: ./.github/actions/clean-runner
- uses: actions/setup-go@v4
with:
cache: false
go-version: 1.20.x
- name: Setup localstack service
run: |
pip install localstack # Install LocalStack cli
docker pull localstack/localstack:1.3 # Make sure to pull the latest version of the image
localstack start -d # Start LocalStack in the background
echo "Waiting for LocalStack startup..." # Wait 30 seconds for the LocalStack container
localstack wait -t 30 # to become ready before timing out
echo "Startup complete"
aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket zot-storage --region us-east-2 --create-bucket-configuration="{\"LocationConstraint\": \"us-east-2\"}"
aws dynamodb --endpoint-url http://localhost:4566 --region "us-east-2" create-table --table-name BlobTable --attribute-definitions AttributeName=Digest,AttributeType=S --key-schema AttributeName=Digest,KeyType=HASH --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=5
env:
AWS_ACCESS_KEY_ID: fake
AWS_SECRET_ACCESS_KEY: fake
- name: Run zb
run: |
make binary
make bench
./bin/zot-linux-amd64 serve test/gc-stress/config-gc-bench-s3.json &
sleep 10
bin/zb-linux-amd64 -c 10 -n 100 -o ci-cd http://localhost:8080
killall -r zot-*
# clean zot storage
sudo rm -rf /tmp/zot
env:
AWS_ACCESS_KEY_ID: fake
AWS_SECRET_ACCESS_KEY: fake

View file

@ -357,6 +357,14 @@ test-push-pull: check-linux binary check-skopeo $(BATS) $(REGCLIENT) $(ORAS) $(H
test-push-pull-verbose: check-linux binary check-skopeo $(BATS) $(REGCLIENT) $(ORAS) $(HELM) $(CRICTL)
$(BATS) --trace --verbose-run --print-output-on-failure --show-output-of-passing-tests test/blackbox/pushpull.bats
.PHONY: test-garbage-collect
test-garbage-collect: binary check-skopeo $(BATS) $(REGCLIENT) $(ORAS)
$(BATS) --trace --print-output-on-failure test/blackbox/garbage_collect.bats
.PHONY: test-garbage-collect-verbose
test-garbage-collect-verbose: binary check-skopeo $(BATS) $(REGCLIENT) $(ORAS)
$(BATS) --trace --verbose-run --print-output-on-failure --show-output-of-passing-tests test/blackbox/garbage_collect.bats
.PHONY: test-push-pull-running-dedupe
test-push-pull-running-dedupe: check-linux binary check-skopeo $(BATS) $(REGCLIENT) $(ORAS) $(HELM)
$(BATS) --trace --print-output-on-failure test/blackbox/pushpull_running_dedupe.bats

View file

@ -58,6 +58,7 @@ var (
ErrBadBlob = errors.New("blob: bad blob")
ErrBadBlobDigest = errors.New("blob: bad blob digest")
ErrBlobReferenced = errors.New("blob: referenced by manifest")
ErrManifestReferenced = errors.New("manifest: referenced by index image")
ErrUnknownCode = errors.New("error: unknown error code")
ErrBadCACert = errors.New("tls: invalid ca cert")
ErrBadUser = errors.New("auth: non-existent user")
@ -155,4 +156,7 @@ var (
ErrGQLEndpointNotFound = errors.New("cli: the server doesn't have a gql endpoint")
ErrGQLQueryNotSupported = errors.New("cli: query is not supported or has different arguments")
ErrBadHTTPStatusCode = errors.New("cli: the response doesn't contain the expected status code")
ErrFileAlreadyCancelled = errors.New("storageDriver: file already cancelled")
ErrFileAlreadyClosed = errors.New("storageDriver: file already closed")
ErrFileAlreadyCommitted = errors.New("storageDriver: file already committed")
)

View file

@ -3,7 +3,10 @@
"storage": {
"rootDirectory": "/tmp/zot",
"gc": true,
"gcDelay": "1s"
"gcReferrers": true,
"gcDelay": "2h",
"untaggedImageRetentionDelay": "4h",
"gcInterval": "1h"
},
"http": {
"address": "127.0.0.1",

4
go.mod
View file

@ -5,7 +5,7 @@ go 1.20
require (
github.com/99designs/gqlgen v0.17.35
github.com/Masterminds/semver v1.5.0
github.com/apex/log v1.9.0
github.com/apex/log v1.9.0 // indirect
github.com/aquasecurity/trivy-db v0.0.0-20230726112157-167ba4f2faeb
github.com/bmatcuk/doublestar/v4 v4.6.0
github.com/briandowns/spinner v1.23.0
@ -23,7 +23,6 @@ require (
github.com/gorilla/mux v1.8.0
github.com/hashicorp/golang-lru/v2 v2.0.5
github.com/json-iterator/go v1.1.12
github.com/minio/sha256-simd v1.0.1
github.com/mitchellh/mapstructure v1.5.0
github.com/nmcclain/ldap v0.0.0-20210720162743-7f8d1e44eeba
github.com/olekukonko/tablewriter v0.0.5
@ -367,7 +366,6 @@ require (
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.16.6 // indirect
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/knqyf263/go-apk-version v0.0.0-20200609155635-041fdbb8563f // indirect
github.com/knqyf263/go-deb-version v0.0.0-20230223133812-3ed183d23422 // indirect

5
go.sum
View file

@ -1130,8 +1130,6 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@ -1258,8 +1256,6 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/migueleliasweb/go-github-mock v0.0.19 h1:z/88f6wPqZVFnE7s9DbwXMhCtmV/0FofNxc4M7FuSdU=
github.com/migueleliasweb/go-github-mock v0.0.19/go.mod h1:dBoCB3W9NjzyABhoGkfI0iSlFpzulAXhI7M+9A4ONYI=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
@ -2059,7 +2055,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View file

@ -23,15 +23,17 @@ var (
)
type StorageConfig struct {
RootDirectory string
Dedupe bool
RemoteCache bool
GC bool
Commit bool
GCDelay time.Duration
GCInterval time.Duration
StorageDriver map[string]interface{} `mapstructure:",omitempty"`
CacheDriver map[string]interface{} `mapstructure:",omitempty"`
RootDirectory string
Dedupe bool
RemoteCache bool
GC bool
Commit bool
GCDelay time.Duration
GCInterval time.Duration
GCReferrers bool
UntaggedImageRetentionDelay time.Duration
StorageDriver map[string]interface{} `mapstructure:",omitempty"`
CacheDriver map[string]interface{} `mapstructure:",omitempty"`
}
type TLSConfig struct {
@ -188,8 +190,9 @@ func New() *Config {
BinaryType: BinaryType,
Storage: GlobalStorageConfig{
StorageConfig: StorageConfig{
GC: true, GCDelay: storageConstants.DefaultGCDelay,
GCInterval: storageConstants.DefaultGCInterval, Dedupe: true,
GC: true, GCReferrers: true, GCDelay: storageConstants.DefaultGCDelay,
UntaggedImageRetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
GCInterval: storageConstants.DefaultGCInterval, Dedupe: true,
},
},
HTTP: HTTPConfig{Address: "127.0.0.1", Port: "8080", Auth: &AuthConfig{FailDelay: 0}},

View file

@ -971,7 +971,7 @@ func TestInterruptedBlobUpload(t *testing.T) {
defer cm.StopServer()
client := resty.New()
blob := make([]byte, 50*1024*1024)
blob := make([]byte, 200*1024*1024)
digest := godigest.FromBytes(blob).String()
//nolint: dupl
@ -1024,6 +1024,7 @@ func TestInterruptedBlobUpload(t *testing.T) {
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
})
//nolint: dupl
Convey("Test negative interrupt PATCH blob upload", func() {
resp, err := client.R().Post(baseURL + "/v2/" + AuthorizedNamespace + "/blobs/uploads/")
So(err, ShouldBeNil)
@ -1126,6 +1127,7 @@ func TestInterruptedBlobUpload(t *testing.T) {
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
})
//nolint: dupl
Convey("Test negative interrupt PUT blob upload", func() {
resp, err := client.R().Post(baseURL + "/v2/" + AuthorizedNamespace + "/blobs/uploads/")
So(err, ShouldBeNil)
@ -6746,6 +6748,12 @@ func TestManifestImageIndex(t *testing.T) {
So(digestHdr, ShouldEqual, digest.String())
})
Convey("Deleting manifest contained by a multiarch image should not be allowed", func() {
resp, err = resty.R().Delete(baseURL + fmt.Sprintf("/v2/index/manifests/%s", m2dgst.String()))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusMethodNotAllowed)
})
Convey("Deleting an image index", func() {
// delete manifest by tag should pass
resp, err = resty.R().Delete(baseURL + "/v2/index/manifests/test:index3")
@ -7296,7 +7304,7 @@ func TestInjectTooManyOpenFiles(t *testing.T) {
So(digest, ShouldNotBeNil)
// monolithic blob upload
injected := inject.InjectFailure(0)
injected := inject.InjectFailure(2)
if injected {
request, _ := http.NewRequestWithContext(context.TODO(), http.MethodPut, loc, bytes.NewReader(content))
tokens := strings.Split(loc, "/")
@ -7369,7 +7377,7 @@ func TestInjectTooManyOpenFiles(t *testing.T) {
// Testing router path: @Router /v2/{name}/manifests/{reference} [put]
//nolint:lll // gofumpt conflicts with lll
Convey("Uploading an image manifest blob (when injected simulates that PutImageManifest failed due to 'too many open files' error)", func() {
injected := inject.InjectFailure(1)
injected := inject.InjectFailure(2)
request, _ := http.NewRequestWithContext(context.TODO(), http.MethodPut, baseURL, bytes.NewReader(content))
request = mux.SetURLVars(request, map[string]string{"name": "repotest", "reference": "1.0"})
@ -7430,6 +7438,7 @@ func TestInjectTooManyOpenFiles(t *testing.T) {
So(resp.StatusCode, ShouldEqual, http.StatusCreated)
}
})
Convey("when index.json is not in json format", func() {
resp, err = resty.R().SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json").
SetBody(content).Put(baseURL + "/v2/repotest/manifests/v1.0")
@ -7456,21 +7465,22 @@ func TestInjectTooManyOpenFiles(t *testing.T) {
func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
Convey("Make controller", t, func() {
repoName := "testrepo" //nolint:goconst
tag := "0.0.1"
port := test.GetFreePort()
baseURL := test.GetBaseURL(port)
conf := config.New()
conf.HTTP.Port = port
ctlr := makeController(conf, t.TempDir())
Convey("Garbage collect signatures without subject and manifests without tags", func(c C) {
repoName := "testrepo" //nolint:goconst
tag := "0.0.1"
port := test.GetFreePort()
baseURL := test.GetBaseURL(port)
conf := config.New()
conf.HTTP.Port = port
ctlr := makeController(conf, t.TempDir())
dir := t.TempDir()
ctlr.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.GC = true
ctlr.Config.Storage.GCDelay = 1 * time.Millisecond
ctlr.Config.Storage.UntaggedImageRetentionDelay = 1 * time.Millisecond
ctlr.Config.Storage.Dedupe = false
@ -7582,75 +7592,88 @@ func TestGCSignaturesAndUntaggedManifests(t *testing.T) {
So(err, ShouldBeNil)
})
// push an image without tag
cfg, layers, manifest, err := test.GetImageComponents(2) //nolint:staticcheck
So(err, ShouldBeNil)
Convey("Overwrite original image, signatures should be garbage-collected", func() {
// push an image without tag
cfg, layers, manifest, err := test.GetImageComponents(2) //nolint:staticcheck
So(err, ShouldBeNil)
manifestBuf, err := json.Marshal(manifest)
So(err, ShouldBeNil)
untaggedManifestDigest := godigest.FromBytes(manifestBuf)
manifestBuf, err := json.Marshal(manifest)
So(err, ShouldBeNil)
untaggedManifestDigest := godigest.FromBytes(manifestBuf)
err = test.UploadImage(
test.Image{
Config: cfg,
Layers: layers,
Manifest: manifest,
}, baseURL, repoName, untaggedManifestDigest.String())
So(err, ShouldBeNil)
err = test.UploadImage(
test.Image{
Config: cfg,
Layers: layers,
Manifest: manifest,
}, baseURL, repoName, untaggedManifestDigest.String())
So(err, ShouldBeNil)
// overwrite image so that signatures will get invalidated and gc'ed
cfg, layers, manifest, err = test.GetImageComponents(3) //nolint:staticcheck
So(err, ShouldBeNil)
// overwrite image so that signatures will get invalidated and gc'ed
cfg, layers, manifest, err = test.GetImageComponents(3) //nolint:staticcheck
So(err, ShouldBeNil)
err = test.UploadImage(
test.Image{
Config: cfg,
Layers: layers,
Manifest: manifest,
}, baseURL, repoName, tag)
So(err, ShouldBeNil)
err = test.UploadImage(
test.Image{
Config: cfg,
Layers: layers,
Manifest: manifest,
}, baseURL, repoName, tag)
So(err, ShouldBeNil)
manifestBuf, err = json.Marshal(manifest)
So(err, ShouldBeNil)
newManifestDigest := godigest.FromBytes(manifestBuf)
manifestBuf, err = json.Marshal(manifest)
So(err, ShouldBeNil)
newManifestDigest := godigest.FromBytes(manifestBuf)
err = ctlr.StoreController.DefaultStore.RunGCRepo(repoName)
So(err, ShouldBeNil)
err = ctlr.StoreController.DefaultStore.RunGCRepo(repoName)
So(err, ShouldBeNil)
// both signatures should be gc'ed
resp, err = resty.R().Get(baseURL + fmt.Sprintf("/v2/%s/manifests/%s", repoName, cosignTag))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
// both signatures should be gc'ed
resp, err = resty.R().Get(baseURL + fmt.Sprintf("/v2/%s/manifests/%s", repoName, cosignTag))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
resp, err = resty.R().SetQueryParam("artifactType", notreg.ArtifactTypeNotation).Get(
fmt.Sprintf("%s/v2/%s/referrers/%s", baseURL, repoName, digest.String()))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
resp, err = resty.R().SetQueryParam("artifactType", notreg.ArtifactTypeNotation).Get(
fmt.Sprintf("%s/v2/%s/referrers/%s", baseURL, repoName, digest.String()))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
err = json.Unmarshal(resp.Body(), &index)
So(err, ShouldBeNil)
So(len(index.Manifests), ShouldEqual, 0)
err = json.Unmarshal(resp.Body(), &index)
So(err, ShouldBeNil)
So(len(index.Manifests), ShouldEqual, 0)
resp, err = resty.R().SetQueryParam("artifactType", notreg.ArtifactTypeNotation).Get(
fmt.Sprintf("%s/v2/%s/referrers/%s", baseURL, repoName, newManifestDigest.String()))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
resp, err = resty.R().SetQueryParam("artifactType", notreg.ArtifactTypeNotation).Get(
fmt.Sprintf("%s/v2/%s/referrers/%s", baseURL, repoName, newManifestDigest.String()))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
err = json.Unmarshal(resp.Body(), &index)
So(err, ShouldBeNil)
So(len(index.Manifests), ShouldEqual, 0)
err = json.Unmarshal(resp.Body(), &index)
So(err, ShouldBeNil)
So(len(index.Manifests), ShouldEqual, 0)
// untagged image should also be gc'ed
resp, err = resty.R().Get(baseURL + fmt.Sprintf("/v2/%s/manifests/%s", repoName, untaggedManifestDigest))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
// untagged image should also be gc'ed
resp, err = resty.R().Get(baseURL + fmt.Sprintf("/v2/%s/manifests/%s", repoName, untaggedManifestDigest))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
})
})
Convey("Do not gc manifests which are part of a multiarch image", func(c C) {
repoName := "testrepo" //nolint:goconst
tag := "0.0.1"
port := test.GetFreePort()
baseURL := test.GetBaseURL(port)
conf := config.New()
conf.HTTP.Port = port
ctlr := makeController(conf, t.TempDir())
dir := t.TempDir()
ctlr.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.GC = true
ctlr.Config.Storage.GCDelay = 500 * time.Millisecond
ctlr.Config.Storage.GCDelay = 1 * time.Second
ctlr.Config.Storage.UntaggedImageRetentionDelay = 1 * time.Second
err := test.WriteImageToFileSystem(test.CreateDefaultImage(), repoName, tag,
test.GetDefaultStoreController(dir, ctlr.Log))
@ -7787,7 +7810,10 @@ func TestPeriodicGC(t *testing.T) {
subPaths := make(map[string]config.StorageConfig)
subPaths["/a"] = config.StorageConfig{RootDirectory: subDir, GC: true, GCDelay: 1 * time.Second, GCInterval: 24 * time.Hour, RemoteCache: false, Dedupe: false} //nolint:lll // gofumpt conflicts with lll
subPaths["/a"] = config.StorageConfig{
RootDirectory: subDir, GC: true, GCDelay: 1 * time.Second,
UntaggedImageRetentionDelay: 1 * time.Second, GCInterval: 24 * time.Hour, RemoteCache: false, Dedupe: false,
} //nolint:lll // gofumpt conflicts with lll
ctlr.Config.Storage.Dedupe = false
ctlr.Config.Storage.SubPaths = subPaths

View file

@ -832,6 +832,11 @@ func (rh *RouteHandler) DeleteManifest(response http.ResponseWriter, request *ht
details["reference"] = reference
e := apiErr.NewError(apiErr.UNSUPPORTED).AddDetail(details)
zcommon.WriteJSON(response, http.StatusBadRequest, apiErr.NewErrorList(e))
} else if errors.Is(err, zerr.ErrManifestReferenced) {
// manifest is part of an index image, don't allow index manipulations.
details["reference"] = reference
e := apiErr.NewError(apiErr.DENIED).AddDetail(details)
zcommon.WriteJSON(response, http.StatusMethodNotAllowed, apiErr.NewErrorList(e))
} else {
rh.c.Log.Error().Err(err).Msg("unexpected error")
response.WriteHeader(http.StatusInternalServerError)

View file

@ -468,7 +468,7 @@ func TestNegativeServerResponse(t *testing.T) {
dir := t.TempDir()
imageStore := local.NewImageStore(dir, false, 0, false, false,
imageStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)
storeController := storage.StoreController{

View file

@ -27,7 +27,6 @@ import (
"zotregistry.io/zot/pkg/extensions/monitoring"
zlog "zotregistry.io/zot/pkg/log"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/s3"
)
// metadataConfig reports metadata after parsing, which we use to track
@ -631,6 +630,10 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
config.Storage.GCDelay = 0
}
if viperInstance.Get("storage::gcdelay") == nil {
config.Storage.UntaggedImageRetentionDelay = 0
}
if viperInstance.Get("storage::gcinterval") == nil {
config.Storage.GCInterval = 0
}
@ -649,7 +652,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
// s3 dedup=false, check for previous dedup usage and set to true if cachedb found
if !config.Storage.Dedupe && config.Storage.StorageDriver != nil {
cacheDir, _ := config.Storage.StorageDriver["rootdirectory"].(string)
cachePath := path.Join(cacheDir, s3.CacheDBName+storageConstants.DBExtensionName)
cachePath := path.Join(cacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName)
if _, err := os.Stat(cachePath); err == nil {
log.Info().Msg("Config: dedupe set to false for s3 driver but used to be true.")
@ -667,10 +670,10 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
storageConfig.RemoteCache = true
}
// s3 dedup=false, check for previous dedup usage and set to true if cachedb found
// s3 dedup=false, check for previous dedupe usage and set to true if cachedb found
if !storageConfig.Dedupe && storageConfig.StorageDriver != nil {
subpathCacheDir, _ := storageConfig.StorageDriver["rootdirectory"].(string)
subpathCachePath := path.Join(subpathCacheDir, s3.CacheDBName+storageConstants.DBExtensionName)
subpathCachePath := path.Join(subpathCacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName)
if _, err := os.Stat(subpathCachePath); err == nil {
log.Info().Msg("Config: dedupe set to false for s3 driver but used to be true. ")
@ -682,11 +685,21 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
// if gc is enabled
if storageConfig.GC {
// and gcReferrers is not set, it is set to default value
if !viperInstance.IsSet("storage::subpaths::" + name + "::gcreferrers") {
storageConfig.GCReferrers = true
}
// and gcDelay is not set, it is set to default value
if !viperInstance.IsSet("storage::subpaths::" + name + "::gcdelay") {
storageConfig.GCDelay = storageConstants.DefaultGCDelay
}
// and retentionDelay is not set, it is set to default value
if !viperInstance.IsSet("storage::subpaths::" + name + "::retentiondelay") {
storageConfig.UntaggedImageRetentionDelay = storageConstants.DefaultUntaggedImgeRetentionDelay
}
// and gcInterval is not set, it is set to default value
if !viperInstance.IsSet("storage::subpaths::" + name + "::gcinterval") {
storageConfig.GCInterval = storageConstants.DefaultGCInterval

View file

@ -14,7 +14,6 @@ import (
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/cli"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/s3"
. "zotregistry.io/zot/pkg/test"
)
@ -521,7 +520,7 @@ func TestVerify(t *testing.T) {
// s3 dedup=false, check for previous dedup usage and set to true if cachedb found
cacheDir := t.TempDir()
existingDBPath := path.Join(cacheDir, s3.CacheDBName+storageConstants.DBExtensionName)
existingDBPath := path.Join(cacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName)
_, err = os.Create(existingDBPath)
So(err, ShouldBeNil)
@ -537,7 +536,7 @@ func TestVerify(t *testing.T) {
// subpath s3 dedup=false, check for previous dedup usage and set to true if cachedb found
cacheDir = t.TempDir()
existingDBPath = path.Join(cacheDir, s3.CacheDBName+storageConstants.DBExtensionName)
existingDBPath = path.Join(cacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName)
_, err = os.Create(existingDBPath)
So(err, ShouldBeNil)

View file

@ -149,7 +149,7 @@ func TestSignatureUploadAndVerification(t *testing.T) {
writers := io.MultiWriter(os.Stdout, logFile)
logger.Logger = logger.Output(writers)
imageStore := local.NewImageStore(globalDir, false, 0, false, false,
imageStore := local.NewImageStore(globalDir, false, false, 0, 0, false, false,
logger, monitoring.NewMetricsServer(false, logger), nil, nil)
storeController := storage.StoreController{
@ -267,7 +267,7 @@ func TestSignatureUploadAndVerification(t *testing.T) {
writers := io.MultiWriter(os.Stdout, logFile)
logger.Logger = logger.Output(writers)
imageStore := local.NewImageStore(globalDir, false, 0, false, false,
imageStore := local.NewImageStore(globalDir, false, false, 0, 0, false, false,
logger, monitoring.NewMetricsServer(false, logger), nil, nil)
storeController := storage.StoreController{
@ -385,7 +385,7 @@ func TestSignatureUploadAndVerification(t *testing.T) {
writers := io.MultiWriter(os.Stdout, logFile)
logger.Logger = logger.Output(writers)
imageStore := local.NewImageStore(globalDir, false, 0, false, false,
imageStore := local.NewImageStore(globalDir, false, false, 0, 0, false, false,
logger, monitoring.NewMetricsServer(false, logger), nil, nil)
storeController := storage.StoreController{
@ -558,7 +558,7 @@ func TestSignatureUploadAndVerification(t *testing.T) {
writers := io.MultiWriter(os.Stdout, logFile)
logger.Logger = logger.Output(writers)
imageStore := local.NewImageStore(globalDir, false, 0, false, false,
imageStore := local.NewImageStore(globalDir, false, false, 0, 0, false, false,
logger, monitoring.NewMetricsServer(false, logger), nil, nil)
storeController := storage.StoreController{
@ -813,7 +813,7 @@ func TestSignatureUploadAndVerification(t *testing.T) {
writers := io.MultiWriter(os.Stdout, logFile)
logger.Logger = logger.Output(writers)
imageStore := local.NewImageStore(globalDir, false, 0, false, false,
imageStore := local.NewImageStore(globalDir, false, false, 0, 0, false, false,
logger, monitoring.NewMetricsServer(false, logger), nil, nil)
storeController := storage.StoreController{

View file

@ -4,8 +4,6 @@
package extensions
import (
"errors"
"io"
"time"
"zotregistry.io/zot/pkg/api/config"
@ -30,19 +28,25 @@ func EnableScrubExtension(config *config.Config, log log.Logger, storeController
log.Warn().Msg("Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll
}
generator := &taskGenerator{
imgStore: storeController.DefaultStore,
log: log,
// is local imagestore (because of umoci dependency which works only locally)
if config.Storage.StorageDriver == nil {
generator := &taskGenerator{
imgStore: storeController.DefaultStore,
log: log,
}
sch.SubmitGenerator(generator, config.Extensions.Scrub.Interval, scheduler.LowPriority)
}
sch.SubmitGenerator(generator, config.Extensions.Scrub.Interval, scheduler.LowPriority)
if config.Storage.SubPaths != nil {
for route := range config.Storage.SubPaths {
generator := &taskGenerator{
imgStore: storeController.SubStore[route],
log: log,
// is local imagestore (because of umoci dependency which works only locally)
if config.Storage.SubPaths[route].StorageDriver == nil {
generator := &taskGenerator{
imgStore: storeController.SubStore[route],
log: log,
}
sch.SubmitGenerator(generator, config.Extensions.Scrub.Interval, scheduler.LowPriority)
}
sch.SubmitGenerator(generator, config.Extensions.Scrub.Interval, scheduler.LowPriority)
}
}
} else {
@ -59,8 +63,7 @@ type taskGenerator struct {
func (gen *taskGenerator) Next() (scheduler.Task, error) {
repo, err := gen.imgStore.GetNextRepository(gen.lastRepo)
if err != nil && !errors.Is(err, io.EOF) {
if err != nil {
return nil, err
}

View file

@ -490,7 +490,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
var index ispec.Index
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
indexContent, err := imgStore.GetIndexContent("zot-test")
@ -522,7 +522,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
var index ispec.Index
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
indexContent, err := imgStore.GetIndexContent("zot-test")
@ -592,7 +592,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
index.Manifests = append(index.Manifests, manifestDesc)
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
@ -654,7 +654,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
index.Manifests = append(index.Manifests, manifestDesc)
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
@ -718,7 +718,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
index.Manifests = append(index.Manifests, manifestDesc)
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
@ -781,7 +781,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
index.Manifests = append(index.Manifests, manifestDesc)
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
err = os.Chmod(path.Join(dir, "zot-test", "blobs"), 0o000)
@ -879,7 +879,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
index.Manifests = append(index.Manifests, manifestDesc)
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
imgStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
err = os.Chmod(path.Join(dir, "zot-test", "blobs", "sha256", manifest.Config.Digest.Encoded()), 0o000)

View file

@ -198,7 +198,7 @@ func TestRunScrubRepo(t *testing.T) {
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second, true,
imgStore := local.NewImageStore(dir, true, true, 1*time.Second, 1*time.Second, true,
true, log, metrics, nil, cacheDriver)
srcStorageCtlr := test.GetDefaultStoreController(dir, log)
@ -234,7 +234,7 @@ func TestRunScrubRepo(t *testing.T) {
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second, true,
imgStore := local.NewImageStore(dir, true, true, 1*time.Second, 1*time.Second, true,
true, log, metrics, nil, cacheDriver)
srcStorageCtlr := test.GetDefaultStoreController(dir, log)
@ -276,8 +276,8 @@ func TestRunScrubRepo(t *testing.T) {
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second,
true, true, log, metrics, nil, cacheDriver,
imgStore := local.NewImageStore(dir, true, true, 1*time.Second,
1*time.Second, true, true, log, metrics, nil, cacheDriver,
)
srcStorageCtlr := test.GetDefaultStoreController(dir, log)

View file

@ -322,8 +322,8 @@ func TestImageFormat(t *testing.T) {
dbDir := t.TempDir()
metrics := monitoring.NewMetricsServer(false, log)
defaultStore := local.NewImageStore(imgDir, false, storageConstants.DefaultGCDelay,
false, false, log, metrics, nil, nil)
defaultStore := local.NewImageStore(imgDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
storeController := storage.StoreController{DefaultStore: defaultStore}
params := boltdb.DBParameters{

View file

@ -25,6 +25,7 @@ import (
mTypes "zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/storage"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/imagestore"
"zotregistry.io/zot/pkg/storage/local"
storageTypes "zotregistry.io/zot/pkg/storage/types"
"zotregistry.io/zot/pkg/test"
@ -73,14 +74,14 @@ func TestMultipleStoragePath(t *testing.T) {
// Create ImageStore
firstStore := local.NewImageStore(firstRootDir, false, storageConstants.DefaultGCDelay, false, false, log, metrics,
nil, nil)
firstStore := local.NewImageStore(firstRootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
secondStore := local.NewImageStore(secondRootDir, false, storageConstants.DefaultGCDelay, false, false, log, metrics,
nil, nil)
secondStore := local.NewImageStore(secondRootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
thirdStore := local.NewImageStore(thirdRootDir, false, storageConstants.DefaultGCDelay, false, false, log, metrics,
nil, nil)
thirdStore := local.NewImageStore(thirdRootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
storeController := storage.StoreController{}
@ -188,7 +189,8 @@ func TestTrivyLibraryErrors(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)
// Create ImageStore
store := local.NewImageStore(rootDir, false, storageConstants.DefaultGCDelay, false, false, log, metrics, nil, nil)
store := local.NewImageStore(rootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
storeController := storage.StoreController{}
storeController.DefaultStore = store
@ -405,7 +407,8 @@ func TestImageScannable(t *testing.T) {
// Continue with initializing the objects the scanner depends on
metrics := monitoring.NewMetricsServer(false, log)
store := local.NewImageStore(rootDir, false, storageConstants.DefaultGCDelay, false, false, log, metrics, nil, nil)
store := local.NewImageStore(rootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
storeController := storage.StoreController{}
storeController.DefaultStore = store
@ -471,7 +474,8 @@ func TestDefaultTrivyDBUrl(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)
// Create ImageStore
store := local.NewImageStore(rootDir, false, storageConstants.DefaultGCDelay, false, false, log, metrics, nil, nil)
store := local.NewImageStore(rootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
storeController := storage.StoreController{}
storeController.DefaultStore = store
@ -515,7 +519,7 @@ func TestDefaultTrivyDBUrl(t *testing.T) {
func TestIsIndexScanable(t *testing.T) {
Convey("IsIndexScanable", t, func() {
storeController := storage.StoreController{}
storeController.DefaultStore = &local.ImageStoreLocal{}
storeController.DefaultStore = &imagestore.ImageStore{}
metaDB := &boltdb.BoltDB{}
log := log.NewLogger("debug", "")

View file

@ -182,7 +182,7 @@ func TestVulnerableLayer(t *testing.T) {
tempDir := t.TempDir()
log := log.NewLogger("debug", "")
imageStore := local.NewImageStore(tempDir, false, 0, false, false,
imageStore := local.NewImageStore(tempDir, false, false, 0, 0, false, false,
log, monitoring.NewMetricsServer(false, log), nil, nil)
storeController := storage.StoreController{

View file

@ -1193,7 +1193,7 @@ func TestExpandedRepoInfo(t *testing.T) {
ctlr := api.NewController(conf)
imageStore := local.NewImageStore(tempDir, false, 0, false, false,
imageStore := local.NewImageStore(tempDir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)
storeController := storage.StoreController{
@ -1325,8 +1325,8 @@ func TestExpandedRepoInfo(t *testing.T) {
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
testStorage := local.NewImageStore(rootDir, false, storageConstants.DefaultGCDelay,
false, false, log, metrics, nil, nil)
testStorage := local.NewImageStore(rootDir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
resp, err := resty.R().Get(baseURL + "/v2/")
So(resp, ShouldNotBeNil)
@ -1671,7 +1671,7 @@ func TestExpandedRepoInfo(t *testing.T) {
conf.Extensions.Search.CVE = nil
ctlr := api.NewController(conf)
imageStore := local.NewImageStore(conf.Storage.RootDirectory, false, 0, false, false,
imageStore := local.NewImageStore(conf.Storage.RootDirectory, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)
storeController := storage.StoreController{
@ -5420,8 +5420,8 @@ func TestMetaDBWhenDeletingImages(t *testing.T) {
// get signatur digest
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
storage := local.NewImageStore(dir, false, storageConstants.DefaultGCDelay,
false, false, log, metrics, nil, nil)
storage := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
indexBlob, err := storage.GetIndexContent(repo)
So(err, ShouldBeNil)
@ -5497,8 +5497,8 @@ func TestMetaDBWhenDeletingImages(t *testing.T) {
// get signatur digest
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
storage := local.NewImageStore(dir, false, storageConstants.DefaultGCDelay,
false, false, log, metrics, nil, nil)
storage := local.NewImageStore(dir, false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil)
indexBlob, err := storage.GetIndexContent(repo)
So(err, ShouldBeNil)

View file

@ -543,7 +543,7 @@ func TestChangingRepoState(t *testing.T) {
}
// ------ Create the test repos
defaultStore := local.NewImageStore(conf.Storage.RootDirectory, false, 0, false, false,
defaultStore := local.NewImageStore(conf.Storage.RootDirectory, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)
err = WriteImageToFileSystem(img, accesibleRepo, "tag", storage.StoreController{

View file

@ -2,7 +2,8 @@ package constants
// references type.
const (
Oras = "OrasReference"
Cosign = "CosignSignature"
OCI = "OCIReference"
Oras = "OrasReference"
Cosign = "CosignSignature"
OCI = "OCIReference"
SyncBlobUploadDir = ".sync"
)

View file

@ -281,8 +281,9 @@ func getImageStoreFromImageReference(imageReference types.ImageReference, repo,
metrics := monitoring.NewMetricsServer(false, log.Logger{})
tempImageStore := local.NewImageStore(tempRootDir, false,
storageConstants.DefaultGCDelay, false, false, log.Logger{}, metrics, nil, nil)
tempImageStore := local.NewImageStore(tempRootDir, false, false,
storageConstants.DefaultGCDelay, storageConstants.DefaultUntaggedImgeRetentionDelay,
false, false, log.Logger{}, metrics, nil, nil)
return tempImageStore
}

View file

@ -12,6 +12,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/gofrs/uuid"
"zotregistry.io/zot/pkg/extensions/sync/constants"
"zotregistry.io/zot/pkg/storage"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/test/inject"
@ -39,7 +40,7 @@ func (oci OciLayoutStorageImpl) GetContext() *types.SystemContext {
func (oci OciLayoutStorageImpl) GetImageReference(repo string, reference string) (types.ImageReference, error) {
localImageStore := oci.storeController.GetImageStore(repo)
tempSyncPath := path.Join(localImageStore.RootDir(), repo, SyncBlobUploadDir)
tempSyncPath := path.Join(localImageStore.RootDir(), repo, constants.SyncBlobUploadDir)
// create session folder
uuid, err := uuid.NewV4()

View file

@ -68,8 +68,8 @@ func TestInjectSyncUtils(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imageStore := local.NewImageStore(t.TempDir(), false, storageConstants.DefaultGCDelay,
false, false, log, metrics, nil, nil,
imageStore := local.NewImageStore(t.TempDir(), false, false, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, false, log, metrics, nil, nil,
)
injected = inject.InjectFailure(0)
@ -182,8 +182,8 @@ func TestLocalRegistry(t *testing.T) {
UseRelPaths: true,
}, log)
syncImgStore := local.NewImageStore(dir, true, storageConstants.DefaultGCDelay,
true, true, log, metrics, nil, cacheDriver)
syncImgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
repoName := "repo"
registry := NewLocalRegistry(storage.StoreController{DefaultStore: syncImgStore}, nil, log)
@ -300,8 +300,8 @@ func TestLocalRegistry(t *testing.T) {
MandatoryAnnotations: []string{"annot1"},
}, log)
syncImgStore := local.NewImageStore(dir, true, storageConstants.DefaultGCDelay,
true, true, log, metrics, linter, cacheDriver)
syncImgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, linter, cacheDriver)
repoName := "repo"
registry := NewLocalRegistry(storage.StoreController{DefaultStore: syncImgStore}, nil, log)

View file

@ -43,6 +43,7 @@ import (
extconf "zotregistry.io/zot/pkg/extensions/config"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
"zotregistry.io/zot/pkg/extensions/sync"
syncConstants "zotregistry.io/zot/pkg/extensions/sync/constants"
"zotregistry.io/zot/pkg/log"
mTypes "zotregistry.io/zot/pkg/meta/types"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
@ -591,7 +592,7 @@ func TestOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
err = os.MkdirAll(path.Join(destDir, testImage, sync.SyncBlobUploadDir), 0o000)
err = os.MkdirAll(path.Join(destDir, testImage, syncConstants.SyncBlobUploadDir), 0o000)
if err != nil {
panic(err)
}
@ -604,7 +605,7 @@ func TestOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
err = os.Chmod(path.Join(destDir, testImage, sync.SyncBlobUploadDir), 0o755)
err = os.Chmod(path.Join(destDir, testImage, syncConstants.SyncBlobUploadDir), 0o755)
if err != nil {
panic(err)
}
@ -1687,7 +1688,7 @@ func TestPermsDenied(t *testing.T) {
defer dcm.StopServer()
syncSubDir := path.Join(destDir, testImage, sync.SyncBlobUploadDir)
syncSubDir := path.Join(destDir, testImage, syncConstants.SyncBlobUploadDir)
err := os.MkdirAll(syncSubDir, 0o755)
So(err, ShouldBeNil)
@ -1698,7 +1699,7 @@ func TestPermsDenied(t *testing.T) {
dcm.StartAndWait(destPort)
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"couldn't get a local image reference", 20*time.Second)
"couldn't get a local image reference", 50*time.Second)
if err != nil {
panic(err)
}
@ -4911,7 +4912,7 @@ func TestOnDemandPullsOnce(t *testing.T) {
done := make(chan bool)
var maxLen int
syncBlobUploadDir := path.Join(destDir, testImage, sync.SyncBlobUploadDir)
syncBlobUploadDir := path.Join(destDir, testImage, syncConstants.SyncBlobUploadDir)
go func() {
for {
@ -4994,7 +4995,7 @@ func TestError(t *testing.T) {
}()
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"finished syncing all repos", 15*time.Second)
"couldn't commit image to local image store", 30*time.Second)
if err != nil {
panic(err)
}
@ -6531,7 +6532,7 @@ func pushRepo(url, repoName string) godigest.Digest {
func waitSync(rootDir, repoName string) {
// wait for .sync subdirs to be removed
for {
dirs, err := os.ReadDir(path.Join(rootDir, repoName, sync.SyncBlobUploadDir))
dirs, err := os.ReadDir(path.Join(rootDir, repoName, syncConstants.SyncBlobUploadDir))
if err == nil && len(dirs) == 0 {
// stop watching /.sync/ subdirs
return

View file

@ -29,10 +29,6 @@ import (
"zotregistry.io/zot/pkg/test/inject"
)
const (
SyncBlobUploadDir = ".sync"
)
// Get sync.FileCredentials from file.
func getFileCredentials(filepath string) (syncconf.CredentialsFile, error) {
credsFile, err := os.ReadFile(filepath)

View file

@ -31,8 +31,8 @@ func TestOnUpdateManifest(t *testing.T) {
storeController := storage.StoreController{}
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
storeController.DefaultStore = local.NewImageStore(rootDir, true, 1*time.Second,
true, true, log, metrics, nil, nil,
storeController.DefaultStore = local.NewImageStore(rootDir, true, true, 1*time.Second,
1*time.Second, true, true, log, metrics, nil, nil,
)
params := boltdb.DBParameters{
@ -72,8 +72,8 @@ func TestOnUpdateManifest(t *testing.T) {
storeController := storage.StoreController{}
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
storeController.DefaultStore = local.NewImageStore(rootDir, true, 1*time.Second,
true, true, log, metrics, nil, nil,
storeController.DefaultStore = local.NewImageStore(rootDir, true, true, 1*time.Second,
1*time.Second, true, true, log, metrics, nil, nil,
)
metaDB := mocks.MetaDBMock{

View file

@ -399,7 +399,7 @@ func TestParseStorageDynamoWrapper(t *testing.T) {
func RunParseStorageTests(rootDir string, metaDB mTypes.MetaDB) {
Convey("Test with simple case", func() {
imageStore := local.NewImageStore(rootDir, false, 0, false, false,
imageStore := local.NewImageStore(rootDir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)
storeController := storage.StoreController{DefaultStore: imageStore}
@ -485,7 +485,7 @@ func RunParseStorageTests(rootDir string, metaDB mTypes.MetaDB) {
})
Convey("Accept orphan signatures", func() {
imageStore := local.NewImageStore(rootDir, false, 0, false, false,
imageStore := local.NewImageStore(rootDir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)
storeController := storage.StoreController{DefaultStore: imageStore}
@ -542,7 +542,7 @@ func RunParseStorageTests(rootDir string, metaDB mTypes.MetaDB) {
})
Convey("Check statistics after load", func() {
imageStore := local.NewImageStore(rootDir, false, 0, false, false,
imageStore := local.NewImageStore(rootDir, false, false, 0, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)
storeController := storage.StoreController{DefaultStore: imageStore}

View file

@ -77,6 +77,10 @@ func NewBoltDBCache(parameters interface{}, log zlog.Logger) Cache {
}
}
func (d *BoltDBDriver) UsesRelativePaths() bool {
return d.useRelPaths
}
func (d *BoltDBDriver) Name() string {
return "boltdb"
}

View file

@ -19,4 +19,7 @@ type Cache interface {
// Delete a blob from the cachedb.
DeleteBlob(digest godigest.Digest, path string) error
// UsesRelativePaths returns if cache is storing blobs relative to cache rootDir
UsesRelativePaths() bool
}

View file

@ -99,6 +99,10 @@ func NewDynamoDBCache(parameters interface{}, log zlog.Logger) Cache {
return driver
}
func (d *DynamoDBDriver) UsesRelativePaths() bool {
return false
}
func (d *DynamoDBDriver) Name() string {
return "dynamodb"
}

View file

@ -35,6 +35,8 @@ func TestCache(t *testing.T) {
}, log)
So(cacheDriver, ShouldNotBeNil)
So(cacheDriver.UsesRelativePaths(), ShouldBeTrue)
name := cacheDriver.Name()
So(name, ShouldEqual, "boltdb")

View file

@ -5,22 +5,22 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"path"
"strings"
"time"
"github.com/docker/distribution/registry/storage/driver"
notreg "github.com/notaryproject/notation-go/registry"
godigest "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/schema"
imeta "github.com/opencontainers/image-spec/specs-go"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
oras "github.com/oras-project/artifacts-spec/specs-go/v1"
"github.com/rs/zerolog"
zerr "zotregistry.io/zot/errors"
zcommon "zotregistry.io/zot/pkg/common"
zlog "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/scheduler"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
storageTypes "zotregistry.io/zot/pkg/storage/types"
@ -62,7 +62,7 @@ func GetManifestDescByReference(index ispec.Index, reference string) (ispec.Desc
}
func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaType string, body []byte,
log zerolog.Logger,
log zlog.Logger,
) (godigest.Digest, error) {
// validate the manifest
if !IsSupportedMediaType(mediaType) {
@ -105,7 +105,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
continue
}
ok, _, err := imgStore.StatBlob(repo, layer.Digest)
ok, _, _, err := imgStore.StatBlob(repo, layer.Digest)
if !ok || err != nil {
log.Error().Err(err).Str("digest", layer.Digest.String()).Msg("missing layer blob")
@ -136,7 +136,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
}
for _, manifest := range indexManifest.Manifests {
if ok, _, err := imgStore.StatBlob(repo, manifest.Digest); !ok || err != nil {
if ok, _, _, err := imgStore.StatBlob(repo, manifest.Digest); !ok || err != nil {
log.Error().Err(err).Str("digest", manifest.Digest.String()).Msg("missing manifest blob")
return "", zerr.ErrBadManifest
@ -147,7 +147,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
return "", nil
}
func GetAndValidateRequestDigest(body []byte, digestStr string, log zerolog.Logger) (godigest.Digest, error) {
func GetAndValidateRequestDigest(body []byte, digestStr string, log zlog.Logger) (godigest.Digest, error) {
bodyDigest := godigest.FromBytes(body)
d, err := godigest.Parse(digestStr)
@ -169,7 +169,7 @@ CheckIfIndexNeedsUpdate verifies if an index needs to be updated given a new man
Returns whether or not index needs update, in the latter case it will also return the previous digest.
*/
func CheckIfIndexNeedsUpdate(index *ispec.Index, desc *ispec.Descriptor,
log zerolog.Logger,
log zlog.Logger,
) (bool, godigest.Digest, error) {
var oldDgst godigest.Digest
@ -242,11 +242,15 @@ func CheckIfIndexNeedsUpdate(index *ispec.Index, desc *ispec.Descriptor,
}
// GetIndex returns the contents of index.json.
func GetIndex(imgStore storageTypes.ImageStore, repo string, log zerolog.Logger) (ispec.Index, error) {
func GetIndex(imgStore storageTypes.ImageStore, repo string, log zlog.Logger) (ispec.Index, error) {
var index ispec.Index
buf, err := imgStore.GetIndexContent(repo)
if err != nil {
if errors.As(err, &driver.PathNotFoundError{}) {
return index, zerr.ErrRepoNotFound
}
return index, err
}
@ -260,7 +264,7 @@ func GetIndex(imgStore storageTypes.ImageStore, repo string, log zerolog.Logger)
}
// GetImageIndex returns a multiarch type image.
func GetImageIndex(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, log zerolog.Logger,
func GetImageIndex(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, log zlog.Logger,
) (ispec.Index, error) {
var imageIndex ispec.Index
@ -285,7 +289,7 @@ func GetImageIndex(imgStore storageTypes.ImageStore, repo string, digest godiges
return imageIndex, nil
}
func GetImageManifest(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, log zerolog.Logger,
func GetImageManifest(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, log zlog.Logger,
) (ispec.Manifest, error) {
var manifestContent ispec.Manifest
@ -352,7 +356,7 @@ index, ensure that they do not have a name or they are not in other
manifest indexes else GC can never clean them.
*/
func UpdateIndexWithPrunedImageManifests(imgStore storageTypes.ImageStore, index *ispec.Index, repo string,
desc ispec.Descriptor, oldDgst godigest.Digest, log zerolog.Logger,
desc ispec.Descriptor, oldDgst godigest.Digest, log zlog.Logger,
) error {
if (desc.MediaType == ispec.MediaTypeImageIndex) && (oldDgst != "") {
otherImgIndexes := []ispec.Descriptor{}
@ -385,7 +389,7 @@ same constitutent manifests so that they can be garbage-collected correctly
PruneImageManifestsFromIndex is a helper routine to achieve this.
*/
func PruneImageManifestsFromIndex(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, //nolint:gocyclo,lll
outIndex ispec.Index, otherImgIndexes []ispec.Descriptor, log zerolog.Logger,
outIndex ispec.Index, otherImgIndexes []ispec.Descriptor, log zlog.Logger,
) ([]ispec.Descriptor, error) {
dir := path.Join(imgStore.RootDir(), repo)
@ -459,8 +463,8 @@ func PruneImageManifestsFromIndex(imgStore storageTypes.ImageStore, repo string,
return prunedManifests, nil
}
func isBlobReferencedInManifest(imgStore storageTypes.ImageStore, repo string,
bdigest, mdigest godigest.Digest, log zerolog.Logger,
func isBlobReferencedInImageManifest(imgStore storageTypes.ImageStore, repo string,
bdigest, mdigest godigest.Digest, log zlog.Logger,
) (bool, error) {
if bdigest == mdigest {
return true, nil
@ -487,16 +491,14 @@ func isBlobReferencedInManifest(imgStore storageTypes.ImageStore, repo string,
return false, nil
}
func isBlobReferencedInImageIndex(imgStore storageTypes.ImageStore, repo string,
digest godigest.Digest, index ispec.Index, log zerolog.Logger,
func IsBlobReferencedInImageIndex(imgStore storageTypes.ImageStore, repo string,
digest godigest.Digest, index ispec.Index, log zlog.Logger,
) (bool, error) {
for _, desc := range index.Manifests {
var found bool
switch desc.MediaType {
case ispec.MediaTypeImageIndex:
/* this branch is not needed, because every manifests in index is already checked
when this one is hit, all manifests are referenced in index.json */
indexImage, err := GetImageIndex(imgStore, repo, desc.Digest, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
@ -505,9 +507,9 @@ func isBlobReferencedInImageIndex(imgStore storageTypes.ImageStore, repo string,
return false, err
}
found, _ = isBlobReferencedInImageIndex(imgStore, repo, digest, indexImage, log)
found, _ = IsBlobReferencedInImageIndex(imgStore, repo, digest, indexImage, log)
case ispec.MediaTypeImageManifest:
found, _ = isBlobReferencedInManifest(imgStore, repo, digest, desc.Digest, log)
found, _ = isBlobReferencedInImageManifest(imgStore, repo, digest, desc.Digest, log)
}
if found {
@ -519,7 +521,7 @@ func isBlobReferencedInImageIndex(imgStore storageTypes.ImageStore, repo string,
}
func IsBlobReferenced(imgStore storageTypes.ImageStore, repo string,
digest godigest.Digest, log zerolog.Logger,
digest godigest.Digest, log zlog.Logger,
) (bool, error) {
dir := path.Join(imgStore.RootDir(), repo)
if !imgStore.DirExists(dir) {
@ -531,7 +533,133 @@ func IsBlobReferenced(imgStore storageTypes.ImageStore, repo string,
return false, err
}
return isBlobReferencedInImageIndex(imgStore, repo, digest, index, log)
return IsBlobReferencedInImageIndex(imgStore, repo, digest, index, log)
}
/* Garbage Collection */
func AddImageManifestBlobsToReferences(imgStore storageTypes.ImageStore,
repo string, mdigest godigest.Digest, refBlobs map[string]bool, log zlog.Logger,
) error {
manifestContent, err := GetImageManifest(imgStore, repo, mdigest, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")
return err
}
refBlobs[mdigest.String()] = true
refBlobs[manifestContent.Config.Digest.String()] = true
// if there is a Subject, it may not exist yet and that is ok
if manifestContent.Subject != nil {
refBlobs[manifestContent.Subject.Digest.String()] = true
}
for _, layer := range manifestContent.Layers {
refBlobs[layer.Digest.String()] = true
}
return nil
}
func AddORASImageManifestBlobsToReferences(imgStore storageTypes.ImageStore,
repo string, mdigest godigest.Digest, refBlobs map[string]bool, log zlog.Logger,
) error {
manifestContent, err := GetOrasManifestByDigest(imgStore, repo, mdigest, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")
return err
}
refBlobs[mdigest.String()] = true
// if there is a Subject, it may not exist yet and that is ok
if manifestContent.Subject != nil {
refBlobs[manifestContent.Subject.Digest.String()] = true
}
for _, blob := range manifestContent.Blobs {
refBlobs[blob.Digest.String()] = true
}
return nil
}
func AddImageIndexBlobsToReferences(imgStore storageTypes.ImageStore,
repo string, mdigest godigest.Digest, refBlobs map[string]bool, log zlog.Logger,
) error {
index, err := GetImageIndex(imgStore, repo, mdigest, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")
return err
}
refBlobs[mdigest.String()] = true
// if there is a Subject, it may not exist yet and that is ok
if index.Subject != nil {
refBlobs[index.Subject.Digest.String()] = true
}
for _, manifest := range index.Manifests {
refBlobs[manifest.Digest.String()] = true
}
return nil
}
func AddIndexBlobToReferences(imgStore storageTypes.ImageStore,
repo string, index ispec.Index, refBlobs map[string]bool, log zlog.Logger,
) error {
for _, desc := range index.Manifests {
switch desc.MediaType {
case ispec.MediaTypeImageIndex:
if err := AddImageIndexBlobsToReferences(imgStore, repo, desc.Digest, refBlobs, log); err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("failed to read blobs in multiarch(index) image")
return err
}
case ispec.MediaTypeImageManifest:
if err := AddImageManifestBlobsToReferences(imgStore, repo, desc.Digest, refBlobs, log); err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("failed to read blobs in image manifest")
return err
}
case oras.MediaTypeArtifactManifest:
if err := AddORASImageManifestBlobsToReferences(imgStore, repo, desc.Digest, refBlobs, log); err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("failed to read blobs in image manifest")
return err
}
}
}
return nil
}
func AddRepoBlobsToReferences(imgStore storageTypes.ImageStore,
repo string, refBlobs map[string]bool, log zlog.Logger,
) error {
dir := path.Join(imgStore.RootDir(), repo)
if !imgStore.DirExists(dir) {
return zerr.ErrRepoNotFound
}
index, err := GetIndex(imgStore, repo, log)
if err != nil {
return err
}
return AddIndexBlobToReferences(imgStore, repo, index, refBlobs, log)
}
func ApplyLinter(imgStore storageTypes.ImageStore, linter Lint, repo string, descriptor ispec.Descriptor,
@ -580,7 +708,7 @@ func IsSignature(descriptor ispec.Descriptor) bool {
}
func GetOrasReferrers(imgStore storageTypes.ImageStore, repo string, gdigest godigest.Digest, artifactType string,
log zerolog.Logger,
log zlog.Logger,
) ([]oras.Descriptor, error) {
if err := gdigest.Validate(); err != nil {
return nil, err
@ -638,7 +766,7 @@ func GetOrasReferrers(imgStore storageTypes.ImageStore, repo string, gdigest god
}
func GetReferrers(imgStore storageTypes.ImageStore, repo string, gdigest godigest.Digest, artifactTypes []string,
log zerolog.Logger,
log zlog.Logger,
) (ispec.Index, error) {
nilIndex := ispec.Index{}
@ -741,7 +869,7 @@ func GetReferrers(imgStore storageTypes.ImageStore, repo string, gdigest godiges
return index, nil
}
func GetOrasManifestByDigest(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, log zerolog.Logger,
func GetOrasManifestByDigest(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, log zlog.Logger,
) (oras.Manifest, error) {
var artManifest oras.Manifest
@ -827,7 +955,7 @@ type DedupeTaskGenerator struct {
and generating a task for each unprocessed one*/
lastDigests []godigest.Digest
done bool
Log zerolog.Logger
Log zlog.Logger
}
func (gen *DedupeTaskGenerator) Next() (scheduler.Task, error) {
@ -879,11 +1007,11 @@ type dedupeTask struct {
// blobs paths with the same digest ^
duplicateBlobs []string
dedupe bool
log zerolog.Logger
log zlog.Logger
}
func newDedupeTask(imgStore storageTypes.ImageStore, digest godigest.Digest, dedupe bool,
duplicateBlobs []string, log zerolog.Logger,
duplicateBlobs []string, log zlog.Logger,
) *dedupeTask {
return &dedupeTask{imgStore, digest, duplicateBlobs, dedupe, log}
}
@ -929,8 +1057,7 @@ func (gen *GCTaskGenerator) Next() (scheduler.Task, error) {
gen.nextRun = time.Now().Add(time.Duration(delay) * time.Second)
repo, err := gen.ImgStore.GetNextRepository(gen.lastRepo)
if err != nil && !errors.Is(err, io.EOF) {
if err != nil {
return nil, err
}

View file

@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"os"
"path"
"testing"
godigest "github.com/opencontainers/go-digest"
@ -36,8 +37,8 @@ func TestValidateManifest(t *testing.T) {
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storageConstants.DefaultGCDelay, true,
true, log, metrics, nil, cacheDriver)
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
content := []byte("this is a blob")
digest := godigest.FromBytes(content)
@ -81,6 +82,37 @@ func TestValidateManifest(t *testing.T) {
So(internalErr.GetDetails()["jsonSchemaValidation"], ShouldEqual, "[schemaVersion: Must be less than or equal to 2]")
})
Convey("bad config blob", func() {
manifest := ispec.Manifest{
Config: ispec.Descriptor{
MediaType: ispec.MediaTypeImageConfig,
Digest: cdigest,
Size: int64(len(cblob)),
},
Layers: []ispec.Descriptor{
{
MediaType: ispec.MediaTypeImageLayer,
Digest: digest,
Size: int64(len(content)),
},
},
}
manifest.SchemaVersion = 2
configBlobPath := imgStore.BlobPath("test", cdigest)
err := os.WriteFile(configBlobPath, []byte("bad config blob"), 0o000)
So(err, ShouldBeNil)
body, err := json.Marshal(manifest)
So(err, ShouldBeNil)
// this was actually an umoci error on config blob
_, _, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, body)
So(err, ShouldBeNil)
})
Convey("manifest with non-distributable layers", func() {
content := []byte("this blob doesn't exist")
digest := godigest.FromBytes(content)
@ -124,29 +156,29 @@ func TestGetReferrersErrors(t *testing.T) {
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storageConstants.DefaultGCDelay, false,
true, log, metrics, nil, cacheDriver)
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, false, true, log, metrics, nil, cacheDriver)
artifactType := "application/vnd.example.icecream.v1"
validDigest := godigest.FromBytes([]byte("blob"))
Convey("Trigger invalid digest error", func(c C) {
_, err := common.GetReferrers(imgStore, "zot-test", "invalidDigest",
[]string{artifactType}, log.With().Caller().Logger())
[]string{artifactType}, log)
So(err, ShouldNotBeNil)
_, err = common.GetOrasReferrers(imgStore, "zot-test", "invalidDigest",
artifactType, log.With().Caller().Logger())
artifactType, log)
So(err, ShouldNotBeNil)
})
Convey("Trigger repo not found error", func(c C) {
_, err := common.GetReferrers(imgStore, "zot-test", validDigest,
[]string{artifactType}, log.With().Caller().Logger())
[]string{artifactType}, log)
So(err, ShouldNotBeNil)
_, err = common.GetOrasReferrers(imgStore, "zot-test", validDigest,
artifactType, log.With().Caller().Logger())
artifactType, log)
So(err, ShouldNotBeNil)
})
@ -179,11 +211,11 @@ func TestGetReferrersErrors(t *testing.T) {
}
_, err = common.GetReferrers(imgStore, "zot-test", validDigest,
[]string{artifactType}, log.With().Caller().Logger())
[]string{artifactType}, log)
So(err, ShouldNotBeNil)
_, err = common.GetOrasReferrers(imgStore, "zot-test", validDigest,
artifactType, log.With().Caller().Logger())
artifactType, log)
So(err, ShouldNotBeNil)
})
@ -198,11 +230,11 @@ func TestGetReferrersErrors(t *testing.T) {
}
_, err = common.GetReferrers(imgStore, "zot-test", validDigest,
[]string{artifactType}, log.With().Caller().Logger())
[]string{artifactType}, log)
So(err, ShouldNotBeNil)
_, err = common.GetOrasReferrers(imgStore, "zot-test", validDigest,
artifactType, log.With().Caller().Logger())
artifactType, log)
So(err, ShouldNotBeNil)
})
@ -227,11 +259,11 @@ func TestGetReferrersErrors(t *testing.T) {
}
_, err = common.GetOrasReferrers(imgStore, "zot-test", validDigest,
artifactType, log.With().Caller().Logger())
artifactType, log)
So(err, ShouldNotBeNil)
_, err = common.GetOrasReferrers(imgStore, "zot-test", digest,
artifactType, log.With().Caller().Logger())
artifactType, log)
So(err, ShouldNotBeNil)
})
@ -245,7 +277,7 @@ func TestGetReferrersErrors(t *testing.T) {
},
}
_, err = common.GetOrasReferrers(imgStore, "zot-test", validDigest, artifactType, log.With().Caller().Logger())
_, err = common.GetOrasReferrers(imgStore, "zot-test", validDigest, artifactType, log)
So(err, ShouldNotBeNil)
})
@ -272,7 +304,7 @@ func TestGetReferrersErrors(t *testing.T) {
}
_, err = common.GetReferrers(imgStore, "zot-test", validDigest,
[]string{artifactType}, log.With().Caller().Logger())
[]string{artifactType}, log)
So(err, ShouldNotBeNil)
})
@ -306,7 +338,7 @@ func TestGetReferrersErrors(t *testing.T) {
}
_, err = common.GetReferrers(imgStore, "zot-test", validDigest,
[]string{artifactType}, log.With().Caller().Logger())
[]string{artifactType}, log)
So(err, ShouldBeNil)
})
@ -326,7 +358,7 @@ func TestGetReferrersErrors(t *testing.T) {
}
_, err = common.GetReferrers(imgStore, "zot-test", validDigest,
[]string{}, log.With().Caller().Logger())
[]string{}, log)
So(err, ShouldNotBeNil)
})
@ -348,7 +380,7 @@ func TestGetReferrersErrors(t *testing.T) {
}
ref, err := common.GetReferrers(imgStore, "zot-test", validDigest,
[]string{"art.type"}, log.With().Caller().Logger())
[]string{"art.type"}, log)
So(err, ShouldBeNil)
So(len(ref.Manifests), ShouldEqual, 0)
})
@ -356,7 +388,7 @@ func TestGetReferrersErrors(t *testing.T) {
}
func TestGetImageIndexErrors(t *testing.T) {
log := zerolog.New(os.Stdout)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
Convey("Trigger invalid digest error", t, func(c C) {
imgStore := &mocks.MockedImageStore{}
@ -400,3 +432,193 @@ func TestIsSignature(t *testing.T) {
So(isSingature, ShouldBeFalse)
})
}
// TestGarbageCollectManifestErrors exercises the error paths of
// common.AddRepoBlobsToReferences for a repo holding a single image
// manifest: missing repo, unreadable repo dir, and unreadable manifest blob.
func TestGarbageCollectManifestErrors(t *testing.T) {
Convey("Make imagestore and upload manifest", t, func(c C) {
dir := t.TempDir()
repoName := "test"
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
// no content pushed yet, so the repo dir does not exist
Convey("trigger repo not found in GetReferencedBlobs()", func() {
err := common.AddRepoBlobsToReferences(imgStore, repoName, map[string]bool{}, log)
So(err, ShouldNotBeNil)
})
// upload a layer blob, a config blob and a manifest referencing both
content := []byte("this is a blob")
digest := godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
_, blen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(content), digest)
So(err, ShouldBeNil)
So(blen, ShouldEqual, len(content))
cblob, cdigest := test.GetRandomImageConfig()
_, clen, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(cblob), cdigest)
So(err, ShouldBeNil)
So(clen, ShouldEqual, len(cblob))
manifest := ispec.Manifest{
Config: ispec.Descriptor{
MediaType: ispec.MediaTypeImageConfig,
Digest: cdigest,
Size: int64(len(cblob)),
},
Layers: []ispec.Descriptor{
{
MediaType: ispec.MediaTypeImageLayer,
Digest: digest,
Size: int64(len(content)),
},
},
}
manifest.SchemaVersion = 2
body, err := json.Marshal(manifest)
So(err, ShouldBeNil)
manifestDigest := godigest.FromBytes(body)
_, _, err = imgStore.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, body)
So(err, ShouldBeNil)
// make the repo dir unreadable so reading index.json fails
Convey("trigger GetIndex error in GetReferencedBlobs", func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName), 0o000)
So(err, ShouldBeNil)
defer func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName), 0o755)
So(err, ShouldBeNil)
}()
err = common.AddRepoBlobsToReferences(imgStore, repoName, map[string]bool{}, log)
So(err, ShouldNotBeNil)
})
// make only the manifest blob unreadable so reading the manifest fails
Convey("trigger GetImageManifest error in GetReferencedBlobsInImageManifest", func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", manifestDigest.Encoded()), 0o000)
So(err, ShouldBeNil)
defer func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", manifestDigest.Encoded()), 0o755)
So(err, ShouldBeNil)
}()
err = common.AddRepoBlobsToReferences(imgStore, repoName, map[string]bool{}, log)
So(err, ShouldNotBeNil)
})
})
}
// TestGarbageCollectIndexErrors exercises common.AddRepoBlobsToReferences
// for a multi-arch (index) image: the happy path, plus the failure path
// when the index blob itself cannot be read.
func TestGarbageCollectIndexErrors(t *testing.T) {
Convey("Make imagestore and upload manifest", t, func(c C) {
dir := t.TempDir()
repoName := "test"
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
// one layer blob shared by all the sub-manifests below
content := []byte("this is a blob")
bdgst := godigest.FromBytes(content)
So(bdgst, ShouldNotBeNil)
_, bsize, err := imgStore.FullBlobUpload(repoName, bytes.NewReader(content), bdgst)
So(err, ShouldBeNil)
So(bsize, ShouldEqual, len(content))
var index ispec.Index
index.SchemaVersion = 2
index.MediaType = ispec.MediaTypeImageIndex
var digest godigest.Digest
// build 4 image manifests and collect them into the index
for i := 0; i < 4; i++ {
// upload image config blob
upload, err := imgStore.NewBlobUpload(repoName)
So(err, ShouldBeNil)
So(upload, ShouldNotBeEmpty)
cblob, cdigest := test.GetRandomImageConfig()
buf := bytes.NewBuffer(cblob)
buflen := buf.Len()
blob, err := imgStore.PutBlobChunkStreamed(repoName, upload, buf)
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)
err = imgStore.FinishBlobUpload(repoName, upload, buf, cdigest)
So(err, ShouldBeNil)
So(blob, ShouldEqual, buflen)
// create a manifest
manifest := ispec.Manifest{
Config: ispec.Descriptor{
MediaType: ispec.MediaTypeImageConfig,
Digest: cdigest,
Size: int64(len(cblob)),
},
Layers: []ispec.Descriptor{
{
MediaType: ispec.MediaTypeImageLayer,
Digest: bdgst,
Size: bsize,
},
},
}
manifest.SchemaVersion = 2
content, err = json.Marshal(manifest)
So(err, ShouldBeNil)
digest = godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
_, _, err = imgStore.PutImageManifest(repoName, digest.String(), ispec.MediaTypeImageManifest, content)
So(err, ShouldBeNil)
index.Manifests = append(index.Manifests, ispec.Descriptor{
Digest: digest,
MediaType: ispec.MediaTypeImageManifest,
Size: int64(len(content)),
})
}
// upload index image
indexContent, err := json.Marshal(index)
So(err, ShouldBeNil)
indexDigest := godigest.FromBytes(indexContent)
So(indexDigest, ShouldNotBeNil)
_, _, err = imgStore.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageIndex, indexContent)
So(err, ShouldBeNil)
// happy path: all blobs of the index image are reachable
err = common.AddRepoBlobsToReferences(imgStore, repoName, map[string]bool{}, log)
So(err, ShouldBeNil)
// make the index blob unreadable so parsing the index image fails
Convey("trigger GetImageIndex error in GetReferencedBlobsInImageIndex", func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", indexDigest.Encoded()), 0o000)
So(err, ShouldBeNil)
defer func() {
err := os.Chmod(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", indexDigest.Encoded()), 0o755)
So(err, ShouldBeNil)
}()
err = common.AddRepoBlobsToReferences(imgStore, repoName, map[string]bool{}, log)
So(err, ShouldNotBeNil)
})
})
}

View file

@ -6,20 +6,22 @@ import (
const (
// BlobUploadDir defines the upload directory for blob uploads.
BlobUploadDir = ".uploads"
SchemaVersion = 2
DefaultFilePerms = 0o600
DefaultDirPerms = 0o700
RLOCK = "RLock"
RWLOCK = "RWLock"
BlobsCache = "blobs"
DuplicatesBucket = "duplicates"
OriginalBucket = "original"
DBExtensionName = ".db"
DBCacheLockCheckTimeout = 10 * time.Second
BoltdbName = "cache"
DynamoDBDriverName = "dynamodb"
DefaultGCDelay = 1 * time.Hour
DefaultGCInterval = 1 * time.Hour
S3StorageDriverName = "s3"
BlobUploadDir = ".uploads"
SchemaVersion = 2
DefaultFilePerms = 0o600
DefaultDirPerms = 0o700
RLOCK = "RLock"
RWLOCK = "RWLock"
BlobsCache = "blobs"
DuplicatesBucket = "duplicates"
OriginalBucket = "original"
DBExtensionName = ".db"
DBCacheLockCheckTimeout = 10 * time.Second
BoltdbName = "cache"
DynamoDBDriverName = "dynamodb"
DefaultGCDelay = 1 * time.Hour
DefaultUntaggedImgeRetentionDelay = 24 * time.Hour
DefaultGCInterval = 1 * time.Hour
S3StorageDriverName = "s3"
LocalStorageDriverName = "local"
)

File diff suppressed because it is too large Load diff

481
pkg/storage/local/driver.go Normal file
View file

@ -0,0 +1,481 @@
package local
import (
"bufio"
"bytes"
"errors"
"io"
"io/fs"
"os"
"path"
"sort"
"syscall"
"time"
"unicode/utf8"
storagedriver "github.com/docker/distribution/registry/storage/driver"
zerr "zotregistry.io/zot/errors"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/test/inject"
)
// Driver implements the storage driver interface on top of the local
// filesystem.
type Driver struct {
// commit, when true, makes file writers fsync data on Close/Commit.
commit bool
}
// New returns a local filesystem Driver; commit enables fsync-on-commit
// for durability at the cost of write latency.
func New(commit bool) *Driver {
return &Driver{commit: commit}
}
// Name returns this driver's identifier (the local storage driver name).
func (driver *Driver) Name() string {
return storageConstants.LocalStorageDriverName
}
// EnsureDir creates path (and any missing parents) with the default
// directory permissions; it is a no-op if the directory already exists.
func (driver *Driver) EnsureDir(path string) error {
	return driver.formatErr(os.MkdirAll(path, storageConstants.DefaultDirPerms))
}
// DirExists reports whether path names an existing directory.
// Invalid UTF-8 paths and any stat failure (not-exist, name-too-long,
// invalid name, permission denied, ...) are reported as false.
func (driver *Driver) DirExists(path string) bool {
	if !utf8.ValidString(path) {
		return false
	}

	fileInfo, err := os.Stat(path)
	if err != nil {
		// BUGFIX: the original checked only ENAMETOOLONG/EINVAL/IsNotExist
		// and fell through to fileInfo.IsDir() for any other stat error
		// (e.g. EACCES), dereferencing a nil FileInfo. It also had an
		// operator-precedence bug: `ok && Is(a) || Is(b)` evaluated
		// e.Err on a nil *fs.PathError when the type assertion failed.
		if e, ok := err.(*fs.PathError); ok && //nolint: errorlint
			(errors.Is(e.Err, syscall.ENAMETOOLONG) || errors.Is(e.Err, syscall.EINVAL)) {
			return false
		}

		// every other stat error also means "not a usable directory"
		return false
	}

	return fileInfo.IsDir()
}
// Reader opens path read-only and positions it at offset.
// Returns PathNotFoundError when the file is missing and
// InvalidOffsetError when the file is shorter than offset.
func (driver *Driver) Reader(path string, offset int64) (io.ReadCloser, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, storageConstants.DefaultFilePerms)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, storagedriver.PathNotFoundError{Path: path}
		}

		return nil, driver.formatErr(err)
	}

	pos, err := file.Seek(offset, io.SeekStart)

	switch {
	case err != nil:
		file.Close()

		return nil, driver.formatErr(err)
	case pos < offset:
		file.Close()

		return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
	}

	return file, nil
}
// ReadFile returns the entire contents of the file at path.
func (driver *Driver) ReadFile(path string) ([]byte, error) {
	reader, err := driver.Reader(path, 0)
	if err != nil {
		return nil, err
	}
	defer reader.Close()

	content, err := io.ReadAll(reader)
	if err != nil {
		return nil, driver.formatErr(err)
	}

	return content, nil
}
// Delete removes path and everything beneath it.
// A missing path yields PathNotFoundError; other stat errors are wrapped.
func (driver *Driver) Delete(path string) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return storagedriver.PathNotFoundError{Path: path}
		}

		return driver.formatErr(err)
	}

	return os.RemoveAll(path)
}
// Stat returns driver-level file info for path, translating a missing
// file into PathNotFoundError.
func (driver *Driver) Stat(path string) (storagedriver.FileInfo, error) {
	info, err := os.Stat(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, storagedriver.PathNotFoundError{Path: path}
		}

		return nil, driver.formatErr(err)
	}

	return fileInfo{path: path, FileInfo: info}, nil
}
// Writer opens filepath for writing and returns a FileWriter.
// With append=true the file must already exist and writing resumes at its
// end; otherwise the file is created/truncated. Parent directories are
// created as needed.
func (driver *Driver) Writer(filepath string, append bool) (storagedriver.FileWriter, error) { //nolint:predeclared
	if append {
		// appending to a file that does not exist is an error
		if _, err := os.Stat(filepath); err != nil {
			if os.IsNotExist(err) {
				return nil, storagedriver.PathNotFoundError{Path: filepath}
			}

			return nil, driver.formatErr(err)
		}
	}

	if err := os.MkdirAll(path.Dir(filepath), storageConstants.DefaultDirPerms); err != nil {
		return nil, driver.formatErr(err)
	}

	file, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, storageConstants.DefaultFilePerms)
	if err != nil {
		return nil, driver.formatErr(err)
	}

	var offset int64

	if append {
		// resume at the current end of file
		pos, err := file.Seek(0, io.SeekEnd)
		if err != nil {
			file.Close()

			return nil, driver.formatErr(err)
		}

		offset = pos
	} else {
		// start fresh
		if err := file.Truncate(0); err != nil {
			file.Close()

			return nil, driver.formatErr(err)
		}
	}

	return newFileWriter(file, offset, driver.commit), nil
}
// WriteFile replaces the file at filepath with content, returning the
// number of bytes written, or -1 with an error. On write failure the
// partially written file is cancelled (removed).
func (driver *Driver) WriteFile(filepath string, content []byte) (int, error) {
	writer, err := driver.Writer(filepath, false)
	if err != nil {
		return -1, err
	}

	written, err := io.Copy(writer, bytes.NewReader(content))
	if err != nil {
		_ = writer.Cancel()

		return -1, driver.formatErr(err)
	}

	return int(written), writer.Close()
}
// Walk traverses the tree rooted at path depth-first in sorted order,
// calling walkFn for every entry. Returning ErrSkipDir from walkFn skips
// a directory's children, or stops the walk entirely when returned for a
// file. Entries that disappear between listing and stat are skipped.
func (driver *Driver) Walk(path string, walkFn storagedriver.WalkFn) error {
children, err := driver.List(path)
if err != nil {
return err
}
// deterministic order for reproducible walks
sort.Stable(sort.StringSlice(children))
for _, child := range children {
// Calling driver.Stat for every entry is quite
// expensive when running against backends with a slow Stat
// implementation, such as s3. This is very likely a serious
// performance bottleneck.
fileInfo, err := driver.Stat(child)
if err != nil {
switch errors.As(err, &storagedriver.PathNotFoundError{}) {
case true:
// repository was removed in between listing and enumeration. Ignore it.
continue
default:
return err
}
}
err = walkFn(fileInfo)
//nolint: gocritic
if err == nil && fileInfo.IsDir() {
// recurse into subdirectories the callback accepted
if err := driver.Walk(child, walkFn); err != nil {
return err
}
} else if errors.Is(err, storagedriver.ErrSkipDir) {
// Stop iteration if it's a file, otherwise noop if it's a directory
if !fileInfo.IsDir() {
return nil
}
} else if err != nil {
return driver.formatErr(err)
}
}
return nil
}
// List returns the full paths of the immediate children of fullpath.
// A missing directory yields PathNotFoundError.
func (driver *Driver) List(fullpath string) ([]string, error) {
	dir, err := os.Open(fullpath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, storagedriver.PathNotFoundError{Path: fullpath}
		}

		return nil, driver.formatErr(err)
	}
	defer dir.Close()

	names, err := dir.Readdirnames(0)
	if err != nil {
		return nil, driver.formatErr(err)
	}

	entries := make([]string, 0, len(names))
	for _, name := range names {
		entries = append(entries, path.Join(fullpath, name))
	}

	return entries, nil
}
// Move renames sourcePath to destPath, creating destPath's parent
// directories as needed. A missing source yields PathNotFoundError.
func (driver *Driver) Move(sourcePath string, destPath string) error {
if _, err := os.Stat(sourcePath); os.IsNotExist(err) {
return storagedriver.PathNotFoundError{Path: sourcePath}
}
if err := os.MkdirAll(path.Dir(destPath), storageConstants.DefaultDirPerms); err != nil {
return driver.formatErr(err)
}
return driver.formatErr(os.Rename(sourcePath, destPath))
}
// SameFile reports whether path1 and path2 refer to the same underlying
// file (e.g. hard links); false if either path cannot be stat'ed.
func (driver *Driver) SameFile(path1, path2 string) bool {
	info1, err := os.Stat(path1)
	if err != nil {
		return false
	}

	info2, err := os.Stat(path2)
	if err != nil {
		return false
	}

	return os.SameFile(info1, info2)
}
// Link replaces dest with a hard link to src (used for blob dedupe).
// Any pre-existing dest is removed first; a missing dest is not an error.
func (driver *Driver) Link(src, dest string) error {
if err := os.Remove(dest); err != nil && !os.IsNotExist(err) {
return err
}
return driver.formatErr(os.Link(src, dest))
}
// formatErr tags err with this driver's name: known storagedriver error
// types get their DriverName field set, anything else is wrapped in a
// storagedriver.Error. A nil error passes through unchanged.
func (driver *Driver) formatErr(err error) error {
switch actual := err.(type) { //nolint: errorlint
case nil:
return nil
case storagedriver.PathNotFoundError:
actual.DriverName = driver.Name()
return actual
case storagedriver.InvalidPathError:
actual.DriverName = driver.Name()
return actual
case storagedriver.InvalidOffsetError:
actual.DriverName = driver.Name()
return actual
default:
storageError := storagedriver.Error{
DriverName: driver.Name(),
Enclosed: err,
}
return storageError
}
}
// fileInfo adapts an os.FileInfo (plus its full path) to the
// storagedriver.FileInfo interface.
type fileInfo struct {
os.FileInfo
// path is the full path this info was obtained for.
path string
}
// asserts fileInfo implements storagedriver.FileInfo.
var _ storagedriver.FileInfo = fileInfo{}
// Path provides the full path of the target of this file info.
func (fi fileInfo) Path() string {
return fi.path
}
// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
func (fi fileInfo) Size() int64 {
if fi.IsDir() {
return 0
}
return fi.FileInfo.Size()
}
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
func (fi fileInfo) ModTime() time.Time {
return fi.FileInfo.ModTime()
}
// IsDir returns true if the path is a directory.
func (fi fileInfo) IsDir() bool {
return fi.FileInfo.IsDir()
}
// fileWriter is a buffered storagedriver.FileWriter over an *os.File
// with close/commit/cancel state tracking.
type fileWriter struct {
// file is the underlying open file.
file *os.File
// size is the logical size: starting offset plus bytes written.
size int64
// bw buffers writes to file.
bw *bufio.Writer
// closed/committed/cancelled record the writer's terminal state so
// further operations can be rejected with the matching error.
closed bool
committed bool
cancelled bool
// commit, when true, fsyncs file data on Close/Commit.
commit bool
}
// newFileWriter wraps file, already positioned at size bytes, in a
// buffered fileWriter; commit enables fsync-on-commit.
func newFileWriter(file *os.File, size int64, commit bool) *fileWriter {
return &fileWriter{
file: file,
size: size,
commit: commit,
bw: bufio.NewWriter(file),
}
}
// Write buffers buf into the file and tracks the logical size.
// Writing to a closed, committed or cancelled writer fails with the
// corresponding sentinel error.
func (fw *fileWriter) Write(buf []byte) (int, error) {
	//nolint: gocritic
	switch {
	case fw.closed:
		return 0, zerr.ErrFileAlreadyClosed
	case fw.committed:
		return 0, zerr.ErrFileAlreadyCommitted
	case fw.cancelled:
		return 0, zerr.ErrFileAlreadyCancelled
	}

	written, err := fw.bw.Write(buf)
	fw.size += int64(written)

	return written, err
}
// Size returns the writer's logical size: the starting offset plus all
// bytes written so far (including data still in the buffer).
func (fw *fileWriter) Size() int64 {
return fw.size
}
// Close flushes buffered data, optionally fsyncs (when commit mode is on),
// and closes the underlying file. Closing twice returns
// ErrFileAlreadyClosed. Note: closed is only set after everything
// succeeded, so a failed Close can be retried.
func (fw *fileWriter) Close() error {
if fw.closed {
return zerr.ErrFileAlreadyClosed
}
if err := fw.bw.Flush(); err != nil {
return err
}
if fw.commit {
// inject.Error is a test hook for simulating fsync failures
if err := inject.Error(fw.file.Sync()); err != nil {
return err
}
}
if err := inject.Error(fw.file.Close()); err != nil {
return err
}
fw.closed = true
return nil
}
// Cancel aborts the write: it closes the underlying file (ignoring the
// close error) and removes it from disk. Cancelling an already-closed
// writer returns ErrFileAlreadyClosed.
func (fw *fileWriter) Cancel() error {
if fw.closed {
return zerr.ErrFileAlreadyClosed
}
fw.cancelled = true
fw.file.Close()
return os.Remove(fw.file.Name())
}
// Commit flushes buffered data (and fsyncs when commit mode is on) and
// marks the writer committed; the file stays open until Close. Committing
// a closed, committed or cancelled writer fails with the matching error.
func (fw *fileWriter) Commit() error {
//nolint: gocritic
if fw.closed {
return zerr.ErrFileAlreadyClosed
} else if fw.committed {
return zerr.ErrFileAlreadyCommitted
} else if fw.cancelled {
return zerr.ErrFileAlreadyCancelled
}
if err := fw.bw.Flush(); err != nil {
return err
}
if fw.commit {
if err := fw.file.Sync(); err != nil {
return err
}
}
fw.committed = true
return nil
}
// ValidateHardLink verifies that the filesystem under rootDir supports
// hard links (required for dedupe) by creating a probe file and linking
// it. Both probe files are removed before returning; the link error is
// returned when hard links are unsupported.
func ValidateHardLink(rootDir string) error {
	if err := os.MkdirAll(rootDir, storageConstants.DefaultDirPerms); err != nil {
		return err
	}

	probePath := path.Join(rootDir, "hardlinkcheck.txt")
	linkPath := path.Join(rootDir, "duphardlinkcheck.txt")

	if err := os.WriteFile(probePath,
		[]byte("check whether hardlinks work on filesystem"), storageConstants.DefaultFilePerms); err != nil {
		return err
	}

	if err := os.Link(probePath, linkPath); err != nil {
		// hard links unsupported: clean the probe up before reporting
		if rmErr := os.RemoveAll(probePath); rmErr != nil {
			return rmErr
		}

		return err
	}

	if err := os.RemoveAll(probePath); err != nil {
		return err
	}

	return os.RemoveAll(linkPath)
}

File diff suppressed because it is too large Load diff

View file

@ -36,8 +36,8 @@ func TestElevatedPrivilegesInvalidDedupe(t *testing.T) {
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storageConstants.DefaultGCDelay, true, true, log,
metrics, nil, cacheDriver)
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
upload, err := imgStore.NewBlobUpload("dedupe1")
So(err, ShouldBeNil)

File diff suppressed because it is too large Load diff

115
pkg/storage/s3/driver.go Normal file
View file

@ -0,0 +1,115 @@
package s3
import (
"context"
"io"
// Add s3 support.
"github.com/docker/distribution/registry/storage/driver"
_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
)
// Driver implements the storage driver interface by delegating to a
// docker/distribution s3 StorageDriver.
type Driver struct {
// store is the wrapped s3-aws storage driver.
store driver.StorageDriver
}
// New wraps storeDriver in an s3 Driver.
func New(storeDriver driver.StorageDriver) *Driver {
return &Driver{store: storeDriver}
}
// Name returns this driver's identifier (the s3 storage driver name).
func (driver *Driver) Name() string {
return storageConstants.S3StorageDriverName
}
// EnsureDir is a no-op: s3 has no real directories, prefixes appear
// implicitly when objects are written under them.
func (driver *Driver) EnsureDir(path string) error {
return nil
}
// DirExists reports whether path stats successfully as a directory
// (i.e. an s3 prefix with at least one object under it).
func (driver *Driver) DirExists(path string) bool {
if fi, err := driver.store.Stat(context.Background(), path); err == nil && fi.IsDir() {
return true
}
return false
}
// Reader opens path for reading starting at offset, via the s3 backend.
func (driver *Driver) Reader(path string, offset int64) (io.ReadCloser, error) {
return driver.store.Reader(context.Background(), path, offset)
}
// ReadFile returns the entire contents of the object at path.
func (driver *Driver) ReadFile(path string) ([]byte, error) {
return driver.store.GetContent(context.Background(), path)
}
// Delete removes path and everything beneath it.
func (driver *Driver) Delete(path string) error {
return driver.store.Delete(context.Background(), path)
}
// Stat returns backend file info for path.
func (driver *Driver) Stat(path string) (driver.FileInfo, error) {
return driver.store.Stat(context.Background(), path)
}
// Writer returns a FileWriter for filepath; append resumes an existing
// multipart upload instead of starting a fresh object.
func (driver *Driver) Writer(filepath string, append bool) (driver.FileWriter, error) { //nolint:predeclared
return driver.store.Writer(context.Background(), filepath, append)
}
// WriteFile replaces the object at filepath with content through the
// backend's FileWriter, committing on success. Returns the number of
// bytes written, or -1 with an error.
func (driver *Driver) WriteFile(filepath string, content []byte) (int, error) {
	stwr, err := driver.store.Writer(context.Background(), filepath, false)
	if err != nil {
		return -1, err
	}
	defer stwr.Close()

	written, err := stwr.Write(content)
	if err != nil {
		return -1, err
	}

	if err := stwr.Commit(); err != nil {
		return -1, err
	}

	return written, nil
}
// Walk traverses the tree under path, invoking f for each entry.
func (driver *Driver) Walk(path string, f driver.WalkFn) error {
return driver.store.Walk(context.Background(), path, f)
}
// List returns the immediate children of fullpath.
func (driver *Driver) List(fullpath string) ([]string, error) {
return driver.store.List(context.Background(), fullpath)
}
// Move renames sourcePath to destPath in the backend.
func (driver *Driver) Move(sourcePath string, destPath string) error {
return driver.store.Move(context.Background(), sourcePath, destPath)
}
// SameFile reports whether path1 and path2 look like the same object by
// comparing the backend's Stat metadata (dir flag, mtime, path, size).
// NOTE(review): since Path() is compared too, this can only be true when
// path1 and path2 stat to the same path — s3 has no hard links, so
// "sameness" here is a metadata heuristic; confirm against callers.
func (driver *Driver) SameFile(path1, path2 string) bool {
fi1, _ := driver.store.Stat(context.Background(), path1)
fi2, _ := driver.store.Stat(context.Background(), path2)
if fi1 != nil && fi2 != nil {
if fi1.IsDir() == fi2.IsDir() &&
fi1.ModTime() == fi2.ModTime() &&
fi1.Path() == fi2.Path() &&
fi1.Size() == fi2.Size() {
return true
}
}
return false
}
/*
Link put an empty file that will act like a link between the original file and deduped one
because s3 doesn't support symlinks, wherever the storage will encounter an empty file, it will get the original one
from cache.
*/
func (driver *Driver) Link(src, dest string) error {
// src is intentionally unused: the zero-length marker at dest is enough,
// the original blob location is resolved later through the dedupe cache
return driver.store.PutContent(context.Background(), dest, []byte{})
}

File diff suppressed because it is too large Load diff

View file

@ -77,15 +77,17 @@ func createMockStorage(rootDir string, cacheDir string, dedupe bool, store drive
var cacheDriver cache.Cache
// from pkg/cli/root.go/applyDefaultValues, s3 magic
if _, err := os.Stat(path.Join(cacheDir, "s3_cache.db")); dedupe || (!dedupe && err == nil) {
if _, err := os.Stat(path.Join(cacheDir,
storageConstants.BoltdbName+storageConstants.DBExtensionName)); dedupe || (!dedupe && err == nil) {
cacheDriver, _ = storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: cacheDir,
Name: "s3_cache",
Name: "cache",
UseRelPaths: false,
}, log)
}
il := s3.NewImageStore(rootDir, cacheDir, false, storageConstants.DefaultGCDelay,
dedupe, false, log, metrics, nil, store, cacheDriver,
il := s3.NewImageStore(rootDir, cacheDir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, dedupe, false, log, metrics, nil, store, cacheDriver,
)
return il
@ -97,8 +99,8 @@ func createMockStorageWithMockCache(rootDir string, dedupe bool, store driver.St
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := s3.NewImageStore(rootDir, "", false, storageConstants.DefaultGCDelay,
dedupe, false, log, metrics, nil, store, cacheDriver,
il := s3.NewImageStore(rootDir, "", true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, dedupe, false, log, metrics, nil, store, cacheDriver,
)
return il
@ -150,17 +152,17 @@ func createObjectsStore(rootDir string, cacheDir string, dedupe bool) (
var err error
// from pkg/cli/root.go/applyDefaultValues, s3 magic
s3CacheDBPath := path.Join(cacheDir, s3.CacheDBName+storageConstants.DBExtensionName)
s3CacheDBPath := path.Join(cacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName)
if _, err = os.Stat(s3CacheDBPath); dedupe || (!dedupe && err == nil) {
cacheDriver, _ = storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: cacheDir,
Name: "s3_cache",
Name: "cache",
UseRelPaths: false,
}, log)
}
il := s3.NewImageStore(rootDir, cacheDir, false, storageConstants.DefaultGCDelay,
dedupe, false, log, metrics, nil, store, cacheDriver)
il := s3.NewImageStore(rootDir, cacheDir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, dedupe, false, log, metrics, nil, store, cacheDriver)
return store, il, err
}
@ -194,8 +196,8 @@ func createObjectsStoreDynamo(rootDir string, cacheDir string, dedupe bool, tabl
panic(err)
}
il := s3.NewImageStore(rootDir, cacheDir, false, storageConstants.DefaultGCDelay,
dedupe, false, log, metrics, nil, store, cacheDriver)
il := s3.NewImageStore(rootDir, cacheDir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, dedupe, false, log, metrics, nil, store, cacheDriver)
return store, il, err
}
@ -893,7 +895,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
_, _, err = imgStore.CheckBlob(testImage, digest)
So(err, ShouldNotBeNil)
_, _, err = imgStore.StatBlob(testImage, digest)
_, _, _, err = imgStore.StatBlob(testImage, digest)
So(err, ShouldNotBeNil)
})
@ -1050,7 +1052,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
WriterFn: func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error) {
return &FileWriterMock{WriteFn: func(b []byte) (int, error) {
return 0, errS3
}}, nil
}}, errS3
},
})
_, err := imgStore.PutBlobChunkStreamed(testImage, "uuid", io.NopCloser(strings.NewReader("")))
@ -1091,7 +1093,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
WriteFn: func(b []byte) (int, error) {
return 0, errS3
},
}, nil
}, errS3
},
})
_, err := imgStore.PutBlobChunk(testImage, "uuid", 12, 100, io.NopCloser(strings.NewReader("")))
@ -1280,7 +1282,7 @@ func TestS3Dedupe(t *testing.T) {
So(checkBlobSize1, ShouldBeGreaterThan, 0)
So(err, ShouldBeNil)
ok, checkBlobSize1, err = imgStore.StatBlob("dedupe1", digest)
ok, checkBlobSize1, _, err = imgStore.StatBlob("dedupe1", digest)
So(ok, ShouldBeTrue)
So(checkBlobSize1, ShouldBeGreaterThan, 0)
So(err, ShouldBeNil)
@ -1466,12 +1468,12 @@ func TestS3Dedupe(t *testing.T) {
Convey("Check backward compatibility - switch dedupe to false", func() {
/* copy cache to the new storage with dedupe false (doing this because we
already have a cache object holding the lock on cache db file) */
input, err := os.ReadFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName))
input, err := os.ReadFile(path.Join(tdir, storageConstants.BoltdbName+storageConstants.DBExtensionName))
So(err, ShouldBeNil)
tdir = t.TempDir()
err = os.WriteFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName), input, 0o600)
err = os.WriteFile(path.Join(tdir, storageConstants.BoltdbName+storageConstants.DBExtensionName), input, 0o600)
So(err, ShouldBeNil)
storeDriver, imgStore, _ := createObjectsStore(testDir, tdir, false)
@ -3306,6 +3308,7 @@ func TestS3ManifestImageIndex(t *testing.T) {
err = imgStore.DeleteImageManifest("index", "test:index1", false)
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("index", "test:index1")
So(err, ShouldNotBeNil)
@ -3599,7 +3602,7 @@ func TestS3DedupeErr(t *testing.T) {
imgStore = createMockStorage(testDir, tdir, true, &StorageDriverMock{})
err = os.Remove(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName))
err = os.Remove(path.Join(tdir, storageConstants.BoltdbName+storageConstants.DBExtensionName))
digest := godigest.NewDigestFromEncoded(godigest.SHA256, "digest")
// trigger unable to insert blob record
@ -3640,8 +3643,9 @@ func TestS3DedupeErr(t *testing.T) {
err := imgStore.DedupeBlob("", digest, "dst")
So(err, ShouldBeNil)
// error will be triggered in driver.SameFile()
err = imgStore.DedupeBlob("", digest, "dst2")
So(err, ShouldNotBeNil)
So(err, ShouldBeNil)
})
Convey("Test DedupeBlob - error on store.PutContent()", t, func(c C) {
@ -3776,12 +3780,12 @@ func TestS3DedupeErr(t *testing.T) {
So(err, ShouldBeNil)
// copy cache db to the new imagestore
input, err := os.ReadFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName))
input, err := os.ReadFile(path.Join(tdir, storageConstants.BoltdbName+storageConstants.DBExtensionName))
So(err, ShouldBeNil)
tdir = t.TempDir()
err = os.WriteFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName), input, 0o600)
err = os.WriteFile(path.Join(tdir, storageConstants.BoltdbName+storageConstants.DBExtensionName), input, 0o600)
So(err, ShouldBeNil)
imgStore = createMockStorage(testDir, tdir, true, &StorageDriverMock{
@ -3797,12 +3801,14 @@ func TestS3DedupeErr(t *testing.T) {
_, _, err = imgStore.GetBlob("repo2", digest, "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldNotBeNil)
// now it should move content from /repo1/dst1 to /repo2/dst2
_, err = imgStore.GetBlobContent("repo2", digest)
So(err, ShouldNotBeNil)
So(err, ShouldBeNil)
_, _, err = imgStore.StatBlob("repo2", digest)
So(err, ShouldNotBeNil)
_, _, _, err = imgStore.StatBlob("repo2", digest)
So(err, ShouldBeNil)
// it errors out because of bad range, as mock store returns a driver.FileInfo with 0 size
_, _, _, err = imgStore.GetBlobPartial("repo2", digest, "application/vnd.oci.image.layer.v1.tar+gzip", 0, 1)
So(err, ShouldNotBeNil)
})
@ -3822,12 +3828,12 @@ func TestS3DedupeErr(t *testing.T) {
So(err, ShouldBeNil)
// copy cache db to the new imagestore
input, err := os.ReadFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName))
input, err := os.ReadFile(path.Join(tdir, storageConstants.BoltdbName+storageConstants.DBExtensionName))
So(err, ShouldBeNil)
tdir = t.TempDir()
err = os.WriteFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName), input, 0o600)
err = os.WriteFile(path.Join(tdir, storageConstants.BoltdbName+storageConstants.DBExtensionName), input, 0o600)
So(err, ShouldBeNil)
imgStore = createMockStorage(testDir, tdir, true, &StorageDriverMock{
@ -3887,7 +3893,7 @@ func TestS3DedupeErr(t *testing.T) {
_, err = imgStore.GetBlobContent("repo2", digest)
So(err, ShouldNotBeNil)
_, _, err = imgStore.StatBlob("repo2", digest)
_, _, _, err = imgStore.StatBlob("repo2", digest)
So(err, ShouldNotBeNil)
_, _, _, err = imgStore.GetBlobPartial("repo2", digest, "application/vnd.oci.image.layer.v1.tar+gzip", 0, 1)

View file

@ -39,8 +39,8 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storageConstants.DefaultGCDelay,
true, true, log, metrics, nil, cacheDriver)
imgStore := local.NewImageStore(dir, true, true, storageConstants.DefaultGCDelay,
storageConstants.DefaultUntaggedImgeRetentionDelay, true, true, log, metrics, nil, cacheDriver)
Convey("Scrub only one repo", t, func(c C) {
// initialize repo
@ -113,7 +113,7 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
// verify error message
So(actual, ShouldContainSubstring, "test 1.0 affected parse application/vnd.oci.image.manifest.v1+json")
index, err := common.GetIndex(imgStore, repoName, log.With().Caller().Logger())
index, err := common.GetIndex(imgStore, repoName, log)
So(err, ShouldBeNil)
So(len(index.Manifests), ShouldEqual, 1)
@ -193,7 +193,7 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
err = os.Chmod(layerFile, 0x0200)
So(err, ShouldBeNil)
index, err := common.GetIndex(imgStore, repoName, log.With().Caller().Logger())
index, err := common.GetIndex(imgStore, repoName, log)
So(err, ShouldBeNil)
So(len(index.Manifests), ShouldEqual, 1)
@ -327,7 +327,7 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
So(actual, ShouldContainSubstring, "test 1.0 affected")
So(actual, ShouldContainSubstring, "no such file or directory")
index, err := common.GetIndex(imgStore, repoName, log.With().Caller().Logger())
index, err := common.GetIndex(imgStore, repoName, log)
So(err, ShouldBeNil)
So(len(index.Manifests), ShouldEqual, 2)

View file

@ -51,8 +51,9 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer
if config.Storage.StorageDriver == nil {
// false positive lint - linter does not implement Lint method
//nolint:typecheck,contextcheck
defaultStore = local.NewImageStore(config.Storage.RootDirectory,
config.Storage.GC, config.Storage.GCDelay,
rootDir := config.Storage.RootDirectory
defaultStore = local.NewImageStore(rootDir,
config.Storage.GC, config.Storage.GCReferrers, config.Storage.GCDelay, config.Storage.UntaggedImageRetentionDelay,
config.Storage.Dedupe, config.Storage.Commit, log, metrics, linter,
CreateCacheDatabaseDriver(config.Storage.StorageConfig, log),
)
@ -80,7 +81,8 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer
// false positive lint - linter does not implement Lint method
//nolint: typecheck,contextcheck
defaultStore = s3.NewImageStore(rootDir, config.Storage.RootDirectory,
config.Storage.GC, config.Storage.GCDelay, config.Storage.Dedupe,
config.Storage.GC, config.Storage.GCReferrers, config.Storage.GCDelay,
config.Storage.UntaggedImageRetentionDelay, config.Storage.Dedupe,
config.Storage.Commit, log, metrics, linter, store,
CreateCacheDatabaseDriver(config.Storage.StorageConfig, log))
}
@ -152,9 +154,13 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig,
// add it to uniqueSubFiles
// Create a new image store and assign it to imgStoreMap
if isUnique {
imgStoreMap[storageConfig.RootDirectory] = local.NewImageStore(storageConfig.RootDirectory,
storageConfig.GC, storageConfig.GCDelay, storageConfig.Dedupe,
storageConfig.Commit, log, metrics, linter, CreateCacheDatabaseDriver(storageConfig, log))
rootDir := storageConfig.RootDirectory
imgStoreMap[storageConfig.RootDirectory] = local.NewImageStore(rootDir,
storageConfig.GC, storageConfig.GCReferrers, storageConfig.GCDelay,
storageConfig.UntaggedImageRetentionDelay, storageConfig.Dedupe,
storageConfig.Commit, log, metrics, linter,
CreateCacheDatabaseDriver(storageConfig, log),
)
subImageStore[route] = imgStoreMap[storageConfig.RootDirectory]
}
@ -183,8 +189,9 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig,
// false positive lint - linter does not implement Lint method
//nolint: typecheck
subImageStore[route] = s3.NewImageStore(rootDir, storageConfig.RootDirectory,
storageConfig.GC, storageConfig.GCDelay,
storageConfig.Dedupe, storageConfig.Commit, log, metrics, linter, store,
storageConfig.GC, storageConfig.GCReferrers, storageConfig.GCDelay,
storageConfig.UntaggedImageRetentionDelay, storageConfig.Dedupe,
storageConfig.Commit, log, metrics, linter, store,
CreateCacheDatabaseDriver(storageConfig, log),
)
}

File diff suppressed because it is too large Load diff

View file

@ -4,6 +4,7 @@ import (
"io"
"time"
storagedriver "github.com/docker/distribution/registry/storage/driver"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
@ -38,7 +39,7 @@ type ImageStore interface { //nolint:interfacebloat
DeleteBlobUpload(repo, uuid string) error
BlobPath(repo string, digest godigest.Digest) string
CheckBlob(repo string, digest godigest.Digest) (bool, int64, error)
StatBlob(repo string, digest godigest.Digest) (bool, int64, error)
StatBlob(repo string, digest godigest.Digest) (bool, int64, time.Time, error)
GetBlob(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error)
GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,
) (io.ReadCloser, int64, int64, error)
@ -52,4 +53,22 @@ type ImageStore interface { //nolint:interfacebloat
RunDedupeBlobs(interval time.Duration, sch *scheduler.Scheduler)
RunDedupeForDigest(digest godigest.Digest, dedupe bool, duplicateBlobs []string) error
GetNextDigestWithBlobPaths(lastDigests []godigest.Digest) (godigest.Digest, []string, error)
GetAllBlobs(repo string) ([]string, error)
}
type Driver interface { //nolint:interfacebloat
Name() string
EnsureDir(path string) error
DirExists(path string) bool
Reader(path string, offset int64) (io.ReadCloser, error)
ReadFile(path string) ([]byte, error)
Delete(path string) error
Stat(path string) (storagedriver.FileInfo, error)
Writer(filepath string, append bool) (storagedriver.FileWriter, error) //nolint: predeclared
WriteFile(filepath string, content []byte) (int, error)
Walk(path string, f storagedriver.WalkFn) error
List(fullpath string) ([]string, error)
Move(sourcePath string, destPath string) error
SameFile(path1, path2 string) bool
Link(src, dest string) error
}

View file

@ -2068,7 +2068,7 @@ func GetDefaultLayersBlobs() [][]byte {
}
func GetDefaultImageStore(rootDir string, log zLog.Logger) stypes.ImageStore {
return local.NewImageStore(rootDir, false, time.Hour, false, false, log,
return local.NewImageStore(rootDir, false, false, time.Hour, time.Hour, false, false, log,
monitoring.NewMetricsServer(false, log),
mocks.MockedLint{
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore stypes.ImageStore) (bool, error) {

View file

@ -17,6 +17,16 @@ type CacheMock struct {
// Delete a blob from the cachedb.
DeleteBlobFn func(digest godigest.Digest, path string) error
UsesRelativePathsFn func() bool
}
// UsesRelativePaths reports whether the mocked cache stores blob paths
// relative to the root directory. It delegates to the injected
// UsesRelativePathsFn hook when one is set and defaults to false otherwise.
func (cacheMock CacheMock) UsesRelativePaths() bool {
	if cacheMock.UsesRelativePathsFn != nil {
		// BUG FIX: the original called cacheMock.UsesRelativePaths() here,
		// i.e. the method itself, causing unbounded recursion (stack
		// overflow) whenever a test set the hook. Call the hook instead.
		return cacheMock.UsesRelativePathsFn()
	}

	return false
}
func (cacheMock CacheMock) Name() string {

View file

@ -35,7 +35,7 @@ type MockedImageStore struct {
DeleteBlobUploadFn func(repo string, uuid string) error
BlobPathFn func(repo string, digest godigest.Digest) string
CheckBlobFn func(repo string, digest godigest.Digest) (bool, int64, error)
StatBlobFn func(repo string, digest godigest.Digest) (bool, int64, error)
StatBlobFn func(repo string, digest godigest.Digest) (bool, int64, time.Time, error)
GetBlobPartialFn func(repo string, digest godigest.Digest, mediaType string, from, to int64,
) (io.ReadCloser, int64, int64, error)
GetBlobFn func(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error)
@ -51,6 +51,7 @@ type MockedImageStore struct {
RunDedupeBlobsFn func(interval time.Duration, sch *scheduler.Scheduler)
RunDedupeForDigestFn func(digest godigest.Digest, dedupe bool, duplicateBlobs []string) error
GetNextDigestWithBlobPathsFn func(lastDigests []godigest.Digest) (godigest.Digest, []string, error)
GetAllBlobsFn func(repo string) ([]string, error)
}
func (is MockedImageStore) Lock(t *time.Time) {
@ -142,6 +143,14 @@ func (is MockedImageStore) GetImageTags(name string) ([]string, error) {
return []string{}, nil
}
// GetAllBlobs returns the blob list for repo via the injected GetAllBlobsFn
// hook; when no hook is configured it reports an empty slice and no error.
func (is MockedImageStore) GetAllBlobs(repo string) ([]string, error) {
	if is.GetAllBlobsFn == nil {
		return []string{}, nil
	}

	return is.GetAllBlobsFn(repo)
}
func (is MockedImageStore) DeleteImageManifest(name string, reference string, detectCollision bool) error {
if is.DeleteImageManifestFn != nil {
return is.DeleteImageManifestFn(name, reference, detectCollision)
@ -252,12 +261,12 @@ func (is MockedImageStore) CheckBlob(repo string, digest godigest.Digest) (bool,
return true, 0, nil
}
func (is MockedImageStore) StatBlob(repo string, digest godigest.Digest) (bool, int64, error) {
func (is MockedImageStore) StatBlob(repo string, digest godigest.Digest) (bool, int64, time.Time, error) {
if is.StatBlobFn != nil {
return is.StatBlobFn(repo, digest)
}
return true, 0, nil
return true, 0, time.Time{}, nil
}
func (is MockedImageStore) GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,

View file

@ -346,7 +346,7 @@ func TestExtractImageDetails(t *testing.T) {
Convey("extractImageDetails good workflow", t, func() {
dir := t.TempDir()
testLogger := log.NewLogger("debug", "")
imageStore := local.NewImageStore(dir, false, 0, false, false,
imageStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil)
storeController := storage.StoreController{
@ -382,7 +382,7 @@ func TestExtractImageDetails(t *testing.T) {
Convey("extractImageDetails bad ispec.ImageManifest", t, func() {
dir := t.TempDir()
testLogger := log.NewLogger("debug", "")
imageStore := local.NewImageStore(dir, false, 0, false, false,
imageStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil)
storeController := storage.StoreController{
@ -402,7 +402,7 @@ func TestExtractImageDetails(t *testing.T) {
Convey("extractImageDetails bad imageConfig", t, func() {
dir := t.TempDir()
testLogger := log.NewLogger("debug", "")
imageStore := local.NewImageStore(dir, false, 0, false, false,
imageStore := local.NewImageStore(dir, false, false, 0, 0, false, false,
testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil)
storeController := storage.StoreController{

View file

@ -0,0 +1,159 @@
load helpers_zot
function verify_prerequisites {
    # Fail fast when a required CLI tool is missing. File descriptor 3 is
    # reserved by bats for diagnostic output shown next to test results.
    local tool
    for tool in curl jq; do
        if [ ! $(command -v ${tool}) ]; then
            echo "you need to install ${tool} as a prerequisite to running the tests" >&3
            return 1
        fi
    done

    return 0
}
# One-time suite setup: fetch test images, write a zot config with short GC
# timers, and start the registry on port 8080.
function setup_file() {
# Verify prerequisites are available
if ! $(verify_prerequisites); then
exit 1
fi
# Download test data to folder common for the entire suite, not just this file
skopeo --insecure-policy copy --format=oci docker://ghcr.io/project-zot/golang:1.20 oci:${TEST_DATA_DIR}/golang:1.20
# Setup zot server
local zot_root_dir=${BATS_FILE_TMPDIR}/zot
local zot_config_file=${BATS_FILE_TMPDIR}/zot_config.json
local oci_data_dir=${BATS_FILE_TMPDIR}/oci
mkdir -p ${zot_root_dir}
mkdir -p ${oci_data_dir}
# gcDelay (30s) and untaggedImageRetentionDelay (40s) are deliberately short,
# combined with gcInterval of 1s, so the "garbage collect all artifacts after
# image delete" test below can observe GC within its 100s sleep.
cat > ${zot_config_file}<<EOF
{
"distSpecVersion": "1.1.0",
"storage": {
"rootDirectory": "${zot_root_dir}",
"gc": true,
"gcReferrers": true,
"gcDelay": "30s",
"untaggedImageRetentionDelay": "40s",
"gcInterval": "1s"
},
"http": {
"address": "0.0.0.0",
"port": "8080"
},
"log": {
"level": "debug",
"output": "/tmp/gc.log"
}
}
EOF
# zot_serve / wait_zot_reachable are presumably provided by helpers_zot
# (loaded at the top of this file) — TODO confirm.
zot_serve ${ZOT_PATH} ${zot_config_file}
wait_zot_reachable 8080
}
# Suite teardown: stop every zot instance started during setup_file.
# zot_stop_all is presumably defined in helpers_zot — TODO confirm.
function teardown_file() {
zot_stop_all
}
@test "push image" {
# Push the locally-fetched golang:1.20 OCI layout to the zot registry.
run skopeo --insecure-policy copy --dest-tls-verify=false \
oci:${TEST_DATA_DIR}/golang:1.20 \
docker://127.0.0.1:8080/golang:1.20
[ "$status" -eq 0 ]
# Repo must now be listed in the catalog ...
run curl http://127.0.0.1:8080/v2/_catalog
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}" | jq '.repositories[]') = '"golang"' ]
# ... and the tag must be resolvable.
run curl http://127.0.0.1:8080/v2/golang/tags/list
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}" | jq '.tags[]') = '"1.20"' ]
}
@test "push image index" {
# --multi-arch below pushes an image index (containing many images) instead
# of an image manifest (single image)
run skopeo --insecure-policy copy --format=oci --dest-tls-verify=false --multi-arch=all \
docker://public.ecr.aws/docker/library/busybox:latest \
docker://127.0.0.1:8080/busybox:latest
[ "$status" -eq 0 ]
# Verify the multiarch repo and its tag are visible through the API.
run curl http://127.0.0.1:8080/v2/_catalog
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}" | jq '.repositories[0]') = '"busybox"' ]
run curl http://127.0.0.1:8080/v2/busybox/tags/list
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}" | jq '.tags[]') = '"latest"' ]
}
# Attach ORAS referrer artifacts (signature + sbom) to both the single-image
# repo (golang) and the index repo (busybox); GC of these referrers is
# verified in the final test of this file.
@test "attach oras artifacts" {
# attach signature to image
echo "{\"artifact\": \"\", \"signature\": \"pat hancock\"}" > signature.json
run oras attach --plain-http 127.0.0.1:8080/golang:1.20 --image-spec v1.1-image --artifact-type 'signature/example' ./signature.json:application/json
[ "$status" -eq 0 ]
# attach sbom to image
echo "{\"version\": \"0.0.0.0\", \"artifact\": \"'127.0.0.1:8080/golang:1.20'\", \"contents\": \"good\"}" > sbom.json
run oras attach --plain-http 127.0.0.1:8080/golang:1.20 --image-spec v1.1-image --artifact-type 'sbom/example' ./sbom.json:application/json
[ "$status" -eq 0 ]
# attach signature to index image
run oras attach --plain-http 127.0.0.1:8080/busybox:latest --image-spec v1.1-image --artifact-type 'signature/example' ./signature.json:application/json
[ "$status" -eq 0 ]
# attach sbom to index image
echo "{\"version\": \"0.0.0.0\", \"artifact\": \"'127.0.0.1:8080/golang:1.20'\", \"contents\": \"good\"}" > sbom.json
run oras attach --plain-http 127.0.0.1:8080/busybox:latest --image-spec v1.1-image --artifact-type 'sbom/example' ./sbom.json:application/json
[ "$status" -eq 0 ]
}
# Attach OCI referrer artifacts with regctl (subject-based referrers API)
# to both repos, and confirm they can be fetched back.
@test "push OCI artifact with regclient" {
# Registry runs plain HTTP; disable TLS for this host in regctl config.
run regctl registry set 127.0.0.1:8080 --tls disabled
[ "$status" -eq 0 ]
run regctl artifact put --artifact-type application/vnd.example.artifact --subject 127.0.0.1:8080/golang:1.20 <<EOF
this is an artifact
EOF
[ "$status" -eq 0 ]
run regctl artifact get --subject 127.0.0.1:8080/golang:1.20
[ "$status" -eq 0 ]
run regctl artifact put --artifact-type application/vnd.example.artifact --subject 127.0.0.1:8080/busybox:latest <<EOF
this is an artifact
EOF
[ "$status" -eq 0 ]
run regctl artifact get --subject 127.0.0.1:8080/busybox:latest
[ "$status" -eq 0 ]
}
# Delete both images, wait past the GC windows, and verify that all referrer
# artifacts AND the now-empty repos were garbage collected.
@test "garbage collect all artifacts after image delete" {
run skopeo --insecure-policy delete --tls-verify=false \
docker://127.0.0.1:8080/golang:1.20
[ "$status" -eq 0 ]
run skopeo --insecure-policy delete --tls-verify=false \
docker://127.0.0.1:8080/busybox:latest
[ "$status" -eq 0 ]
# sleep past gc delay
# (config in setup_file uses gcDelay=30s, untaggedImageRetentionDelay=40s,
# gcInterval=1s — 100s is comfortably past all of them)
sleep 100
# gc should have removed artifacts
run regctl artifact get --subject 127.0.0.1:8080/golang:1.20
[ "$status" -eq 1 ]
run regctl artifact get --subject 127.0.0.1:8080/busybox:latest
[ "$status" -eq 1 ]
run oras discover --plain-http -o json 127.0.0.1:8080/golang:1.20
[ "$status" -eq 1 ]
run oras discover --plain-http -o json 127.0.0.1:8080/busybox:latest
[ "$status" -eq 1 ]
# repos should also be gc'ed
run curl http://127.0.0.1:8080/v2/_catalog
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}" | jq -r '.repositories | length') -eq 0 ]
}

View file

@ -65,7 +65,7 @@ function teardown() {
wait_zot_reachable 8080
# wait for scrub to be done and logs to get populated
run sleep 15s
run sleep 20s
run not_affected
[ "$status" -eq 0 ]
[ $(echo "${lines[0]}" ) = 'true' ]
@ -79,7 +79,7 @@ function teardown() {
wait_zot_reachable 8080
# wait for scrub to be done and logs to get populated
run sleep 15s
run sleep 20s
run affected
[ "$status" -eq 0 ]
[ $(echo "${lines[0]}" ) = 'true' ]

View file

@ -0,0 +1,19 @@
{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot/local",
"gc": true,
"gcReferrers": false,
"gcDelay": "20s",
"untaggedImageRetentionDelay": "20s",
"gcInterval": "1s"
},
"http": {
"address": "127.0.0.1",
"port": "8080"
},
"log": {
"level": "debug",
"output": "/dev/null"
}
}

View file

@ -0,0 +1,34 @@
{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot/s3",
"gc": true,
"gcReferrers": false,
"gcDelay": "40m",
"untaggedImageRetentionDelay": "40m",
"gcInterval": "2m",
"storageDriver": {
"name": "s3",
"rootdirectory": "/zot",
"region": "us-east-2",
"bucket": "zot-storage",
"regionendpoint": "http://localhost:4566",
"secure": false,
"skipverify": false
},
"cacheDriver": {
"name": "dynamodb",
"endpoint": "http://localhost:4566",
"region": "us-east-2",
"cacheTablename": "BlobTable"
}
},
"http": {
"address": "127.0.0.1",
"port": "8080"
},
"log": {
"level": "debug",
"output": "/dev/null"
}
}

View file

@ -1,9 +1,11 @@
{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot",
"rootDirectory": "/tmp/zot/local",
"gc": true,
"gcDelay": "10s",
"gcReferrers": true,
"gcDelay": "20s",
"untaggedImageRetentionDelay": "20s",
"gcInterval": "1s"
},
"http": {

View file

@ -0,0 +1,34 @@
{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot/s3",
"gc": true,
"gcReferrers": true,
"gcDelay": "40m",
"untaggedImageRetentionDelay": "40m",
"gcInterval": "2m",
"storageDriver": {
"name": "s3",
"rootdirectory": "/zot",
"region": "us-east-2",
"bucket": "zot-storage",
"regionendpoint": "http://localhost:4566",
"secure": false,
"skipverify": false
},
"cacheDriver": {
"name": "dynamodb",
"endpoint": "http://localhost:4566",
"region": "us-east-2",
"cacheTablename": "BlobTable"
}
},
"http": {
"address": "127.0.0.1",
"port": "8080"
},
"log": {
"level": "debug",
"output": "/dev/null"
}
}