
feat: add support for docker images (#2714)

* feat: add support for docker images

Issue #724

A new config section under "HTTP" called "Compat" is added; it
currently takes a list of compatible legacy media types.

https://github.com/opencontainers/image-spec/blob/main/media-types.md#compatibility-matrix

Only "docker2s2" (Docker Manifest V2 Schema V2) is currently supported.

Garbage collection also needs to be made aware of non-OCI compatible
layer types.
feat: add cve support for non-OCI compatible layer types

Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>

* 

Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>

* test: add more docker compat tests

Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>

* feat: add additional validation checks for non-OCI images

Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>

* ci: make "full" images docker-compatible

Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>

---------

Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>
Authored by Ramkumar Chinchani on 2024-10-31 00:44:04 -07:00, committed via GitHub
commit cb2af94b0b (parent 403fd4eb61)
44 changed files with 436 additions and 191 deletions

@@ -17,7 +17,8 @@ RUN echo '{\n\
 },\n\
 "http": {\n\
 "address": "0.0.0.0",\n\
-"port": "5000"\n\
+"port": "5000",\n\
+"compat": ["docker2s2"]\n\
 },\n\
 "log": {\n\
 "level": "debug"\n\

@@ -21,7 +21,8 @@ build:
 },
 "http":{
 "address":"0.0.0.0",
-"port":"5000"
+"port":"5000",
+"compat": ["docker2s2"]
 },
 "log":{
 "level":"debug"

@@ -0,0 +1,14 @@
+{
+  "distSpecVersion": "1.1.0",
+  "storage": {
+    "rootDirectory": "/tmp/zot"
+  },
+  "http": {
+    "address": "127.0.0.1",
+    "port": "8080",
+    "compat": ["docker2s2"]
+  },
+  "log": {
+    "level": "debug"
+  }
+}

go.mod
@@ -22,7 +22,7 @@ require (
 	github.com/containers/image/v5 v5.32.2
 	github.com/dchest/siphash v1.2.3
 	github.com/didip/tollbooth/v7 v7.0.2
-	github.com/distribution/distribution/v3 v3.0.0-beta.1.0.20240729175644-f0bd0f689923
+	github.com/distribution/distribution/v3 v3.0.0-beta.1.0.20241007110747-0ab7f326e651
 	github.com/dustin/go-humanize v1.0.1
 	github.com/fsnotify/fsnotify v1.7.0
 	github.com/go-ldap/ldap/v3 v3.4.8

go.sum
@@ -605,8 +605,8 @@ github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1G
 github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
-github.com/distribution/distribution/v3 v3.0.0-beta.1.0.20240729175644-f0bd0f689923 h1:j3L5ly19rMyoy+okWyeMdf9+X8NyU3ZuIje8Hc6B+IA=
-github.com/distribution/distribution/v3 v3.0.0-beta.1.0.20240729175644-f0bd0f689923/go.mod h1:3PKHxunRSv4DqZ1Q9HgsocS2/PBIHFfm0ICBKkJQE1g=
+github.com/distribution/distribution/v3 v3.0.0-beta.1.0.20241007110747-0ab7f326e651 h1:IznKfIVhvCjhopIrrvaXrTMloRCkfJGmYiDANR6dBwo=
+github.com/distribution/distribution/v3 v3.0.0-beta.1.0.20241007110747-0ab7f326e651/go.mod h1:Unn8+BXBntRw4BZHI7UVY9wJ7yGW/xtOspXPxo6hFIs=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=

@@ -954,7 +954,7 @@ func TestCookiestoreCleanup(t *testing.T) {
 err = os.Chtimes(sessionPath, changeTime, changeTime)
 So(err, ShouldBeNil)
-imgStore := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)
+imgStore := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imgStore,
@@ -989,7 +989,7 @@ func TestCookiestoreCleanup(t *testing.T) {
 err = os.WriteFile(sessionPath, []byte("session"), storageConstants.DefaultFilePerms)
 So(err, ShouldBeNil)
-imgStore := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)
+imgStore := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imgStore,

@@ -7,6 +7,7 @@ import (
 distspec "github.com/opencontainers/distribution-spec/specs-go"
+"zotregistry.dev/zot/pkg/compat"
 extconf "zotregistry.dev/zot/pkg/extensions/config"
 storageConstants "zotregistry.dev/zot/pkg/storage/constants"
 )
@@ -123,6 +124,7 @@ type HTTPConfig struct {
 AccessControl *AccessControlConfig `mapstructure:"accessControl,omitempty"`
 Realm string
 Ratelimit *RatelimitConfig `mapstructure:",omitempty"`
+Compat []compat.MediaCompatibility `mapstructure:",omitempty"`
 }
 type SchedulerConfig struct {

@@ -688,7 +688,7 @@ func (rh *RouteHandler) UpdateManifest(response http.ResponseWriter, request *ht
 }
 mediaType := request.Header.Get("Content-Type")
-if !storageCommon.IsSupportedMediaType(mediaType) {
+if !storageCommon.IsSupportedMediaType(rh.c.Config.HTTP.Compat, mediaType) {
 err := apiErr.NewError(apiErr.MANIFEST_INVALID).AddDetail(map[string]string{"mediaType": mediaType})
 zcommon.WriteJSON(response, http.StatusUnsupportedMediaType, apiErr.NewErrorList(err))

@@ -119,7 +119,7 @@ func TestNegativeServerResponse(t *testing.T) {
 dir := t.TempDir()
 imageStore := local.NewImageStore(dir, false, false,
-log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)
+log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imageStore,

pkg/compat/compat.go (new file)
@@ -0,0 +1,69 @@
+package compat
+
+import (
+	dockerList "github.com/distribution/distribution/v3/manifest/manifestlist"
+	docker "github.com/distribution/distribution/v3/manifest/schema2"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+
+	"zotregistry.dev/zot/errors"
+)
+
+// MediaCompatibility determines non-OCI media-compatilibility.
+type MediaCompatibility string
+
+const (
+	DockerManifestV2SchemaV2 = "docker2s2"
+)
+
+// docker
+func CompatibleManifestMediaTypes() []string {
+	return []string{docker.MediaTypeManifest}
+}
+
+func IsCompatibleManifestMediaType(mediatype string) bool {
+	for _, mt := range CompatibleManifestMediaTypes() {
+		if mt == mediatype {
+			return true
+		}
+	}
+
+	return false
+}
+
+func CompatibleManifestListMediaTypes() []string {
+	return []string{dockerList.MediaTypeManifestList}
+}
+
+func IsCompatibleManifestListMediaType(mediatype string) bool {
+	for _, mt := range CompatibleManifestListMediaTypes() {
+		if mt == mediatype {
+			return true
+		}
+	}
+
+	return false
+}
+
+func Validate(body []byte, mediaType string) ([]v1.Descriptor, error) {
+	switch mediaType {
+	case docker.MediaTypeManifest:
+		var desm docker.DeserializedManifest
+		if err := desm.UnmarshalJSON(body); err != nil {
+			return nil, err
+		}
+
+		return desm.References(), nil
+	case dockerList.MediaTypeManifestList:
+		var desm dockerList.DeserializedManifestList
+		if err := desm.UnmarshalJSON(body); err != nil {
+			return nil, err
+		}
+
+		return desm.References(), nil
+	}
+
+	return nil, errors.ErrMediaTypeNotSupported
+}

@@ -200,7 +200,7 @@ func RunSignatureUploadAndVerificationTests(t *testing.T, cacheDriverParams map[
 logger.Logger = logger.Output(writers)
 imageStore := local.NewImageStore(globalDir, false, false,
-logger, monitoring.NewMetricsServer(false, logger), nil, nil)
+logger, monitoring.NewMetricsServer(false, logger), nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imageStore,
@@ -321,7 +321,7 @@ func RunSignatureUploadAndVerificationTests(t *testing.T, cacheDriverParams map[
 logger.Logger = logger.Output(writers)
 imageStore := local.NewImageStore(globalDir, false, false,
-logger, monitoring.NewMetricsServer(false, logger), nil, nil)
+logger, monitoring.NewMetricsServer(false, logger), nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imageStore,
@@ -429,7 +429,7 @@ func RunSignatureUploadAndVerificationTests(t *testing.T, cacheDriverParams map[
 logger.Logger = logger.Output(writers)
 imageStore := local.NewImageStore(globalDir, false, false,
-logger, monitoring.NewMetricsServer(false, logger), nil, nil)
+logger, monitoring.NewMetricsServer(false, logger), nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imageStore,
@@ -592,7 +592,7 @@ func RunSignatureUploadAndVerificationTests(t *testing.T, cacheDriverParams map[
 logger.Logger = logger.Output(writers)
 imageStore := local.NewImageStore(globalDir, false, false,
-logger, monitoring.NewMetricsServer(false, logger), nil, nil)
+logger, monitoring.NewMetricsServer(false, logger), nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imageStore,
@@ -856,7 +856,7 @@ func RunSignatureUploadAndVerificationTests(t *testing.T, cacheDriverParams map[
 logger.Logger = logger.Output(writers)
 imageStore := local.NewImageStore(globalDir, false, false,
-logger, monitoring.NewMetricsServer(false, logger), nil, nil)
+logger, monitoring.NewMetricsServer(false, logger), nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imageStore,

@@ -489,7 +489,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
 imgStore := local.NewImageStore(dir, false, false,
-log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
+log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil, nil)
 indexContent, err := imgStore.GetIndexContent("zot-test")
 So(err, ShouldBeNil)
@@ -521,7 +521,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
 imgStore := local.NewImageStore(dir, false, false,
-log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
+log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil, nil)
 indexContent, err := imgStore.GetIndexContent("zot-test")
 So(err, ShouldBeNil)
@@ -591,7 +591,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
 imgStore := local.NewImageStore(dir, false, false,
-log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
+log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil, nil)
 pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
 So(err, ShouldBeNil)
@@ -653,7 +653,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
 imgStore := local.NewImageStore(dir, false, false,
-log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
+log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil, nil)
 pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
 So(err, ShouldNotBeNil)
@@ -717,7 +717,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
 imgStore := local.NewImageStore(dir, false, false,
-log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
+log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil, nil)
 pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
 So(err, ShouldBeNil)
@@ -780,7 +780,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
 imgStore := local.NewImageStore(dir, false, false,
-log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
+log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil, nil)
 err = os.Chmod(path.Join(dir, "zot-test", "blobs"), 0o000)
 if err != nil {
@@ -878,7 +878,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
 linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
 imgStore := local.NewImageStore(dir, false, false,
-log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
+log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil, nil)
 err = os.Chmod(path.Join(dir, "zot-test", "blobs", "sha256", manifest.Config.Digest.Encoded()), 0o000)
 if err != nil {

@@ -195,7 +195,7 @@ func TestRunScrubRepo(t *testing.T) {
 UseRelPaths: true,
 }, log)
 imgStore := local.NewImageStore(dir, true,
-true, log, metrics, nil, cacheDriver)
+true, log, metrics, nil, cacheDriver, nil)
 srcStorageCtlr := ociutils.GetDefaultStoreController(dir, log)
 image := CreateDefaultVulnerableImage()
@@ -231,7 +231,7 @@
 UseRelPaths: true,
 }, log)
 imgStore := local.NewImageStore(dir, true,
-true, log, metrics, nil, cacheDriver)
+true, log, metrics, nil, cacheDriver, nil)
 srcStorageCtlr := ociutils.GetDefaultStoreController(dir, log)
 image := CreateDefaultVulnerableImage()
@@ -272,7 +272,7 @@
 Name: "cache",
 UseRelPaths: true,
 }, log)
-imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
+imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
 srcStorageCtlr := ociutils.GetDefaultStoreController(dir, log)
 image := CreateDefaultVulnerableImage()

@@ -11,6 +11,7 @@ import (
 zerr "zotregistry.dev/zot/errors"
 zcommon "zotregistry.dev/zot/pkg/common"
+"zotregistry.dev/zot/pkg/compat"
 cvemodel "zotregistry.dev/zot/pkg/extensions/search/cve/model"
 "zotregistry.dev/zot/pkg/extensions/search/cve/trivy"
 "zotregistry.dev/zot/pkg/log"
@@ -69,8 +70,10 @@ func (cveinfo BaseCveInfo) GetImageListForCVE(ctx context.Context, repo, cveID s
 }
 for tag, descriptor := range repoMeta.Tags {
-switch descriptor.MediaType {
-case ispec.MediaTypeImageManifest, ispec.MediaTypeImageIndex:
+if descriptor.MediaType == ispec.MediaTypeImageManifest ||
+descriptor.MediaType == ispec.MediaTypeImageIndex ||
+compat.IsCompatibleManifestMediaType(descriptor.MediaType) ||
+compat.IsCompatibleManifestListMediaType(descriptor.MediaType) {
 manifestDigestStr := descriptor.Digest
 manifestDigest := godigest.Digest(manifestDigestStr)
@@ -102,7 +105,7 @@
 },
 })
 }
-default:
+} else {
 cveinfo.Log.Debug().Str("image", repo+":"+tag).Str("mediaType", descriptor.MediaType).
 Msg("image media type not supported for scanning")
 }
@@ -129,8 +132,9 @@ func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(ctx context.Context, repo, c
 return []cvemodel.TagInfo{}, ctx.Err()
 }
-switch descriptor.MediaType {
-case ispec.MediaTypeImageManifest:
+//nolint:gocritic // cannot convert to switch-case
+if descriptor.MediaType == ispec.MediaTypeImageManifest ||
+compat.IsCompatibleManifestMediaType(descriptor.MediaType) {
 manifestDigestStr := descriptor.Digest
 tagInfo, err := getTagInfoForManifest(tag, manifestDigestStr, cveinfo.MetaDB)
@@ -146,7 +150,8 @@
 if cveinfo.isManifestVulnerable(ctx, repo, tag, manifestDigestStr, cveID) {
 vulnerableTags = append(vulnerableTags, tagInfo)
 }
-case ispec.MediaTypeImageIndex:
+} else if descriptor.MediaType == ispec.MediaTypeImageIndex ||
+compat.IsCompatibleManifestListMediaType(descriptor.MediaType) {
 indexDigestStr := descriptor.Digest
 indexContent, err := getIndexContent(cveinfo.MetaDB, indexDigestStr)
@@ -201,7 +206,7 @@
 Timestamp: mostRecentUpdate(vulnerableManifests),
 })
 }
-default:
+} else {
 cveinfo.Log.Debug().Str("mediaType", descriptor.MediaType).
 Msg("image media type not supported for scanning")
 }
@@ -533,18 +538,20 @@ func GetFixedTags(allTags, vulnerableTags []cvemodel.TagInfo) []cvemodel.TagInfo
 for _, tag := range vulnerableTags {
 vulnerableTagMap[tag.Tag] = tag
-switch tag.Descriptor.MediaType {
-case ispec.MediaTypeImageManifest:
+//nolint:gocritic // cannot convert to switch-case
+if tag.Descriptor.MediaType == ispec.MediaTypeImageManifest ||
+compat.IsCompatibleManifestMediaType(tag.Descriptor.MediaType) {
 if tag.Timestamp.Before(earliestVulnerable.Timestamp) {
 earliestVulnerable = tag
 }
-case ispec.MediaTypeImageIndex:
+} else if tag.Descriptor.MediaType == ispec.MediaTypeImageIndex ||
+compat.IsCompatibleManifestListMediaType(tag.Descriptor.MediaType) {
 for _, manifestDesc := range tag.Manifests {
 if manifestDesc.Timestamp.Before(earliestVulnerable.Timestamp) {
 earliestVulnerable = tag
 }
 }
-default:
+} else {
 continue
 }
 }
@@ -557,8 +564,9 @@ func GetFixedTags(allTags, vulnerableTags []cvemodel.TagInfo) []cvemodel.TagInfo
 // There may be older images which have a fix or
 // newer images which don't
 for _, tag := range allTags {
-switch tag.Descriptor.MediaType {
-case ispec.MediaTypeImageManifest:
+//nolint:gocritic // cannot convert to switch-case
+if tag.Descriptor.MediaType == ispec.MediaTypeImageManifest ||
+compat.IsCompatibleManifestMediaType(tag.Descriptor.MediaType) {
 if tag.Timestamp.Before(earliestVulnerable.Timestamp) {
 // The vulnerability did not exist at the time this
 // image was built
@@ -570,7 +578,8 @@
 if _, ok := vulnerableTagMap[tag.Tag]; !ok {
 fixedTags = append(fixedTags, tag)
 }
-case ispec.MediaTypeImageIndex:
+} else if tag.Descriptor.MediaType == ispec.MediaTypeImageIndex ||
+compat.IsCompatibleManifestListMediaType(tag.Descriptor.MediaType) {
 fixedManifests := []cvemodel.DescriptorInfo{}
 // If the latest update inside the index is before the earliest vulnerability found then
@@ -599,7 +608,7 @@
 fixedTags = append(fixedTags, fixedTag)
 }
-default:
+} else {
 continue
 }
 }

@@ -319,7 +319,7 @@ func TestImageFormat(t *testing.T) {
 dbDir := t.TempDir()
 metrics := monitoring.NewMetricsServer(false, log)
-defaultStore := local.NewImageStore(imgDir, false, false, log, metrics, nil, nil)
+defaultStore := local.NewImageStore(imgDir, false, false, log, metrics, nil, nil, nil)
 storeController := storage.StoreController{DefaultStore: defaultStore}
 params := boltdb.DBParameters{

@@ -505,7 +505,7 @@ func TestScanGeneratorWithRealData(t *testing.T) {
 metrics := monitoring.NewMetricsServer(true, logger)
 imageStore := local.NewImageStore(rootDir, false, false,
-logger, metrics, nil, nil)
+logger, metrics, nil, nil, nil)
 storeController := storage.StoreController{DefaultStore: imageStore}
 image := CreateRandomVulnerableImage()

@@ -26,6 +26,7 @@ import (
 zerr "zotregistry.dev/zot/errors"
 zcommon "zotregistry.dev/zot/pkg/common"
+"zotregistry.dev/zot/pkg/compat"
 cvecache "zotregistry.dev/zot/pkg/extensions/search/cve/cache"
 cvemodel "zotregistry.dev/zot/pkg/extensions/search/cve/model"
 "zotregistry.dev/zot/pkg/log"
@@ -243,22 +244,23 @@ func (scanner Scanner) IsImageFormatScannable(repo, ref string) (bool, error) {
 func (scanner Scanner) IsImageMediaScannable(repo, digestStr, mediaType string) (bool, error) {
 image := repo + "@" + digestStr
-switch mediaType {
-case ispec.MediaTypeImageManifest:
+if mediaType == ispec.MediaTypeImageManifest || //nolint:gocritic // not converting to switch-case
+compat.IsCompatibleManifestMediaType(mediaType) {
 ok, err := scanner.isManifestScanable(digestStr)
 if err != nil {
 return ok, fmt.Errorf("image '%s' %w", image, err)
 }
 return ok, nil
-case ispec.MediaTypeImageIndex:
+} else if mediaType == ispec.MediaTypeImageIndex ||
+compat.IsCompatibleManifestListMediaType(mediaType) {
 ok, err := scanner.isIndexScannable(digestStr)
 if err != nil {
 return ok, fmt.Errorf("image '%s' %w", image, err)
 }
 return ok, nil
-default:
+} else {
 return false, nil
 }
 }
@@ -379,10 +381,11 @@ func (scanner Scanner) ScanImage(ctx context.Context, image string) (map[string]
 err error
 )
-switch mediaType {
-case ispec.MediaTypeImageIndex:
+if mediaType == ispec.MediaTypeImageIndex ||
+compat.IsCompatibleManifestListMediaType(mediaType) {
 cveIDMap, err = scanner.scanIndex(ctx, repo, digest)
-default:
+} else if mediaType == ispec.MediaTypeImageManifest ||
+compat.IsCompatibleManifestMediaType(mediaType) {
 cveIDMap, err = scanner.scanManifest(ctx, repo, digest)
 }

@@ -54,11 +54,11 @@ func TestMultipleStoragePath(t *testing.T) {
 // Create ImageStore
-firstStore := local.NewImageStore(firstRootDir, false, false, log, metrics, nil, nil)
-secondStore := local.NewImageStore(secondRootDir, false, false, log, metrics, nil, nil)
-thirdStore := local.NewImageStore(thirdRootDir, false, false, log, metrics, nil, nil)
+firstStore := local.NewImageStore(firstRootDir, false, false, log, metrics, nil, nil, nil)
+secondStore := local.NewImageStore(secondRootDir, false, false, log, metrics, nil, nil, nil)
+thirdStore := local.NewImageStore(thirdRootDir, false, false, log, metrics, nil, nil, nil)
 storeController := storage.StoreController{}
@@ -172,7 +172,7 @@ func TestTrivyLibraryErrors(t *testing.T) {
 metrics := monitoring.NewMetricsServer(false, log)
 // Create ImageStore
-store := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)
+store := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil, nil)
 storeController := storage.StoreController{}
 storeController.DefaultStore = store
@@ -313,7 +313,7 @@ func TestImageScannable(t *testing.T) {
 // Continue with initializing the objects the scanner depends on
 metrics := monitoring.NewMetricsServer(false, log)
-store := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)
+store := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil, nil)
 storeController := storage.StoreController{}
 storeController.DefaultStore = store
@@ -367,7 +367,7 @@ func TestDefaultTrivyDBUrl(t *testing.T) {
 metrics := monitoring.NewMetricsServer(false, log)
 // Create ImageStore
-store := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)
+store := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil, nil)
 storeController := storage.StoreController{}
 storeController.DefaultStore = store

@@ -168,7 +168,7 @@ func TestVulnerableLayer(t *testing.T) {
 log := log.NewLogger("debug", "")
 imageStore := local.NewImageStore(tempDir, false, false,
-log, monitoring.NewMetricsServer(false, log), nil, nil)
+log, monitoring.NewMetricsServer(false, log), nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imageStore,
@@ -239,7 +239,7 @@
 log := log.NewLogger("debug", "")
 imageStore := local.NewImageStore(tempDir, false, false,
-log, monitoring.NewMetricsServer(false, log), nil, nil)
+log, monitoring.NewMetricsServer(false, log), nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imageStore,

@@ -1158,7 +1158,7 @@ func TestExpandedRepoInfo(t *testing.T) {
 ctlr := api.NewController(conf)
 imageStore := local.NewImageStore(tempDir, false, false,
-log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)
+log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imageStore,
@@ -1280,7 +1280,7 @@
 log := log.NewLogger("debug", "")
 metrics := monitoring.NewMetricsServer(false, log)
-testStorage := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)
+testStorage := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil, nil)
 resp, err := resty.R().Get(baseURL + "/v2/")
 So(resp, ShouldNotBeNil)
@@ -1636,7 +1636,7 @@
 ctlr := api.NewController(conf)
 imageStore := local.NewImageStore(conf.Storage.RootDirectory, false, false,
-log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil)
+log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil, nil)
 storeController := storage.StoreController{
 DefaultStore: imageStore,
@@ -5774,7 +5774,7 @@ func TestMetaDBWhenDeletingImages(t *testing.T) {
 // get signatur digest
 log := log.NewLogger("debug", "")
 metrics := monitoring.NewMetricsServer(false, log)
-storage := local.NewImageStore(dir, false, false, log, metrics, nil, nil)
+storage := local.NewImageStore(dir, false, false, log, metrics, nil, nil, nil)
 indexBlob, err := storage.GetIndexContent(repo)
 So(err, ShouldBeNil)
@@ -5848,7 +5848,7 @@
 // get signatur digest
 log := log.NewLogger("debug", "")
 metrics := monitoring.NewMetricsServer(false, log)
-storage := local.NewImageStore(dir, false, false, log, metrics, nil, nil)
+storage := local.NewImageStore(dir, false, false, log, metrics, nil, nil, nil)
 indexBlob, err := storage.GetIndexContent(repo)
 So(err, ShouldBeNil)

@@ -304,5 +304,5 @@ func getTempRootDirFromImageReference(imageReference types.ImageReference, repo,
 func getImageStore(rootDir string, log log.Logger) storageTypes.ImageStore {
 metrics := monitoring.NewMetricsServer(false, log)
-return local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)
+return local.NewImageStore(rootDir, false, false, log, metrics, nil, nil, nil)
 }

@@ -72,7 +72,7 @@ func TestInjectSyncUtils(t *testing.T) {
 log := log.Logger{Logger: zerolog.New(os.Stdout)}
 metrics := monitoring.NewMetricsServer(false, log)
-imageStore := local.NewImageStore(t.TempDir(), false, false, log, metrics, nil, nil)
+imageStore := local.NewImageStore(t.TempDir(), false, false, log, metrics, nil, nil, nil)
 injected = inject.InjectFailure(0)
 ols := NewOciLayoutStorage(storage.StoreController{DefaultStore: imageStore})
@@ -216,7 +216,7 @@ func TestDestinationRegistry(t *testing.T) {
 UseRelPaths: true,
 }, log)
-syncImgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
+syncImgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
 repoName := "repo"
 storeController := storage.StoreController{DefaultStore: syncImgStore}
@@ -334,7 +334,7 @@
 MandatoryAnnotations: []string{"annot1"},
 }, log)
-syncImgStore := local.NewImageStore(dir, true, true, log, metrics, linter, cacheDriver)
+syncImgStore := local.NewImageStore(dir, true, true, log, metrics, linter, cacheDriver, nil)
 repoName := "repo"
 storeController := storage.StoreController{DefaultStore: syncImgStore}

@@ -26,7 +26,7 @@ func TestOnUpdateManifest(t *testing.T) {
 storeController := storage.StoreController{}
 log := log.NewLogger("debug", "")
 metrics := monitoring.NewMetricsServer(false, log)
-storeController.DefaultStore = local.NewImageStore(rootDir, true, true, log, metrics, nil, nil)
+storeController.DefaultStore = local.NewImageStore(rootDir, true, true, log, metrics, nil, nil, nil)
 params := boltdb.DBParameters{
 RootDir: rootDir,

@@ -11,6 +11,7 @@ import (
 zerr "zotregistry.dev/zot/errors"
 zcommon "zotregistry.dev/zot/pkg/common"
+"zotregistry.dev/zot/pkg/compat"
 "zotregistry.dev/zot/pkg/log"
 "zotregistry.dev/zot/pkg/meta/convert"
 mTypes "zotregistry.dev/zot/pkg/meta/types"
@@ -309,8 +310,7 @@ func SetImageMetaFromInput(ctx context.Context, repo, reference, mediaType strin
 ) error {
 var imageMeta mTypes.ImageMeta
-switch mediaType {
-case ispec.MediaTypeImageManifest:
+if mediaType == ispec.MediaTypeImageManifest || compat.IsCompatibleManifestMediaType(mediaType) { //nolint:gocritic,lll // mixing checking mechanisms
 manifestContent := ispec.Manifest{}
 configContent := ispec.Image{}
@@ -367,7 +367,7 @@
 }
 imageMeta = convert.GetImageManifestMeta(manifestContent, configContent, int64(len(blob)), digest)
-case ispec.MediaTypeImageIndex:
+} else if mediaType == ispec.MediaTypeImageIndex || compat.IsCompatibleManifestListMediaType(mediaType) {
 indexContent := ispec.Index{}
 err := json.Unmarshal(blob, &indexContent)
@@ -376,7 +376,7 @@
 }
 imageMeta = convert.GetImageIndexMeta(indexContent, int64(len(blob)), digest)
-default:
+} else {
 return nil
 }

@@ -344,7 +344,7 @@ func RunParseStorageTests(rootDir string, metaDB mTypes.MetaDB, log log.Logger)
 Convey("Test with simple case", func() {
 imageStore := local.NewImageStore(rootDir, false, false,
-log, monitoring.NewMetricsServer(false, log), nil, nil)
+log, monitoring.NewMetricsServer(false, log), nil, nil, nil)
 storeController := storage.StoreController{DefaultStore: imageStore}
 manifests := []ispec.Manifest{}
@@ -419,7 +419,7 @@
 Convey("Accept orphan signatures", func() {
 imageStore := local.NewImageStore(rootDir, false, false,
-log, monitoring.NewMetricsServer(false, log), nil, nil)
+log, monitoring.NewMetricsServer(false, log), nil, nil, nil)
 storeController := storage.StoreController{DefaultStore: imageStore}
@@ -464,7 +464,7 @@
 Convey("Check statistics after load", func() {
 imageStore := local.NewImageStore(rootDir, false, false,
-log, monitoring.NewMetricsServer(false, log), nil, nil)
+log, monitoring.NewMetricsServer(false, log), nil, nil, nil)
 storeController := storage.StoreController{DefaultStore: imageStore}
 // add an image
@@ -505,7 +505,7 @@
 // make sure pushTimestamp is always populated to not interfere with retention logic
 Convey("Always update pushTimestamp if its value is 0(time.Time{})", func() {
 imageStore := local.NewImageStore(rootDir, false, false,
-log, monitoring.NewMetricsServer(false, log), nil, nil)
+log, monitoring.NewMetricsServer(false, log), nil, nil, nil)
 storeController := storage.StoreController{DefaultStore: imageStore}
 // add an image

@@ -11,6 +11,8 @@ import (
 "strings"
 "time"
+dockerList "github.com/distribution/distribution/v3/manifest/manifestlist"
+docker "github.com/distribution/distribution/v3/manifest/schema2"
 "github.com/distribution/distribution/v3/registry/storage/driver"
 godigest "github.com/opencontainers/go-digest"
 "github.com/opencontainers/image-spec/schema"
@@ -19,6 +21,7 @@ import (
 zerr "zotregistry.dev/zot/errors"
 zcommon "zotregistry.dev/zot/pkg/common"
+"zotregistry.dev/zot/pkg/compat"
 "zotregistry.dev/zot/pkg/extensions/monitoring"
 zlog "zotregistry.dev/zot/pkg/log"
 "zotregistry.dev/zot/pkg/scheduler"
@@ -62,10 +65,10 @@ func GetManifestDescByReference(index ispec.Index, reference string) (ispec.Desc
 }
 func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaType string, body []byte,
-log zlog.Logger,
+compats []compat.MediaCompatibility, log zlog.Logger,
 ) error {
 // validate the manifest
-if !IsSupportedMediaType(mediaType) {
+if !IsSupportedMediaType(compats, mediaType) {
 log.Debug().Interface("actual", mediaType).
 Msg("bad manifest media type")
@@ -145,6 +148,23 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
 log.Error().Err(err).Str("digest", manifest.Digest.String()).
 Msg("failed to stat manifest due to missing manifest blob")
+return zerr.ErrBadManifest
+}
+}
+default:
+// non-OCI compatible
+descriptors, err := compat.Validate(body, mediaType)
+if err != nil {
+log.Error().Err(err).Msg("failed to unmarshal JSON")
+return zerr.ErrBadManifest
+}
+for _, desc := range descriptors {
+if ok, _, _, err := imgStore.StatBlob(repo, desc.Digest); !ok || err != nil {
+log.Error().Err(err).Str("digest", desc.Digest.String()).
+Msg("failed to stat non-OCI descriptor due to missing blob")
 return zerr.ErrBadManifest
 }
 }
@@ -796,7 +816,15 @@ func getBlobDescriptorFromManifest(imgStore storageTypes.ImageStore, repo string
 return ispec.Descriptor{}, zerr.ErrBlobNotFound
 }
-func IsSupportedMediaType(mediaType string) bool {
+func IsSupportedMediaType(compats []compat.MediaCompatibility, mediaType string) bool {
+// check for some supported legacy formats if configured
+for _, comp := range compats {
+if comp == compat.DockerManifestV2SchemaV2 &&
+(mediaType == docker.MediaTypeManifest || mediaType == dockerList.MediaTypeManifestList) {
+return true
+}
+}
 return mediaType == ispec.MediaTypeImageIndex ||
 mediaType == ispec.MediaTypeImageManifest
 }
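
A sketch of the resulting behaviour in test form (assuming the storage common package lives at zotregistry.dev/zot/pkg/storage/common, as the storageCommon import alias in routes.go suggests): without a compat list only OCI media types pass, while a list containing "docker2s2" additionally admits the Docker schema2 media types.

package common_test

import (
	"testing"

	docker "github.com/distribution/distribution/v3/manifest/schema2"
	ispec "github.com/opencontainers/image-spec/specs-go/v1"

	"zotregistry.dev/zot/pkg/compat"
	storageCommon "zotregistry.dev/zot/pkg/storage/common"
)

func TestIsSupportedMediaTypeCompat(t *testing.T) {
	// With no compat list configured, Docker media types are still rejected.
	if storageCommon.IsSupportedMediaType(nil, docker.MediaTypeManifest) {
		t.Fatal("docker manifest should be rejected without compat configured")
	}

	compats := []compat.MediaCompatibility{compat.DockerManifestV2SchemaV2}

	// With "docker2s2" configured, Docker Manifest V2 Schema V2 is accepted.
	if !storageCommon.IsSupportedMediaType(compats, docker.MediaTypeManifest) {
		t.Fatal("docker manifest should be accepted with docker2s2 compat")
	}

	// OCI media types are accepted regardless of the compat list.
	if !storageCommon.IsSupportedMediaType(compats, ispec.MediaTypeImageManifest) {
		t.Fatal("OCI image manifest should always be accepted")
	}
}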

@@ -37,7 +37,7 @@ func TestValidateManifest(t *testing.T) {
 Name: "cache",
 UseRelPaths: true,
 }, log)
-imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
+imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
 content := []byte("this is a blob")
 digest := godigest.FromBytes(content)
@@ -180,7 +180,7 @@ func TestGetReferrersErrors(t *testing.T) {
 UseRelPaths: true,
 }, log)
-imgStore := local.NewImageStore(dir, false, true, log, metrics, nil, cacheDriver)
+imgStore := local.NewImageStore(dir, false, true, log, metrics, nil, cacheDriver, nil)
 artifactType := "application/vnd.example.icecream.v1"
 validDigest := godigest.FromBytes([]byte("blob"))
@@ -401,7 +401,7 @@ func TestGetBlobDescriptorFromRepo(t *testing.T) {
 driver := local.New(true)
 imgStore := imagestore.NewImageStore(tdir, tdir, true,
-true, log, metrics, nil, driver, cacheDriver)
+true, log, metrics, nil, driver, cacheDriver, nil)
 repoName := "zot-test"

@@ -17,6 +17,7 @@ import (
 zerr "zotregistry.dev/zot/errors"
 "zotregistry.dev/zot/pkg/api/config"
 zcommon "zotregistry.dev/zot/pkg/common"
+"zotregistry.dev/zot/pkg/compat"
 zlog "zotregistry.dev/zot/pkg/log"
 mTypes "zotregistry.dev/zot/pkg/meta/types"
 "zotregistry.dev/zot/pkg/retention"
@@ -219,8 +220,7 @@ func (gc GarbageCollect) removeIndexReferrers(repo string, rootIndex *ispec.Inde
 var err error
 for _, desc := range index.Manifests {
-switch desc.MediaType {
-case ispec.MediaTypeImageIndex:
+if (desc.MediaType == ispec.MediaTypeImageIndex) || compat.IsCompatibleManifestListMediaType(desc.MediaType) {
 indexImage, err := common.GetImageIndex(gc.imgStore, repo, desc.Digest, gc.log)
 if err != nil {
 gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", desc.Digest.String()).
@@ -249,7 +249,7 @@
 if gced {
 count++
 }
-case ispec.MediaTypeImageManifest:
+} else if (desc.MediaType == ispec.MediaTypeImageManifest) || compat.IsCompatibleManifestMediaType(desc.MediaType) {
 image, err := common.GetImageManifest(gc.imgStore, repo, desc.Digest, gc.log)
 if err != nil {
 gc.log.Error().Err(err).Str("module", "gc").Str("repo", repo).Str("digest", desc.Digest.String()).
@@ -513,8 +513,7 @@ func (gc GarbageCollect) identifyManifestsReferencedInIndex(index ispec.Index, r
 referenced map[godigest.Digest]bool,
 ) error {
 for _, desc := range index.Manifests {
-switch desc.MediaType {
-case ispec.MediaTypeImageIndex:
+if (desc.MediaType == ispec.MediaTypeImageIndex) || compat.IsCompatibleManifestListMediaType(desc.MediaType) {
 indexImage, err := common.GetImageIndex(gc.imgStore, repo, desc.Digest, gc.log)
 if err != nil {
 gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).
@@ -534,7 +533,7 @@
 if err := gc.identifyManifestsReferencedInIndex(indexImage, repo, referenced); err != nil {
 return err
 }
-case ispec.MediaTypeImageManifest:
+} else if (desc.MediaType == ispec.MediaTypeImageManifest) || compat.IsCompatibleManifestMediaType(desc.MediaType) {
 image, err := common.GetImageManifest(gc.imgStore, repo, desc.Digest, gc.log)
 if err != nil {
 gc.log.Error().Err(err).Str("module", "gc").Str("repo", repo).
@@ -675,15 +674,14 @@ func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duratio
 func (gc GarbageCollect) addIndexBlobsToReferences(repo string, index ispec.Index, refBlobs map[string]bool,
 ) error {
 for _, desc := range index.Manifests {
-switch desc.MediaType {
-case ispec.MediaTypeImageIndex:
+if (desc.MediaType == ispec.MediaTypeImageIndex) || compat.IsCompatibleManifestListMediaType(desc.MediaType) {
 if err := gc.addImageIndexBlobsToReferences(repo, desc.Digest, refBlobs); err != nil {
 gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).
 Str("digest", desc.Digest.String()).Msg("failed to read blobs in multiarch(index) image")
 return err
 }
-case ispec.MediaTypeImageManifest:
+} else if (desc.MediaType == ispec.MediaTypeImageManifest) || compat.IsCompatibleManifestMediaType(desc.MediaType) {
 if err := gc.addImageManifestBlobsToReferences(repo, desc.Digest, refBlobs); err != nil {
 gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).
 Str("digest", desc.Digest.String()).Msg("failed to read blobs in image manifest")

@@ -47,7 +47,7 @@ func TestGarbageCollectManifestErrors(t *testing.T) {
 Name: "cache",
 UseRelPaths: true,
 }, log)
-imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
+imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
 gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
 Delay: storageConstants.DefaultGCDelay,
@@ -171,7 +171,7 @@ func TestGarbageCollectIndexErrors(t *testing.T) {
 Name: "cache",
 UseRelPaths: true,
 }, log)
-imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
+imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
 gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
 Delay: storageConstants.DefaultGCDelay,

@@ -140,13 +140,13 @@ func TestGarbageCollectAndRetention(t *testing.T) {
 panic(err)
 }
-imgStore = s3.NewImageStore(rootDir, cacheDir, true, false, log, metrics, nil, store, nil)
+imgStore = s3.NewImageStore(rootDir, cacheDir, true, false, log, metrics, nil, store, nil, nil)
 } else {
 // Create temporary directory
 rootDir := t.TempDir()
 // Create ImageStore
-imgStore = local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)
+imgStore = local.NewImageStore(rootDir, false, false, log, metrics, nil, nil, nil)
 // init metaDB
 params := boltdb.DBParameters{
@@ -1105,7 +1105,7 @@ func TestGarbageCollectDeletion(t *testing.T) {
 rootDir := t.TempDir()
 // Create ImageStore
-imgStore := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)
+imgStore := local.NewImageStore(rootDir, false, false, log, metrics, nil, nil, nil)
 // init metaDB
 params := boltdb.DBParameters{

@@ -21,6 +21,7 @@ import (
 zerr "zotregistry.dev/zot/errors"
 zcommon "zotregistry.dev/zot/pkg/common"
+"zotregistry.dev/zot/pkg/compat"
 "zotregistry.dev/zot/pkg/extensions/monitoring"
 syncConstants "zotregistry.dev/zot/pkg/extensions/sync/constants"
 zlog "zotregistry.dev/zot/pkg/log"
@@ -49,6 +50,7 @@ type ImageStore struct {
 dedupe bool
 linter common.Lint
 commit bool
+compat []compat.MediaCompatibility
 }
 func (is *ImageStore) Name() string {
@@ -67,7 +69,8 @@ func (is *ImageStore) DirExists(d string) bool {
 // see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers
 // Use the last argument to properly set a cache database, or it will default to boltDB local storage.
 func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlog.Logger,
-metrics monitoring.MetricServer, linter common.Lint, storeDriver storageTypes.Driver, cacheDriver cache.Cache,
+metrics monitoring.MetricServer, linter common.Lint, storeDriver storageTypes.Driver,
+cacheDriver cache.Cache, compat []compat.MediaCompatibility,
 ) storageTypes.ImageStore {
 if err := storeDriver.EnsureDir(rootDir); err != nil {
 log.Error().Err(err).Str("rootDir", rootDir).Msg("failed to create root dir")
@@ -85,6 +88,7 @@ func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlo
 linter: linter,
 commit: commit,
 cache: cacheDriver,
+compat: compat,
 }
 return imgStore
@@ -490,7 +494,7 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli
 refIsDigest = false
 }
-err = common.ValidateManifest(is, repo, reference, mediaType, body, is.log)
+err = common.ValidateManifest(is, repo, reference, mediaType, body, is.compat, is.log)
 if err != nil {
 return mDigest, "", err
 }

View file

@ -1,6 +1,7 @@
package local package local
import ( import (
"zotregistry.dev/zot/pkg/compat"
"zotregistry.dev/zot/pkg/extensions/monitoring" "zotregistry.dev/zot/pkg/extensions/monitoring"
zlog "zotregistry.dev/zot/pkg/log" zlog "zotregistry.dev/zot/pkg/log"
"zotregistry.dev/zot/pkg/storage/cache" "zotregistry.dev/zot/pkg/storage/cache"
@ -13,6 +14,7 @@ import (
// Use the last argument to properly set a cache database, or it will default to boltDB local storage. // Use the last argument to properly set a cache database, or it will default to boltDB local storage.
func NewImageStore(rootDir string, dedupe, commit bool, log zlog.Logger, func NewImageStore(rootDir string, dedupe, commit bool, log zlog.Logger,
metrics monitoring.MetricServer, linter common.Lint, cacheDriver cache.Cache, metrics monitoring.MetricServer, linter common.Lint, cacheDriver cache.Cache,
compat []compat.MediaCompatibility,
) storageTypes.ImageStore { ) storageTypes.ImageStore {
return imagestore.NewImageStore( return imagestore.NewImageStore(
rootDir, rootDir,
@ -24,5 +26,6 @@ func NewImageStore(rootDir string, dedupe, commit bool, log zlog.Logger,
linter, linter,
New(commit), New(commit),
cacheDriver, cacheDriver,
compat,
) )
} }
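A short usage sketch of the updated local constructor follows, wired the same way as the tests in this change (zerolog-backed logger, metrics server, nil linter and nil cache driver). It assumes compat.MediaCompatibility is a string-based type that takes the same "docker2s2" value used in the HTTP config; the import path of the local package and the root directory are illustrative.

package main

import (
	"os"

	"github.com/rs/zerolog"

	"zotregistry.dev/zot/pkg/compat"
	"zotregistry.dev/zot/pkg/extensions/monitoring"
	zlog "zotregistry.dev/zot/pkg/log"
	"zotregistry.dev/zot/pkg/storage/local"
)

func main() {
	log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
	metrics := monitoring.NewMetricsServer(false, log)

	// nil linter and nil cache driver, as in most tests in this change; the new
	// trailing argument opts the store into Docker manifest v2 schema 2 support.
	imgStore := local.NewImageStore("/tmp/zot", false, false, log, metrics, nil, nil,
		[]compat.MediaCompatibility{"docker2s2"})
	_ = imgStore
}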

View file

@ -35,7 +35,7 @@ func TestElevatedPrivilegesInvalidDedupe(t *testing.T) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
upload, err := imgStore.NewBlobUpload("dedupe1") upload, err := imgStore.NewBlobUpload("dedupe1")
So(err, ShouldBeNil) So(err, ShouldBeNil)

View file

@ -83,7 +83,7 @@ func TestStorageFSAPIs(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
Convey("Repo layout", t, func(c C) { Convey("Repo layout", t, func(c C) {
Convey("Bad image manifest", func() { Convey("Bad image manifest", func() {
@ -217,7 +217,7 @@ func FuzzNewBlobUpload(f *testing.F) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
_, err := imgStore.NewBlobUpload(data) _, err := imgStore.NewBlobUpload(data)
if err != nil { if err != nil {
@ -244,7 +244,7 @@ func FuzzPutBlobChunk(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
repoName := data repoName := data
@ -280,7 +280,7 @@ func FuzzPutBlobChunkStreamed(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
repoName := data repoName := data
@ -314,7 +314,7 @@ func FuzzGetBlobUpload(f *testing.F) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, imgStore := local.NewImageStore(dir, true, true, log, metrics, nil,
cacheDriver) cacheDriver, nil)
_, err := imgStore.GetBlobUpload(data1, data2) _, err := imgStore.GetBlobUpload(data1, data2)
if err != nil { if err != nil {
@ -340,7 +340,7 @@ func FuzzTestPutGetImageManifest(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
cblob, cdigest := GetRandomImageConfig() cblob, cdigest := GetRandomImageConfig()
@ -396,7 +396,7 @@ func FuzzTestPutDeleteImageManifest(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
cblob, cdigest := GetRandomImageConfig() cblob, cdigest := GetRandomImageConfig()
@ -457,7 +457,7 @@ func FuzzTestDeleteImageManifest(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
digest, _, err := newRandomBlobForFuzz(data) digest, _, err := newRandomBlobForFuzz(data)
if err != nil { if err != nil {
@ -494,7 +494,7 @@ func FuzzInitRepo(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
err := imgStore.InitRepo(data) err := imgStore.InitRepo(data)
if err != nil { if err != nil {
@ -520,7 +520,7 @@ func FuzzInitValidateRepo(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
err := imgStore.InitRepo(data) err := imgStore.InitRepo(data)
if err != nil { if err != nil {
@ -555,7 +555,7 @@ func FuzzGetImageTags(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
_, err := imgStore.GetImageTags(data) _, err := imgStore.GetImageTags(data)
if err != nil { if err != nil {
@ -581,7 +581,7 @@ func FuzzBlobUploadPath(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
_ = imgStore.BlobUploadPath(repo, uuid) _ = imgStore.BlobUploadPath(repo, uuid)
}) })
@ -600,7 +600,7 @@ func FuzzBlobUploadInfo(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
repo := data repo := data
_, err := imgStore.BlobUploadInfo(repo, uuid) _, err := imgStore.BlobUploadInfo(repo, uuid)
@ -626,7 +626,7 @@ func FuzzTestGetImageManifest(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
repoName := data repoName := data
@ -655,7 +655,7 @@ func FuzzFinishBlobUpload(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
repoName := data repoName := data
@ -707,7 +707,7 @@ func FuzzFullBlobUpload(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
ldigest, lblob, err := newRandomBlobForFuzz(data) ldigest, lblob, err := newRandomBlobForFuzz(data)
if err != nil { if err != nil {
@ -750,7 +750,7 @@ func TestStorageCacheErrors(t *testing.T) {
GetBlobFn: func(digest godigest.Digest) (string, error) { GetBlobFn: func(digest godigest.Digest) (string, error) {
return getBlobPath, nil return getBlobPath, nil
}, },
}) }, nil)
err := imgStore.InitRepo(originRepo) err := imgStore.InitRepo(originRepo)
So(err, ShouldBeNil) So(err, ShouldBeNil)
@ -780,7 +780,7 @@ func FuzzDedupeBlob(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
blobDigest := godigest.FromString(data) blobDigest := godigest.FromString(data)
@ -821,7 +821,7 @@ func FuzzDeleteBlobUpload(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
uuid, err := imgStore.NewBlobUpload(repoName) uuid, err := imgStore.NewBlobUpload(repoName)
if err != nil { if err != nil {
@ -853,7 +853,7 @@ func FuzzBlobPath(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
digest := godigest.FromString(data) digest := godigest.FromString(data)
_ = imgStore.BlobPath(repoName, digest) _ = imgStore.BlobPath(repoName, digest)
@ -874,7 +874,7 @@ func FuzzCheckBlob(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
digest := godigest.FromString(data) digest := godigest.FromString(data)
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest) _, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
@ -907,7 +907,7 @@ func FuzzGetBlob(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
digest := godigest.FromString(data) digest := godigest.FromString(data)
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest) _, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
@ -948,7 +948,7 @@ func FuzzDeleteBlob(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
digest := godigest.FromString(data) digest := godigest.FromString(data)
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest) _, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
@ -985,7 +985,7 @@ func FuzzGetIndexContent(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
digest := godigest.FromString(data) digest := godigest.FromString(data)
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest) _, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
@ -1022,7 +1022,7 @@ func FuzzGetBlobContent(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, *log) }, *log)
imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, *log, metrics, nil, cacheDriver, nil)
digest := godigest.FromString(data) digest := godigest.FromString(data)
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest) _, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
@ -1060,7 +1060,7 @@ func FuzzRunGCRepo(f *testing.F) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Delay: storageConstants.DefaultGCDelay, Delay: storageConstants.DefaultGCDelay,
@ -1104,9 +1104,9 @@ func TestDedupeLinks(t *testing.T) {
var imgStore storageTypes.ImageStore var imgStore storageTypes.ImageStore
if testCase.dedupe { if testCase.dedupe {
imgStore = local.NewImageStore(dir, testCase.dedupe, true, log, metrics, nil, cacheDriver) imgStore = local.NewImageStore(dir, testCase.dedupe, true, log, metrics, nil, cacheDriver, nil)
} else { } else {
imgStore = local.NewImageStore(dir, testCase.dedupe, true, log, metrics, nil, nil) imgStore = local.NewImageStore(dir, testCase.dedupe, true, log, metrics, nil, nil, nil)
} }
// run on empty image store // run on empty image store
@ -1282,7 +1282,7 @@ func TestDedupeLinks(t *testing.T) {
Convey("test RunDedupeForDigest directly, trigger stat error on original blob", func() { Convey("test RunDedupeForDigest directly, trigger stat error on original blob", func() {
// rebuild with dedupe true // rebuild with dedupe true
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
duplicateBlobs := []string{ duplicateBlobs := []string{
path.Join(dir, "dedupe1", "blobs", "sha256", blobDigest1), path.Join(dir, "dedupe1", "blobs", "sha256", blobDigest1),
@ -1303,7 +1303,7 @@ func TestDedupeLinks(t *testing.T) {
defer taskScheduler.Shutdown() defer taskScheduler.Shutdown()
// rebuild with dedupe true // rebuild with dedupe true
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler) imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler)
@ -1317,7 +1317,7 @@ func TestDedupeLinks(t *testing.T) {
defer taskScheduler.Shutdown() defer taskScheduler.Shutdown()
// rebuild with dedupe true // rebuild with dedupe true
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler) imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler)
// wait until rebuild finishes // wait until rebuild finishes
@ -1337,7 +1337,7 @@ func TestDedupeLinks(t *testing.T) {
taskScheduler := runAndGetScheduler() taskScheduler := runAndGetScheduler()
defer taskScheduler.Shutdown() defer taskScheduler.Shutdown()
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, nil) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, nil, nil)
// rebuild with dedupe true // rebuild with dedupe true
imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler) imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler)
@ -1367,7 +1367,7 @@ func TestDedupeLinks(t *testing.T) {
PutBlobFn: func(digest godigest.Digest, path string) error { PutBlobFn: func(digest godigest.Digest, path string) error {
return errCache return errCache
}, },
}) }, nil)
// rebuild with dedupe true, should have samefile blobs // rebuild with dedupe true, should have samefile blobs
imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler) imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler)
// wait until rebuild finishes // wait until rebuild finishes
@ -1400,7 +1400,7 @@ func TestDedupeLinks(t *testing.T) {
return nil return nil
}, },
}) }, nil)
// rebuild with dedupe true, should have samefile blobs // rebuild with dedupe true, should have samefile blobs
imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler) imgStore.RunDedupeBlobs(time.Duration(0), taskScheduler)
// wait until rebuild finishes // wait until rebuild finishes
@ -1495,7 +1495,7 @@ func TestDedupe(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
il := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) il := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
So(il.DedupeBlob("", "", "", ""), ShouldNotBeNil) So(il.DedupeBlob("", "", "", ""), ShouldNotBeNil)
}) })
@ -1516,7 +1516,7 @@ func TestNegativeCases(t *testing.T) {
}, log) }, log)
So(local.NewImageStore(dir, true, So(local.NewImageStore(dir, true,
true, log, metrics, nil, cacheDriver), ShouldNotBeNil) true, log, metrics, nil, cacheDriver, nil), ShouldNotBeNil)
if os.Geteuid() != 0 { if os.Geteuid() != 0 {
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{ cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
@ -1524,7 +1524,7 @@ func TestNegativeCases(t *testing.T) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
So(local.NewImageStore("/deadBEEF", true, true, log, metrics, nil, cacheDriver), ShouldBeNil) So(local.NewImageStore("/deadBEEF", true, true, log, metrics, nil, cacheDriver, nil), ShouldBeNil)
} }
}) })
@ -1539,7 +1539,7 @@ func TestNegativeCases(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
err := os.Chmod(dir, 0o000) // remove all perms err := os.Chmod(dir, 0o000) // remove all perms
if err != nil { if err != nil {
@ -1589,7 +1589,7 @@ func TestNegativeCases(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
So(imgStore, ShouldNotBeNil) So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil) So(imgStore.InitRepo("test"), ShouldBeNil)
@ -1703,7 +1703,7 @@ func TestNegativeCases(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
So(imgStore, ShouldNotBeNil) So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil) So(imgStore.InitRepo("test"), ShouldBeNil)
@ -1730,7 +1730,7 @@ func TestNegativeCases(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
So(imgStore, ShouldNotBeNil) So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil) So(imgStore.InitRepo("test"), ShouldBeNil)
@ -1778,7 +1778,7 @@ func TestNegativeCases(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
So(imgStore, ShouldNotBeNil) So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil) So(imgStore.InitRepo("test"), ShouldBeNil)
@ -1956,7 +1956,7 @@ func TestInjectWriteFile(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, false, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, false, log, metrics, nil, cacheDriver, nil)
Convey("Failure path not reached", func() { Convey("Failure path not reached", func() {
err := imgStore.InitRepo("repo1") err := imgStore.InitRepo("repo1")
@ -1987,7 +1987,7 @@ func TestGarbageCollectForImageStore(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
repoName := "gc-all-repos-short" //nolint:goconst // test data repoName := "gc-all-repos-short" //nolint:goconst // test data
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
@ -2035,7 +2035,7 @@ func TestGarbageCollectForImageStore(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
repoName := "gc-all-repos-short" repoName := "gc-all-repos-short"
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
@ -2073,7 +2073,7 @@ func TestGarbageCollectForImageStore(t *testing.T) {
Name: "cache", Name: "cache",
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
repoName := "gc-sig" repoName := "gc-sig"
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
@ -2151,7 +2151,7 @@ func TestGarbageCollectForImageStore(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
repoName := "gc-all-repos-short" repoName := "gc-all-repos-short"
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
@ -2226,7 +2226,7 @@ func TestGarbageCollectImageUnknownManifest(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
storeController := storage.StoreController{ storeController := storage.StoreController{
DefaultStore: imgStore, DefaultStore: imgStore,
@ -2409,7 +2409,7 @@ func TestGarbageCollectErrors(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
repoName := "gc-index" repoName := "gc-index"
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
@ -2656,7 +2656,7 @@ func TestInitRepo(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
err := os.Mkdir(path.Join(dir, "test-dir"), 0o000) err := os.Mkdir(path.Join(dir, "test-dir"), 0o000)
So(err, ShouldBeNil) So(err, ShouldBeNil)
@ -2678,7 +2678,7 @@ func TestValidateRepo(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
err := os.Mkdir(path.Join(dir, "test-dir"), 0o000) err := os.Mkdir(path.Join(dir, "test-dir"), 0o000)
So(err, ShouldBeNil) So(err, ShouldBeNil)
@ -2698,7 +2698,7 @@ func TestValidateRepo(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
_, err := imgStore.ValidateRepo(".") _, err := imgStore.ValidateRepo(".")
So(err, ShouldNotBeNil) So(err, ShouldNotBeNil)
@ -2743,7 +2743,7 @@ func TestGetRepositories(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
// Create valid directory with permissions // Create valid directory with permissions
err := os.Mkdir(path.Join(dir, "test-dir"), 0o755) //nolint: gosec err := os.Mkdir(path.Join(dir, "test-dir"), 0o755) //nolint: gosec
@ -2838,7 +2838,7 @@ func TestGetRepositories(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
// Root dir does not contain repos // Root dir does not contain repos
repos, err := imgStore.GetRepositories() repos, err := imgStore.GetRepositories()
@ -2885,7 +2885,7 @@ func TestGetRepositories(t *testing.T) {
}, log) }, log)
imgStore := local.NewImageStore(rootDir, imgStore := local.NewImageStore(rootDir,
true, true, log, metrics, nil, cacheDriver, true, true, log, metrics, nil, cacheDriver, nil,
) )
// Root dir does not contain repos // Root dir does not contain repos
@ -2928,7 +2928,7 @@ func TestGetNextRepository(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
firstRepoName := "repo1" firstRepoName := "repo1"
secondRepoName := "repo2" secondRepoName := "repo2"
@ -2981,7 +2981,7 @@ func TestPutBlobChunkStreamed(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
uuid, err := imgStore.NewBlobUpload("test") uuid, err := imgStore.NewBlobUpload("test")
So(err, ShouldBeNil) So(err, ShouldBeNil)
@ -3011,7 +3011,7 @@ func TestPullRange(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
repoName := "pull-range" repoName := "pull-range"
upload, err := imgStore.NewBlobUpload(repoName) upload, err := imgStore.NewBlobUpload(repoName)
@ -3053,7 +3053,7 @@ func TestStatIndex(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
log := zlog.Logger{Logger: zerolog.New(os.Stdout)} log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log) metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, nil) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, nil, nil)
err := WriteImageToFileSystem(CreateRandomImage(), "repo", "tag", err := WriteImageToFileSystem(CreateRandomImage(), "repo", "tag",
storage.StoreController{DefaultStore: imgStore}) storage.StoreController{DefaultStore: imgStore})
@ -3077,7 +3077,7 @@ func TestStorageDriverErr(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver, nil)
Convey("Init repo", t, func() { Convey("Init repo", t, func() {
err := imgStore.InitRepo(repoName) err := imgStore.InitRepo(repoName)

View file

@ -6,6 +6,7 @@ import (
// Load s3 driver. // Load s3 driver.
_ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws" _ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
"zotregistry.dev/zot/pkg/compat"
"zotregistry.dev/zot/pkg/extensions/monitoring" "zotregistry.dev/zot/pkg/extensions/monitoring"
zlog "zotregistry.dev/zot/pkg/log" zlog "zotregistry.dev/zot/pkg/log"
"zotregistry.dev/zot/pkg/storage/cache" "zotregistry.dev/zot/pkg/storage/cache"
@ -18,7 +19,8 @@ import (
// see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers // see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers
// Use the last argument to properly set a cache database, or it will default to boltDB local storage. // Use the last argument to properly set a cache database, or it will default to boltDB local storage.
func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlog.Logger, func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlog.Logger,
metrics monitoring.MetricServer, linter common.Lint, store driver.StorageDriver, cacheDriver cache.Cache, metrics monitoring.MetricServer, linter common.Lint, store driver.StorageDriver,
cacheDriver cache.Cache, compat []compat.MediaCompatibility,
) storageTypes.ImageStore { ) storageTypes.ImageStore {
return imagestore.NewImageStore( return imagestore.NewImageStore(
rootDir, rootDir,
@ -30,5 +32,6 @@ func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlo
linter, linter,
New(store), New(store),
cacheDriver, cacheDriver,
compat,
) )
} }

View file

@ -75,7 +75,7 @@ func createMockStorage(rootDir string, cacheDir string, dedupe bool, store drive
}, log) }, log)
} }
il := s3.NewImageStore(rootDir, cacheDir, dedupe, false, log, metrics, nil, store, cacheDriver) il := s3.NewImageStore(rootDir, cacheDir, dedupe, false, log, metrics, nil, store, cacheDriver, nil)
return il return il
} }
@ -86,7 +86,7 @@ func createMockStorageWithMockCache(rootDir string, dedupe bool, store driver.St
log := log.Logger{Logger: zerolog.New(os.Stdout)} log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log) metrics := monitoring.NewMetricsServer(false, log)
il := s3.NewImageStore(rootDir, "", dedupe, false, log, metrics, nil, store, cacheDriver) il := s3.NewImageStore(rootDir, "", dedupe, false, log, metrics, nil, store, cacheDriver, nil)
return il return il
} }
@ -147,7 +147,7 @@ func createObjectsStore(rootDir string, cacheDir string, dedupe bool) (
}, log) }, log)
} }
il := s3.NewImageStore(rootDir, cacheDir, dedupe, false, log, metrics, nil, store, cacheDriver) il := s3.NewImageStore(rootDir, cacheDir, dedupe, false, log, metrics, nil, store, cacheDriver, nil)
return store, il, err return store, il, err
} }
@ -181,7 +181,7 @@ func createObjectsStoreDynamo(rootDir string, cacheDir string, dedupe bool, tabl
panic(err) panic(err)
} }
il := s3.NewImageStore(rootDir, cacheDir, dedupe, false, log, metrics, nil, store, cacheDriver) il := s3.NewImageStore(rootDir, cacheDir, dedupe, false, log, metrics, nil, store, cacheDriver, nil)
return store, il, err return store, il, err
} }

View file

@ -49,7 +49,7 @@ func TestLocalCheckAllBlobsIntegrity(t *testing.T) {
UseRelPaths: true, UseRelPaths: true,
}, log) }, log)
driver := local.New(true) driver := local.New(true)
imgStore := local.NewImageStore(tdir, true, true, log, metrics, nil, cacheDriver) imgStore := local.NewImageStore(tdir, true, true, log, metrics, nil, cacheDriver, nil)
RunCheckAllBlobsIntegrityTests(t, imgStore, driver, log) RunCheckAllBlobsIntegrityTests(t, imgStore, driver, log)
}) })

View file

@ -58,7 +58,7 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer
//nolint:typecheck,contextcheck //nolint:typecheck,contextcheck
rootDir := config.Storage.RootDirectory rootDir := config.Storage.RootDirectory
defaultStore = local.NewImageStore(rootDir, defaultStore = local.NewImageStore(rootDir,
config.Storage.Dedupe, config.Storage.Commit, log, metrics, linter, cacheDriver, config.Storage.Dedupe, config.Storage.Commit, log, metrics, linter, cacheDriver, config.HTTP.Compat,
) )
} else { } else {
storeName := fmt.Sprintf("%v", config.Storage.StorageDriver["name"]) storeName := fmt.Sprintf("%v", config.Storage.StorageDriver["name"])
@ -92,7 +92,7 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer
// false positive lint - linter does not implement Lint method // false positive lint - linter does not implement Lint method
//nolint: typecheck,contextcheck //nolint: typecheck,contextcheck
defaultStore = s3.NewImageStore(rootDir, config.Storage.RootDirectory, defaultStore = s3.NewImageStore(rootDir, config.Storage.RootDirectory,
config.Storage.Dedupe, config.Storage.Commit, log, metrics, linter, store, cacheDriver) config.Storage.Dedupe, config.Storage.Commit, log, metrics, linter, store, cacheDriver, config.HTTP.Compat)
} }
storeController.DefaultStore = defaultStore storeController.DefaultStore = defaultStore
@ -170,7 +170,7 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig,
rootDir := storageConfig.RootDirectory rootDir := storageConfig.RootDirectory
imgStoreMap[storageConfig.RootDirectory] = local.NewImageStore(rootDir, imgStoreMap[storageConfig.RootDirectory] = local.NewImageStore(rootDir,
storageConfig.Dedupe, storageConfig.Commit, log, metrics, linter, cacheDriver, storageConfig.Dedupe, storageConfig.Commit, log, metrics, linter, cacheDriver, cfg.HTTP.Compat,
) )
subImageStore[route] = imgStoreMap[storageConfig.RootDirectory] subImageStore[route] = imgStoreMap[storageConfig.RootDirectory]
@ -210,7 +210,7 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig,
// false positive lint - linter does not implement Lint method // false positive lint - linter does not implement Lint method
//nolint: typecheck //nolint: typecheck
subImageStore[route] = s3.NewImageStore(rootDir, storageConfig.RootDirectory, subImageStore[route] = s3.NewImageStore(rootDir, storageConfig.RootDirectory,
storageConfig.Dedupe, storageConfig.Commit, log, metrics, linter, store, cacheDriver, storageConfig.Dedupe, storageConfig.Commit, log, metrics, linter, store, cacheDriver, cfg.HTTP.Compat,
) )
} }
} }
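The factory above simply forwards config.HTTP.Compat into every image store it builds, so the option can also be set programmatically before constructing the store controller. A brief sketch under that assumption (config.New(), the HTTP field names, and the string-typed compat value are not all visible in this diff and should be treated as assumptions):

package main

import (
	"zotregistry.dev/zot/pkg/api/config"
	"zotregistry.dev/zot/pkg/compat"
)

func main() {
	cfg := config.New()
	cfg.HTTP.Address = "0.0.0.0"
	cfg.HTTP.Port = "5000"
	// Equivalent to "compat": ["docker2s2"] in the JSON config.
	cfg.HTTP.Compat = []compat.MediaCompatibility{"docker2s2"}
}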

View file

@ -102,7 +102,7 @@ func createObjectsStore(rootDir string, cacheDir string) (
UseRelPaths: false, UseRelPaths: false,
}, log) }, log)
il := s3.NewImageStore(rootDir, cacheDir, true, false, log, metrics, nil, store, cacheDriver) il := s3.NewImageStore(rootDir, cacheDir, true, false, log, metrics, nil, store, cacheDriver, nil)
return store, il, err return store, il, err
} }
@ -167,7 +167,7 @@ func TestGetAllDedupeReposCandidates(t *testing.T) {
driver := local.New(true) driver := local.New(true)
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver) imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver, nil)
} }
Convey("Push repos with deduped blobs", t, func(c C) { Convey("Push repos with deduped blobs", t, func(c C) {
@ -237,7 +237,7 @@ func TestStorageAPIs(t *testing.T) {
driver := local.New(true) driver := local.New(true)
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver) imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver, nil)
} }
Convey("Repo layout", t, func(c C) { Convey("Repo layout", t, func(c C) {
@ -952,7 +952,7 @@ func TestMandatoryAnnotations(t *testing.T) {
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storageTypes.ImageStore) (bool, error) { LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storageTypes.ImageStore) (bool, error) {
return false, nil return false, nil
}, },
}, driver, nil) }, driver, nil, nil)
defer cleanupStorage(store, testDir) defer cleanupStorage(store, testDir)
} else { } else {
@ -968,7 +968,7 @@ func TestMandatoryAnnotations(t *testing.T) {
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storageTypes.ImageStore) (bool, error) { LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storageTypes.ImageStore) (bool, error) {
return false, nil return false, nil
}, },
}, driver, cacheDriver) }, driver, cacheDriver, nil)
} }
Convey("Setup manifest", t, func() { Convey("Setup manifest", t, func() {
@ -1022,7 +1022,7 @@ func TestMandatoryAnnotations(t *testing.T) {
//nolint: goerr113 //nolint: goerr113
return false, errors.New("linter error") return false, errors.New("linter error")
}, },
}, driver, nil) }, driver, nil, nil)
} else { } else {
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{ cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: tdir, RootDir: tdir,
@ -1036,7 +1036,7 @@ func TestMandatoryAnnotations(t *testing.T) {
//nolint: goerr113 //nolint: goerr113
return false, errors.New("linter error") return false, errors.New("linter error")
}, },
}, driver, cacheDriver) }, driver, cacheDriver, nil)
} }
_, _, err = imgStore.PutImageManifest("test", "1.0.0", ispec.MediaTypeImageManifest, manifestBuf) _, _, err = imgStore.PutImageManifest("test", "1.0.0", ispec.MediaTypeImageManifest, manifestBuf)
@ -1150,7 +1150,7 @@ func TestDeleteBlobsInUse(t *testing.T) {
}, log) }, log)
driver := local.New(true) driver := local.New(true)
imgStore = imagestore.NewImageStore(tdir, tdir, true, imgStore = imagestore.NewImageStore(tdir, tdir, true,
true, log, metrics, nil, driver, cacheDriver) true, log, metrics, nil, driver, cacheDriver, nil)
} }
Convey("Setup manifest", t, func() { Convey("Setup manifest", t, func() {
@ -1458,7 +1458,7 @@ func TestReuploadCorruptedBlob(t *testing.T) {
}, log) }, log)
driver = local.New(true) driver = local.New(true)
imgStore = imagestore.NewImageStore(tdir, tdir, true, imgStore = imagestore.NewImageStore(tdir, tdir, true,
true, log, metrics, nil, driver, cacheDriver) true, log, metrics, nil, driver, cacheDriver, nil)
} }
Convey("Test errors paths", t, func() { Convey("Test errors paths", t, func() {
@ -1604,11 +1604,14 @@ func TestStorageHandler(t *testing.T) {
driver := local.New(true) driver := local.New(true)
// Create ImageStore // Create ImageStore
firstStore = imagestore.NewImageStore(firstRootDir, firstRootDir, false, false, log, metrics, nil, driver, nil) firstStore = imagestore.NewImageStore(firstRootDir, firstRootDir, false, false,
log, metrics, nil, driver, nil, nil)
secondStore = imagestore.NewImageStore(secondRootDir, secondRootDir, false, false, log, metrics, nil, driver, nil) secondStore = imagestore.NewImageStore(secondRootDir, secondRootDir, false, false,
log, metrics, nil, driver, nil, nil)
thirdStore = imagestore.NewImageStore(thirdRootDir, thirdRootDir, false, false, log, metrics, nil, driver, nil) thirdStore = imagestore.NewImageStore(thirdRootDir, thirdRootDir, false, false, log,
metrics, nil, driver, nil, nil)
} }
Convey("Test storage handler", t, func() { Convey("Test storage handler", t, func() {
@ -1693,7 +1696,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
driver := local.New(true) driver := local.New(true)
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver) imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver, nil)
} }
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
@ -1865,7 +1868,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
driver := local.New(true) driver := local.New(true)
imgStore = imagestore.NewImageStore(dir, dir, true, imgStore = imagestore.NewImageStore(dir, dir, true,
true, log, metrics, nil, driver, cacheDriver) true, log, metrics, nil, driver, cacheDriver, nil)
} }
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
@ -2150,7 +2153,7 @@ func TestGarbageCollectImageManifest(t *testing.T) {
driver := local.New(true) driver := local.New(true)
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver) imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver, nil)
} }
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
@ -2395,7 +2398,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
driver := local.New(true) driver := local.New(true)
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver) imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver, nil)
} }
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
@ -2524,7 +2527,7 @@ func TestGarbageCollectImageIndex(t *testing.T) {
driver := local.New(true) driver := local.New(true)
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver) imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver, nil)
} }
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
@ -2812,7 +2815,7 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
driver := local.New(true) driver := local.New(true)
imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver) imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver, nil)
} }
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{ gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{

View file

@ -433,7 +433,7 @@ func TestExtractImageDetails(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
testLogger := log.NewLogger("debug", "") testLogger := log.NewLogger("debug", "")
imageStore := local.NewImageStore(dir, false, false, imageStore := local.NewImageStore(dir, false, false,
testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil) testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil, nil)
storeController := storage.StoreController{ storeController := storage.StoreController{
DefaultStore: imageStore, DefaultStore: imageStore,
@ -457,7 +457,7 @@ func TestExtractImageDetails(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
testLogger := log.NewLogger("debug", "") testLogger := log.NewLogger("debug", "")
imageStore := local.NewImageStore(dir, false, false, imageStore := local.NewImageStore(dir, false, false,
testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil) testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil, nil)
storeController := storage.StoreController{ storeController := storage.StoreController{
DefaultStore: imageStore, DefaultStore: imageStore,
@ -477,7 +477,7 @@ func TestExtractImageDetails(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
testLogger := log.NewLogger("debug", "") testLogger := log.NewLogger("debug", "")
imageStore := local.NewImageStore(dir, false, false, imageStore := local.NewImageStore(dir, false, false,
testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil) testLogger, monitoring.NewMetricsServer(false, testLogger), nil, nil, nil)
storeController := storage.StoreController{ storeController := storage.StoreController{
DefaultStore: imageStore, DefaultStore: imageStore,

View file

@ -19,7 +19,7 @@ func GetDefaultImageStore(rootDir string, log zLog.Logger) stypes.ImageStore {
return true, nil return true, nil
}, },
}, },
mocks.CacheMock{}, mocks.CacheMock{}, nil,
) )
} }

View file

@ -9,7 +9,7 @@ PATH=$PATH:${SCRIPTPATH}/../../hack/tools/bin
tests=("pushpull" "pushpull_authn" "delete_images" "referrers" "metadata" "anonymous_policy" tests=("pushpull" "pushpull_authn" "delete_images" "referrers" "metadata" "anonymous_policy"
"annotations" "detect_manifest_collision" "cve" "sync" "sync_docker" "sync_replica_cluster" "annotations" "detect_manifest_collision" "cve" "sync" "sync_docker" "sync_replica_cluster"
"scrub" "garbage_collect" "metrics" "metrics_minimal" "multiarch_index") "scrub" "garbage_collect" "metrics" "metrics_minimal" "multiarch_index" "docker_compat")
for test in ${tests[*]}; do for test in ${tests[*]}; do
${BATS} ${BATS_FLAGS} ${SCRIPTPATH}/${test}.bats > ${test}.log & pids+=($!) ${BATS} ${BATS_FLAGS} ${SCRIPTPATH}/${test}.bats > ${test}.log & pids+=($!)

View file

@ -0,0 +1,94 @@
# Note: Intended to be run as "make run-blackbox-tests" or "make run-blackbox-ci"
# Makefile target installs & checks all necessary tooling
# Extra tools that are not covered by the Makefile target need to be added in verify_prerequisites()
load helpers_zot
function verify_prerequisites {
if [ ! $(command -v curl) ]; then
echo "you need to install curl as a prerequisite to running the tests" >&3
return 1
fi
if [ ! $(command -v jq) ]; then
echo "you need to install jq as a prerequisite to running the tests" >&3
return 1
fi
return 0
}
function setup_file() {
# Verify prerequisites are available
if ! $(verify_prerequisites); then
exit 1
fi
# Download test data to a folder common to the entire suite, not just this file
skopeo --insecure-policy copy --format=oci docker://ghcr.io/project-zot/golang:1.20 oci:${TEST_DATA_DIR}/golang:1.20
# Setup zot server
local zot_root_dir=${BATS_FILE_TMPDIR}/zot
local zot_config_file=${BATS_FILE_TMPDIR}/zot_config.json
local oci_data_dir=${BATS_FILE_TMPDIR}/oci
mkdir -p ${zot_root_dir}
mkdir -p ${oci_data_dir}
zot_port=$(get_free_port)
echo ${zot_port} > ${BATS_FILE_TMPDIR}/zot.port
cat > ${zot_config_file}<<EOF
{
"distSpecVersion": "1.1.0",
"storage": {
"rootDirectory": "${zot_root_dir}"
},
"http": {
"address": "0.0.0.0",
"port": "${zot_port}",
"compat": ["docker2s2"]
},
"log": {
"level": "debug",
"output": "${BATS_FILE_TMPDIR}/zot.log"
}
}
EOF
git -C ${BATS_FILE_TMPDIR} clone https://github.com/project-zot/helm-charts.git
zot_serve ${ZOT_PATH} ${zot_config_file}
wait_zot_reachable ${zot_port}
}
function teardown() {
# conditionally printing on failure is possible from teardown but not from teardown_file
cat ${BATS_FILE_TMPDIR}/zot.log
}
function teardown_file() {
zot_stop_all
}
@test "push docker image to compatible zot" {
zot_port=`cat ${BATS_FILE_TMPDIR}/zot.port`
zot_root_dir=${BATS_FILE_TMPDIR}/zot
cat > Dockerfile <<EOF
FROM public.ecr.aws/docker/library/busybox:latest
RUN echo "hello world" > /testfile
EOF
docker build -f Dockerfile . -t localhost:${zot_port}/test:latest
run docker push localhost:${zot_port}/test:latest
[ "$status" -eq 0 ]
[ $(cat ${zot_root_dir}/test/index.json | jq .manifests[0].mediaType) = '"application/vnd.docker.distribution.manifest.v2+json"' ]
run docker pull localhost:${zot_port}/test:latest
[ "$status" -eq 0 ]
# inspect and trigger a CVE scan
run skopeo inspect --tls-verify=false docker://localhost:${zot_port}/test:latest
[ "$status" -eq 0 ]
# delete
run skopeo delete --tls-verify=false docker://localhost:${zot_port}/test:latest
[ "$status" -eq 0 ]
run skopeo inspect --tls-verify=false docker://localhost:${zot_port}/test:latest
[ "$status" -ne 0 ]
# re-push
run docker push localhost:${zot_port}/test:latest
[ "$status" -eq 0 ]
run skopeo inspect --tls-verify=false docker://localhost:${zot_port}/test:latest
[ "$status" -eq 0 ]
}

View file

@ -359,3 +359,16 @@ EOF
[ "$status" -eq 0 ] [ "$status" -eq 0 ]
[ $(echo "${lines[-1]}" | jq '.manifests | length') -eq 0 ] [ $(echo "${lines[-1]}" | jq '.manifests | length') -eq 0 ]
} }
@test "push docker image" {
zot_port=`cat ${BATS_FILE_TMPDIR}/zot.port`
cat > Dockerfile <<EOF
FROM public.ecr.aws/docker/library/busybox:latest
RUN echo "hello world" > /testfile
EOF
docker build -f Dockerfile . -t localhost:${zot_port}/test
run docker push localhost:${zot_port}/test
[ "$status" -eq 1 ]
run docker pull localhost:${zot_port}/test
[ "$status" -eq 1 ]
}