
feat(retention): added image retention policies (#1866)

feat(metaDB): add more image statistics info

Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>
peusebiu 2023-11-01 18:16:18 +02:00 committed by GitHub
parent a79d79a03a
commit 9074f8483b
71 changed files with 3454 additions and 745 deletions

View file

@ -585,8 +585,8 @@ endif
.PHONY: check-not-freebds
check-not-freebds:
ifneq ($(shell go env GOOS),freebsd)
$(error makefile target can't be run on freebsd)
ifeq ($(shell go env GOOS),freebsd)
$(error makefile target can't be run on freebsd)
endif
.PHONY: check-compatibility

View file

@ -164,4 +164,5 @@ var (
ErrFlagValueUnsupported = errors.New("supported values ")
ErrUnknownSubcommand = errors.New("cli: unknown subcommand")
ErrMultipleReposSameName = errors.New("test: can't have multiple repos with the same name")
ErrRetentionPolicyNotFound = errors.New("retention: repo or tag policy not found")
)

View file

@ -84,6 +84,12 @@ to wasted storage, and background garbage collection can be enabled with:
"gc": true,
```
Orphan blobs are removed if they are older than gcDelay.
```
"gcDelay": "2h"
```
It is also possible to store and serve images from multiple filesystems, each with
its own repository paths, dedupe, and garbage collection settings:
@ -106,6 +112,77 @@ their own repository paths, dedupe and garbage collection settings with:
},
```
## Retention
You can define tag retention rules that govern how many tags of a given repository to retain, or for how long to retain certain tags.
There are 4 possible rules for tags:
mostRecentlyPushedCount: x - top x most recently pushed tags
mostRecentlyPulledCount: x - top x most recently pulled tags
pulledWithin: x hours - tags pulled in the last x hours
pushedWithin: x hours - tags pushed in the last x hours
If ANY of these rules is met by a tag, the tag will be retained; in other words, the rules are combined with OR logic.
repositories entries use glob patterns
tag patterns use regexes
```
"retention": {
"dryRun": false, // if enabled will just log the retain action without actually removing
"delay": "24h", // is applied on untagged and referrers, will remove them only if they are older than 24h
"policies": [ // a repo will match a policy if it matches any repoNames[] glob pattern, it will select the first policy it can matches
{
"repoNames": ["infra/*", "prod/*"], // patterns to match
"deleteReferrers": false, // delete manifests with missing Subject (default is false)
"deleteUntagged": true, // delete untagged manifests (default is true)
"KeepTags": [{ // same as repo, the first pattern(this time regex) matched is the policy applied
"patterns": ["v2.*", ".*-prod"] // if there is no rule then the default is to retain always, this tagRetention will retain all tags matching the regexes in the patterns list.
},
{
"patterns": ["v3.*", ".*-prod"],
"pulledWithin": "168h" // will keep v3.* and .*-prod tags that are pulled within last 168h
}]
}, // any other tags under infra/* and prod/* will be removed, because they don't match any of the keepTags rules above
{
"repoNames": ["tmp/**"], // matches recursively all repos under tmp/
"deleteReferrers": true,
"deleteUntagged": true,
"KeepTags": [{ // will retain all tags starting with v1 and pulled within the last 168h
"patterns": ["v1.*"], // all the other tags will be removed
"pulledWithin": "168h",
"pushedWithin": "168h"
}]
},
{
"repoNames": ["**"],
"deleteReferrers": true,
"deleteUntagged": true,
"keepTags": [{
"mostRecentlyPushedCount": 10, // top 10 recently pushed tags
"mostRecentlyPulledCount": 10, // top 10 recently pulled tags
"pulledWithin": "720h",
"pushedWithin": "720h"
}]
}
]
}
```
If a repo doesn't match any policy, then that repo and all its tags are retained (the default is to not delete anything).
If keepTags is empty, then all tags are retained (the default is to retain all tags).
If there is at least one keepTags policy in the list, then all tags that don't match at least one of them will be removed!
For safety, you can add a default policy as the last entry in the list; all tags that don't match the policies above will be retained by it:
```
"keepTags": [
{
"patterns": [".*"] // will retain all tags
}
]
```
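To make the OR semantics above concrete, here is a minimal Go sketch of how a single keepTags entry could be evaluated against a tag (the count-based rules are omitted for brevity). It is only an illustration of the logic, not zot's actual implementation; the `tagStats` and `matchesKeepTags` names are made up for this example.
```
package main

import (
	"fmt"
	"regexp"
	"time"
)

// tagStats is a hypothetical view of the per-tag statistics tracked in metaDB.
type tagStats struct {
	Name       string
	LastPulled time.Time
	LastPushed time.Time
}

// keepTagsRule mirrors a keepTags entry; a nil duration means the rule is not set.
type keepTagsRule struct {
	Patterns     []string
	PulledWithin *time.Duration
	PushedWithin *time.Duration
}

// matchesKeepTags reports whether a tag is retained by this entry: the tag name must
// match one of the regex patterns, and at least one of the configured time-based
// rules must hold (OR logic). An entry with no time-based rules retains every match.
func matchesKeepTags(rule keepTagsRule, tag tagStats, now time.Time) bool {
	matched := len(rule.Patterns) == 0 // no patterns: treat every tag as matching
	for _, pattern := range rule.Patterns {
		if regexp.MustCompile(pattern).MatchString(tag.Name) {
			matched = true

			break
		}
	}

	if !matched {
		return false
	}

	// no time-based rules configured: retain by default
	if rule.PulledWithin == nil && rule.PushedWithin == nil {
		return true
	}

	if rule.PulledWithin != nil && now.Sub(tag.LastPulled) <= *rule.PulledWithin {
		return true
	}

	if rule.PushedWithin != nil && now.Sub(tag.LastPushed) <= *rule.PushedWithin {
		return true
	}

	return false
}

func main() {
	week := 168 * time.Hour
	rule := keepTagsRule{Patterns: []string{"v1.*"}, PulledWithin: &week, PushedWithin: &week}
	tag := tagStats{Name: "v1.2.3", LastPulled: time.Now().Add(-24 * time.Hour)}

	fmt.Println(matchesKeepTags(rule, tag, time.Now())) // true: pulled within the last 168h
}
```
A tag matching one of the regex patterns is kept as soon as any one of the configured time-based rules holds; only tags matched by no keepTags entry at all become candidates for removal.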
## Authentication
TLS mutual authentication and passphrase-based authentication are supported.

View file

@ -4,13 +4,13 @@
"rootDirectory": "/tmp/zot",
"gc": true,
"gcDelay": "1h",
"gcInterval": "24h",
"gcInterval": "1h",
"subPaths": {
"/a": {
"rootDirectory": "/tmp/zot1",
"gc": true,
"gcDelay": "1h",
"gcInterval": "24h"
"gcInterval": "1h"
}
}
},

View file

@ -3,9 +3,7 @@
"storage": {
"rootDirectory": "/tmp/zot",
"gc": true,
"gcReferrers": true,
"gcDelay": "2h",
"untaggedImageRetentionDelay": "4h",
"gcInterval": "1h"
},
"http": {

View file

@ -0,0 +1,68 @@
{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot",
"gc": true,
"gcDelay": "2h",
"gcInterval": "1h",
"retention": {
"dryRun": false,
"delay": "24h",
"policies": [
{
"repositories": ["infra/*", "prod/*"],
"deleteReferrers": false,
"keepTags": [{
"patterns": ["v2.*", ".*-prod"]
},
{
"patterns": ["v3.*", ".*-prod"],
"pulledWithin": "168h"
}]
},
{
"repositories": ["tmp/**"],
"deleteReferrers": true,
"deleteUntagged": true,
"keepTags": [{
"patterns": ["v1.*"],
"pulledWithin": "168h",
"pushedWithin": "168h"
}]
},
{
"repositories": ["**"],
"deleteReferrers": true,
"deleteUntagged": true,
"keepTags": [{
"mostRecentlyPushedCount": 10,
"mostRecentlyPulledCount": 10,
"pulledWithin": "720h",
"pushedWithin": "720h"
}]
}
]
},
"subPaths": {
"/a": {
"rootDirectory": "/tmp/zot1",
"dedupe": true,
"retention": {
"policies": [
{
"repositories": ["infra/*", "prod/*"],
"deleteReferrers": false
}
]
}
}
}
},
"http": {
"address": "127.0.0.1",
"port": "8080"
},
"log": {
"level": "debug"
}
}

View file

@ -1,37 +0,0 @@
{
"distspecversion":"1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot_to_sync",
"dedupe": false,
"gc": false
},
"http": {
"address": "127.0.0.1",
"port": "8081"
},
"log": {
"level": "debug"
},
"extensions": {
"sync": {
"registries": [
{
"urls": [
"http://localhost:8080"
],
"onDemand": true,
"tlsVerify": false,
"PollInterval": "30s",
"content": [
{
"prefix": "**"
}
]
}
]
},
"scrub": {
"interval": "24h"
}
}
}

View file

@ -35,12 +35,12 @@
}
},
{
"prefix": "/repo1/repo",
"prefix": "/repo2/repo",
"destination": "/repo",
"stripPrefix": true
},
{
"prefix": "/repo2/repo"
"prefix": "/repo3/**"
}
]
},
@ -54,7 +54,7 @@
"onDemand": false,
"content": [
{
"prefix": "/repo2",
"prefix": "**",
"tags": {
"semver": true
}

View file

@ -23,17 +23,37 @@ var (
)
type StorageConfig struct {
RootDirectory string
Dedupe bool
RemoteCache bool
GC bool
Commit bool
GCDelay time.Duration
GCInterval time.Duration
GCReferrers bool
UntaggedImageRetentionDelay time.Duration
StorageDriver map[string]interface{} `mapstructure:",omitempty"`
CacheDriver map[string]interface{} `mapstructure:",omitempty"`
RootDirectory string
Dedupe bool
RemoteCache bool
GC bool
Commit bool
GCDelay time.Duration // applied for blobs
GCInterval time.Duration
Retention ImageRetention
StorageDriver map[string]interface{} `mapstructure:",omitempty"`
CacheDriver map[string]interface{} `mapstructure:",omitempty"`
}
type ImageRetention struct {
DryRun bool
Delay time.Duration // applied for referrers and untagged
Policies []RetentionPolicy
}
type RetentionPolicy struct {
Repositories []string
DeleteReferrers bool
DeleteUntagged *bool
KeepTags []KeepTagsPolicy
}
type KeepTagsPolicy struct {
Patterns []string
PulledWithin *time.Duration
PushedWithin *time.Duration
MostRecentlyPushedCount int
MostRecentlyPulledCount int
}
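Note the pointer fields here (`DeleteUntagged *bool`, `PulledWithin`/`PushedWithin *time.Duration`): they let the config loader tell an omitted key apart from an explicit zero value, which is why `applyDefaultValues` further down fills in `deleteUntagged` only when the key is absent. A minimal, self-contained sketch of that pattern follows; the struct copies exist only for this illustration, and the default of `true` comes from the documented default above.
```
package main

import (
	"fmt"
	"time"
)

// Minimal mirrors of the config types above, just for this illustration.
type KeepTagsPolicy struct {
	Patterns                []string
	PulledWithin            *time.Duration
	PushedWithin            *time.Duration
	MostRecentlyPushedCount int
	MostRecentlyPulledCount int
}

type RetentionPolicy struct {
	Repositories    []string
	DeleteReferrers bool
	DeleteUntagged  *bool // pointer: nil means "not configured", so a default can be applied
	KeepTags        []KeepTagsPolicy
}

func main() {
	week := 168 * time.Hour
	deleteUntaggedDefault := true // documented default

	policy := RetentionPolicy{
		Repositories:    []string{"infra/*", "prod/*"},
		DeleteReferrers: true,
		KeepTags: []KeepTagsPolicy{
			{Patterns: []string{"v1.*"}, PulledWithin: &week},
		},
		// DeleteUntagged left nil: the user did not set the key
	}

	// Apply the default only when the key was omitted; an explicit `false` is kept.
	if policy.DeleteUntagged == nil {
		policy.DeleteUntagged = &deleteUntaggedDefault
	}

	fmt.Println(*policy.DeleteUntagged) // true
}
```
With a plain `bool`, an explicit `"deleteUntagged": false` in the config would be indistinguishable from an omitted key.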
type TLSConfig struct {
@ -195,9 +215,11 @@ func New() *Config {
BinaryType: BinaryType,
Storage: GlobalStorageConfig{
StorageConfig: StorageConfig{
GC: true, GCReferrers: true, GCDelay: storageConstants.DefaultGCDelay,
UntaggedImageRetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
GCInterval: storageConstants.DefaultGCInterval, Dedupe: true,
Dedupe: true,
GC: true,
GCDelay: storageConstants.DefaultGCDelay,
GCInterval: storageConstants.DefaultGCInterval,
Retention: ImageRetention{},
},
},
HTTP: HTTPConfig{Address: "127.0.0.1", Port: "8080", Auth: &AuthConfig{FailDelay: 0}},
@ -373,6 +395,42 @@ func (c *Config) IsImageTrustEnabled() bool {
return c.Extensions != nil && c.Extensions.Trust != nil && *c.Extensions.Trust.Enable
}
// check if tags retention is enabled.
func (c *Config) IsRetentionEnabled() bool {
var needsMetaDB bool
for _, retentionPolicy := range c.Storage.Retention.Policies {
for _, tagRetentionPolicy := range retentionPolicy.KeepTags {
if c.isTagsRetentionEnabled(tagRetentionPolicy) {
needsMetaDB = true
}
}
}
for _, subpath := range c.Storage.SubPaths {
for _, retentionPolicy := range subpath.Retention.Policies {
for _, tagRetentionPolicy := range retentionPolicy.KeepTags {
if c.isTagsRetentionEnabled(tagRetentionPolicy) {
needsMetaDB = true
}
}
}
}
return needsMetaDB
}
func (c *Config) isTagsRetentionEnabled(tagRetentionPolicy KeepTagsPolicy) bool {
if tagRetentionPolicy.MostRecentlyPulledCount != 0 ||
tagRetentionPolicy.MostRecentlyPushedCount != 0 ||
tagRetentionPolicy.PulledWithin != nil ||
tagRetentionPolicy.PushedWithin != nil {
return true
}
return false
}
func (c *Config) IsCosignEnabled() bool {
return c.IsImageTrustEnabled() && c.Extensions.Trust.Cosign
}

View file

@ -65,6 +65,7 @@ func TestConfig(t *testing.T) {
So(err, ShouldBeNil)
So(isSame, ShouldBeTrue)
})
Convey("Test DeepCopy() & Sanitize()", t, func() {
conf := config.New()
So(conf, ShouldNotBeNil)
@ -81,4 +82,48 @@ func TestConfig(t *testing.T) {
err = config.DeepCopy(obj, conf)
So(err, ShouldNotBeNil)
})
Convey("Test IsRetentionEnabled()", t, func() {
conf := config.New()
So(conf.IsRetentionEnabled(), ShouldBeFalse)
conf.Storage.Retention.Policies = []config.RetentionPolicy{
{
Repositories: []string{"repo"},
},
}
So(conf.IsRetentionEnabled(), ShouldBeFalse)
policies := []config.RetentionPolicy{
{
Repositories: []string{"repo"},
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{"tag"},
MostRecentlyPulledCount: 2,
},
},
},
}
conf.Storage.Retention = config.ImageRetention{
Policies: policies,
}
So(conf.IsRetentionEnabled(), ShouldBeTrue)
subPaths := make(map[string]config.StorageConfig)
subPaths["/a"] = config.StorageConfig{
GC: true,
Retention: config.ImageRetention{
Policies: policies,
},
}
conf.Storage.SubPaths = subPaths
So(conf.IsRetentionEnabled(), ShouldBeTrue)
})
}

View file

@ -277,7 +277,8 @@ func (c *Controller) initCookieStore() error {
func (c *Controller) InitMetaDB(reloadCtx context.Context) error {
// init metaDB if search is enabled or we need to store user profiles, api keys or signatures
if c.Config.IsSearchEnabled() || c.Config.IsBasicAuthnEnabled() || c.Config.IsImageTrustEnabled() {
if c.Config.IsSearchEnabled() || c.Config.IsBasicAuthnEnabled() || c.Config.IsImageTrustEnabled() ||
c.Config.IsRetentionEnabled() {
driver, err := meta.New(c.Config.Storage.StorageConfig, c.Log) //nolint:contextcheck
if err != nil {
return err
@ -293,7 +294,7 @@ func (c *Controller) InitMetaDB(reloadCtx context.Context) error {
return err
}
err = meta.ParseStorage(driver, c.StoreController, c.Log)
err = meta.ParseStorage(driver, c.StoreController, c.Log) //nolint: contextcheck
if err != nil {
return err
}
@ -309,10 +310,30 @@ func (c *Controller) LoadNewConfig(reloadCtx context.Context, newConfig *config.
c.Config.HTTP.AccessControl = newConfig.HTTP.AccessControl
// reload periodical gc config
c.Config.Storage.GCInterval = newConfig.Storage.GCInterval
c.Config.Storage.GC = newConfig.Storage.GC
c.Config.Storage.Dedupe = newConfig.Storage.Dedupe
c.Config.Storage.GCDelay = newConfig.Storage.GCDelay
c.Config.Storage.GCReferrers = newConfig.Storage.GCReferrers
c.Config.Storage.GCInterval = newConfig.Storage.GCInterval
// only if we have a metaDB already in place
if c.Config.IsRetentionEnabled() {
c.Config.Storage.Retention = newConfig.Storage.Retention
}
for subPath, storageConfig := range newConfig.Storage.SubPaths {
subPathConfig, ok := c.Config.Storage.SubPaths[subPath]
if ok {
subPathConfig.GC = storageConfig.GC
subPathConfig.Dedupe = storageConfig.Dedupe
subPathConfig.GCDelay = storageConfig.GCDelay
subPathConfig.GCInterval = storageConfig.GCInterval
// only if we have a metaDB already in place
if c.Config.IsRetentionEnabled() {
subPathConfig.Retention = storageConfig.Retention
}
c.Config.Storage.SubPaths[subPath] = subPathConfig
}
}
// reload background tasks
if newConfig.Extensions != nil {
@ -356,10 +377,9 @@ func (c *Controller) StartBackgroundTasks(reloadCtx context.Context) {
// Enable running garbage-collect periodically for DefaultStore
if c.Config.Storage.GC {
gc := gc.NewGarbageCollect(c.StoreController.DefaultStore, c.MetaDB, gc.Options{
Referrers: c.Config.Storage.GCReferrers,
Delay: c.Config.Storage.GCDelay,
RetentionDelay: c.Config.Storage.UntaggedImageRetentionDelay,
}, c.Log)
ImageRetention: c.Config.Storage.Retention,
}, c.Audit, c.Log)
gc.CleanImageStorePeriodically(c.Config.Storage.GCInterval, taskScheduler)
}
@ -383,10 +403,9 @@ func (c *Controller) StartBackgroundTasks(reloadCtx context.Context) {
if storageConfig.GC {
gc := gc.NewGarbageCollect(c.StoreController.SubStore[route], c.MetaDB,
gc.Options{
Referrers: storageConfig.GCReferrers,
Delay: storageConfig.GCDelay,
RetentionDelay: storageConfig.UntaggedImageRetentionDelay,
}, c.Log)
ImageRetention: storageConfig.Retention,
}, c.Audit, c.Log)
gc.CleanImageStorePeriodically(storageConfig.GCInterval, taskScheduler)
}

View file

@ -5122,6 +5122,7 @@ func TestHardLink(t *testing.T) {
port := test.GetFreePort()
conf := config.New()
conf.HTTP.Port = port
conf.Storage.GC = false
dir := t.TempDir()
@ -7781,6 +7782,8 @@ func TestGCSignaturesAndUntaggedManifestsWithMetaDB(t *testing.T) {
ctx := context.Background()
Convey("Make controller", t, func() {
trueVal := true
Convey("Garbage collect signatures without subject and manifests without tags", func(c C) {
repoName := "testrepo" //nolint:goconst
tag := "0.0.1"
@ -7790,6 +7793,11 @@ func TestGCSignaturesAndUntaggedManifestsWithMetaDB(t *testing.T) {
conf := config.New()
conf.HTTP.Port = port
logFile, err := os.CreateTemp("", "zot-log*.txt")
So(err, ShouldBeNil)
conf.Log.Audit = logFile.Name()
value := true
searchConfig := &extconf.SearchConfig{
BaseConfig: extconf.BaseConfig{Enable: &value},
@ -7806,7 +7814,21 @@ func TestGCSignaturesAndUntaggedManifestsWithMetaDB(t *testing.T) {
ctlr.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.GC = true
ctlr.Config.Storage.GCDelay = 1 * time.Millisecond
ctlr.Config.Storage.UntaggedImageRetentionDelay = 1 * time.Millisecond
ctlr.Config.Storage.Retention = config.ImageRetention{
Delay: 1 * time.Millisecond,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{".*"}, // just for coverage
},
},
},
},
}
ctlr.Config.Storage.Dedupe = false
@ -7817,16 +7839,14 @@ func TestGCSignaturesAndUntaggedManifestsWithMetaDB(t *testing.T) {
img := CreateDefaultImage()
err := UploadImage(img, baseURL, repoName, tag)
err = UploadImage(img, baseURL, repoName, tag)
So(err, ShouldBeNil)
gc := gc.NewGarbageCollect(ctlr.StoreController.DefaultStore, ctlr.MetaDB,
gc.Options{
Referrers: ctlr.Config.Storage.GCReferrers,
Delay: ctlr.Config.Storage.GCDelay,
RetentionDelay: ctlr.Config.Storage.UntaggedImageRetentionDelay,
},
ctlr.Log)
ImageRetention: ctlr.Config.Storage.Retention,
}, ctlr.Audit, ctlr.Log)
resp, err := resty.R().Get(baseURL + fmt.Sprintf("/v2/%s/manifests/%s", repoName, tag))
So(err, ShouldBeNil)
@ -7897,7 +7917,7 @@ func TestGCSignaturesAndUntaggedManifestsWithMetaDB(t *testing.T) {
So(len(index.Manifests), ShouldEqual, 1)
// shouldn't do anything
err = gc.CleanRepo(repoName)
err = gc.CleanRepo(repoName) //nolint: contextcheck
So(err, ShouldBeNil)
// make sure both signatures are stored in repodb
@ -7988,7 +8008,7 @@ func TestGCSignaturesAndUntaggedManifestsWithMetaDB(t *testing.T) {
So(err, ShouldBeNil)
newManifestDigest := godigest.FromBytes(manifestBuf)
err = gc.CleanRepo(repoName)
err = gc.CleanRepo(repoName) //nolint: contextcheck
So(err, ShouldBeNil)
// make sure both signatures are removed from metaDB and repo reference for untagged is removed
@ -8051,7 +8071,16 @@ func TestGCSignaturesAndUntaggedManifestsWithMetaDB(t *testing.T) {
ctlr.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.GC = true
ctlr.Config.Storage.GCDelay = 1 * time.Second
ctlr.Config.Storage.UntaggedImageRetentionDelay = 1 * time.Second
ctlr.Config.Storage.Retention = config.ImageRetention{
Delay: 1 * time.Second,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
},
},
}
err := WriteImageToFileSystem(CreateDefaultImage(), repoName, tag,
ociutils.GetDefaultStoreController(dir, ctlr.Log))
@ -8063,10 +8092,9 @@ func TestGCSignaturesAndUntaggedManifestsWithMetaDB(t *testing.T) {
gc := gc.NewGarbageCollect(ctlr.StoreController.DefaultStore, ctlr.MetaDB,
gc.Options{
Referrers: ctlr.Config.Storage.GCReferrers,
Delay: ctlr.Config.Storage.GCDelay,
RetentionDelay: ctlr.Config.Storage.UntaggedImageRetentionDelay,
}, ctlr.Log)
ImageRetention: ctlr.Config.Storage.Retention,
}, ctlr.Audit, ctlr.Log)
resp, err := resty.R().Get(baseURL + fmt.Sprintf("/v2/%s/manifests/%s", repoName, tag))
So(err, ShouldBeNil)
@ -8196,8 +8224,12 @@ func TestPeriodicGC(t *testing.T) {
subPaths := make(map[string]config.StorageConfig)
subPaths["/a"] = config.StorageConfig{
RootDirectory: subDir, GC: true, GCDelay: 1 * time.Second,
UntaggedImageRetentionDelay: 1 * time.Second, GCInterval: 24 * time.Hour, RemoteCache: false, Dedupe: false,
RootDirectory: subDir,
GC: true,
GCDelay: 1 * time.Second,
GCInterval: 24 * time.Hour,
RemoteCache: false,
Dedupe: false,
} //nolint:lll // gofumpt conflicts with lll
ctlr.Config.Storage.Dedupe = false
ctlr.Config.Storage.SubPaths = subPaths

View file

@ -721,8 +721,8 @@ func (rh *RouteHandler) UpdateManifest(response http.ResponseWriter, request *ht
}
if rh.c.MetaDB != nil {
err := meta.OnUpdateManifest(name, reference, mediaType, digest, body, rh.c.StoreController, rh.c.MetaDB,
rh.c.Log)
err := meta.OnUpdateManifest(request.Context(), name, reference, mediaType,
digest, body, rh.c.StoreController, rh.c.MetaDB, rh.c.Log)
if err != nil {
response.WriteHeader(http.StatusInternalServerError)

View file

@ -158,6 +158,112 @@ func TestConfigReloader(t *testing.T) {
So(string(data), ShouldContainSubstring, "\"Actions\":[\"read\",\"create\",\"update\",\"delete\"]")
})
Convey("reload gc config", t, func(c C) {
port := test.GetFreePort()
baseURL := test.GetBaseURL(port)
logFile, err := os.CreateTemp("", "zot-log*.txt")
So(err, ShouldBeNil)
defer os.Remove(logFile.Name()) // clean up
content := fmt.Sprintf(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "%s",
"gc": false,
"dedupe": false,
"subPaths": {
"/a": {
"rootDirectory": "%s",
"gc": false,
"dedupe": false
}
}
},
"http": {
"address": "127.0.0.1",
"port": "%s"
},
"log": {
"level": "debug",
"output": "%s"
}
}`, t.TempDir(), t.TempDir(), port, logFile.Name())
cfgfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(cfgfile.Name()) // clean up
_, err = cfgfile.WriteString(content)
So(err, ShouldBeNil)
// err = cfgfile.Close()
// So(err, ShouldBeNil)
os.Args = []string{"cli_test", "serve", cfgfile.Name()}
go func() {
err = cli.NewServerRootCmd().Execute()
So(err, ShouldBeNil)
}()
test.WaitTillServerReady(baseURL)
content = fmt.Sprintf(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "%s",
"gc": true,
"dedupe": true,
"subPaths": {
"/a": {
"rootDirectory": "%s",
"gc": true,
"dedupe": true
}
}
},
"http": {
"address": "127.0.0.1",
"port": "%s"
},
"log": {
"level": "debug",
"output": "%s"
}
}`, t.TempDir(), t.TempDir(), port, logFile.Name())
err = cfgfile.Truncate(0)
So(err, ShouldBeNil)
_, err = cfgfile.Seek(0, io.SeekStart)
So(err, ShouldBeNil)
// truncate log before changing config, for the ShouldNotContainString
So(logFile.Truncate(0), ShouldBeNil)
_, err = cfgfile.WriteString(content)
So(err, ShouldBeNil)
err = cfgfile.Close()
So(err, ShouldBeNil)
// wait for config reload
time.Sleep(2 * time.Second)
data, err := os.ReadFile(logFile.Name())
So(err, ShouldBeNil)
t.Logf("log file: %s", data)
So(string(data), ShouldContainSubstring, "reloaded params")
So(string(data), ShouldContainSubstring, "loaded new configuration settings")
So(string(data), ShouldContainSubstring, "\"GC\":true")
So(string(data), ShouldContainSubstring, "\"Dedupe\":true")
So(string(data), ShouldNotContainSubstring, "\"GC\":false")
So(string(data), ShouldNotContainSubstring, "\"Dedupe\":false")
})
Convey("reload sync config", t, func(c C) {
port := test.GetFreePort()
baseURL := test.GetBaseURL(port)

View file

@ -30,9 +30,9 @@ func TestVerifyExtensionsConfig(t *testing.T) {
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := []byte(`{
content := fmt.Sprintf(`{
"storage":{
"rootDirectory":"/tmp/zot",
"rootDirectory":"%s",
"dedupe":true,
"remoteCache":false,
"storageDriver":{
@ -56,21 +56,22 @@ func TestVerifyExtensionsConfig(t *testing.T) {
}
}
}
}`)
err = os.WriteFile(tmpfile.Name(), content, 0o0600)
}`, t.TempDir())
err = os.WriteFile(tmpfile.Name(), []byte(content), 0o0600)
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
content = []byte(`{
content = fmt.Sprintf(`{
"storage":{
"rootDirectory":"/tmp/zot",
"rootDirectory":"%s",
"dedupe":true,
"remoteCache":false,
"subPaths":{
"/a": {
"rootDirectory": "/tmp/zot1",
"rootDirectory": "%s",
"dedupe": false,
"storageDriver":{
"name":"s3",
@ -95,8 +96,8 @@ func TestVerifyExtensionsConfig(t *testing.T) {
}
}
}
}`)
err = os.WriteFile(tmpfile.Name(), content, 0o0600)
}`, t.TempDir(), t.TempDir())
err = os.WriteFile(tmpfile.Name(), []byte(content), 0o0600)
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
@ -107,12 +108,12 @@ func TestVerifyExtensionsConfig(t *testing.T) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := []byte(`{"storage":{"rootDirectory":"/tmp/zot", "storageDriver": {"name": "s3"}},
content := fmt.Sprintf(`{"storage":{"rootDirectory":"%s", "storageDriver": {"name": "s3"}},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}},
"extensions":{"sync": {"registries": [{"urls":["localhost:9999"],
"maxRetries": 1, "retryDelay": "10s"}]}}}`)
_, err = tmpfile.Write(content)
"maxRetries": 1, "retryDelay": "10s"}]}}}`, t.TempDir())
_, err = tmpfile.WriteString(content)
So(err, ShouldBeNil)
err = tmpfile.Close()
So(err, ShouldBeNil)
@ -124,12 +125,12 @@ func TestVerifyExtensionsConfig(t *testing.T) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := []byte(`{"storage":{"rootDirectory":"/tmp/zot"},
content := fmt.Sprintf(`{"storage":{"rootDirectory":"%s"},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}},
"extensions":{"sync": {"registries": [{"urls":["localhost:9999"],
"maxRetries": 1, "retryDelay": "10s"}]}}}`)
_, err = tmpfile.Write(content)
"maxRetries": 1, "retryDelay": "10s"}]}}}`, t.TempDir())
_, err = tmpfile.WriteString(content)
So(err, ShouldBeNil)
err = tmpfile.Close()
So(err, ShouldBeNil)
@ -141,13 +142,13 @@ func TestVerifyExtensionsConfig(t *testing.T) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := []byte(`{"storage":{"rootDirectory":"/tmp/zot"},
content := fmt.Sprintf(`{"storage":{"rootDirectory":"%s"},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}},
"extensions":{"sync": {"registries": [{"urls":["localhost:9999"],
"maxRetries": 1, "retryDelay": "10s",
"content": [{"prefix":"[repo%^&"}]}]}}}`)
_, err = tmpfile.Write(content)
"content": [{"prefix":"[repo^&["}]}]}}}`, t.TempDir())
_, err = tmpfile.WriteString(content)
So(err, ShouldBeNil)
err = tmpfile.Close()
So(err, ShouldBeNil)
@ -159,13 +160,13 @@ func TestVerifyExtensionsConfig(t *testing.T) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := []byte(`{"storage":{"rootDirectory":"/tmp/zot"},
content := fmt.Sprintf(`{"storage":{"rootDirectory":"%s"},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}},
"extensions":{"sync": {"registries": [{"urls":["localhost:9999"],
"maxRetries": 1, "retryDelay": "10s",
"content": [{"prefix":"zot-repo","stripPrefix":true,"destination":"/"}]}]}}}`)
_, err = tmpfile.Write(content)
"content": [{"prefix":"zot-repo","stripPrefix":true,"destination":"/"}]}]}}}`, t.TempDir())
_, err = tmpfile.WriteString(content)
So(err, ShouldBeNil)
err = tmpfile.Close()
So(err, ShouldBeNil)
@ -177,13 +178,13 @@ func TestVerifyExtensionsConfig(t *testing.T) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := []byte(`{"storage":{"rootDirectory":"/tmp/zot"},
content := fmt.Sprintf(`{"storage":{"rootDirectory":"%s"},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}},
"extensions":{"sync": {"registries": [{"urls":["localhost:9999"],
"maxRetries": 1, "retryDelay": "10s",
"content": [{"prefix":"zot-repo/*","stripPrefix":true,"destination":"/"}]}]}}}`)
_, err = tmpfile.Write(content)
"content": [{"prefix":"zot-repo/*","stripPrefix":true,"destination":"/"}]}]}}}`, t.TempDir())
_, err = tmpfile.WriteString(content)
So(err, ShouldBeNil)
err = tmpfile.Close()
So(err, ShouldBeNil)
@ -196,13 +197,13 @@ func TestVerifyExtensionsConfig(t *testing.T) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := []byte(`{"storage":{"rootDirectory":"/tmp/zot"},
content := fmt.Sprintf(`{"storage":{"rootDirectory":"%s"},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}},
"extensions":{"sync": {"registries": [{"urls":["localhost:9999"],
"maxRetries": 1, "retryDelay": "10s",
"content": [{"prefix":"repo**"}]}]}}}`)
_, err = tmpfile.Write(content)
"content": [{"prefix":"repo**"}]}]}}}`, t.TempDir())
_, err = tmpfile.WriteString(content)
So(err, ShouldBeNil)
err = tmpfile.Close()
So(err, ShouldBeNil)
@ -215,12 +216,12 @@ func TestVerifyExtensionsConfig(t *testing.T) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := []byte(`{"storage":{"rootDirectory":"/tmp/zot"},
content := fmt.Sprintf(`{"storage":{"rootDirectory":"%s"},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}},
"extensions":{"sync": {"registries": [{"urls":["localhost:9999"],
"maxRetries": 10, "content": [{"prefix":"repo**"}]}]}}}`)
_, err = tmpfile.Write(content)
"maxRetries": 10, "content": [{"prefix":"repo**"}]}]}}}`, t.TempDir())
_, err = tmpfile.WriteString(content)
So(err, ShouldBeNil)
err = tmpfile.Close()
So(err, ShouldBeNil)
@ -377,7 +378,7 @@ func TestServeExtensions(t *testing.T) {
content := fmt.Sprintf(`{
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -387,7 +388,7 @@ func TestServeExtensions(t *testing.T) {
"level": "debug",
"output": "%s"
}
}`, port, logFile.Name())
}`, t.TempDir(), port, logFile.Name())
cfgfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
@ -418,7 +419,7 @@ func TestServeExtensions(t *testing.T) {
content := fmt.Sprintf(`{
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -430,7 +431,7 @@ func TestServeExtensions(t *testing.T) {
},
"extensions": {
}
}`, port, logFile.Name())
}`, t.TempDir(), port, logFile.Name())
cfgfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
@ -454,7 +455,7 @@ func TestServeExtensions(t *testing.T) {
})
}
func testWithMetricsEnabled(cfgContentFormat string) {
func testWithMetricsEnabled(rootDir string, cfgContentFormat string) {
port := GetFreePort()
baseURL := GetBaseURL(port)
logFile, err := os.CreateTemp("", "zot-log*.txt")
@ -462,7 +463,7 @@ func testWithMetricsEnabled(cfgContentFormat string) {
defer os.Remove(logFile.Name()) // clean up
content := fmt.Sprintf(cfgContentFormat, port, logFile.Name())
content := fmt.Sprintf(cfgContentFormat, rootDir, port, logFile.Name())
cfgfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
@ -502,7 +503,7 @@ func TestServeMetricsExtension(t *testing.T) {
Convey("no explicit enable", t, func(c C) {
content := `{
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -517,13 +518,13 @@ func TestServeMetricsExtension(t *testing.T) {
}
}
}`
testWithMetricsEnabled(content)
testWithMetricsEnabled(t.TempDir(), content)
})
Convey("no explicit enable but with prometheus parameter", t, func(c C) {
content := `{
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -541,13 +542,13 @@ func TestServeMetricsExtension(t *testing.T) {
}
}
}`
testWithMetricsEnabled(content)
testWithMetricsEnabled(t.TempDir(), content)
})
Convey("with explicit enable, but without prometheus parameter", t, func(c C) {
content := `{
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -563,7 +564,7 @@ func TestServeMetricsExtension(t *testing.T) {
}
}
}`
testWithMetricsEnabled(content)
testWithMetricsEnabled(t.TempDir(), content)
})
Convey("with explicit disable", t, func(c C) {
@ -575,7 +576,7 @@ func TestServeMetricsExtension(t *testing.T) {
content := fmt.Sprintf(`{
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -590,7 +591,7 @@ func TestServeMetricsExtension(t *testing.T) {
"enable": false
}
}
}`, port, logFile.Name())
}`, t.TempDir(), port, logFile.Name())
cfgfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
@ -1373,3 +1374,266 @@ func TestServeImageTrustExtension(t *testing.T) {
So(found, ShouldBeTrue)
})
}
func TestOverlappingSyncRetentionConfig(t *testing.T) {
oldArgs := os.Args
defer func() { os.Args = oldArgs }()
Convey("Test verify without overlapping sync and retention", t, func(c C) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := `{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "%s",
"gc": true,
"gcDelay": "2h",
"gcInterval": "1h",
"retention": {
"policies": [
{
"repositories": ["infra/*", "prod/*"],
"deleteReferrers": false,
"keepTags": [{
"patterns": ["v4.*", ".*-prod"]
},
{
"patterns": ["v3.*", ".*-prod"],
"pulledWithin": "168h"
}]
}
]
}
},
"http": {
"address": "127.0.0.1",
"port": "%s"
},
"log": {
"level": "debug",
"output": "%s"
},
"extensions": {
"sync": {
"enable": true,
"registries": [
{
"urls": [
"https://registry1:5000"
],
"content": [
{
"prefix": "infra/*",
"tags": {
"regex": "v4.*",
"semver": true
}
}
]
}
]
}
}
}`
logPath, err := runCLIWithConfig(t.TempDir(), content)
So(err, ShouldBeNil)
data, err := os.ReadFile(logPath)
So(err, ShouldBeNil)
defer os.Remove(logPath) // clean up
So(string(data), ShouldNotContainSubstring, "overlapping sync content")
})
Convey("Test verify with overlapping sync and retention - retention would remove v4 tags", t, func(c C) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := `{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "%s",
"gc": true,
"gcDelay": "2h",
"gcInterval": "1h",
"retention": {
"policies": [
{
"repositories": ["infra/*", "prod/*"],
"keepTags": [{
"patterns": ["v2.*", ".*-prod"]
},
{
"patterns": ["v3.*", ".*-prod"]
}]
}
]
}
},
"http": {
"address": "127.0.0.1",
"port": "%s"
},
"log": {
"level": "debug",
"output": "%s"
},
"extensions": {
"sync": {
"enable": true,
"registries": [
{
"urls": [
"https://registry1:5000"
],
"content": [
{
"prefix": "infra/*",
"tags": {
"regex": "4.*",
"semver": true
}
}
]
}
]
}
}
}`
logPath, err := runCLIWithConfig(t.TempDir(), content)
So(err, ShouldBeNil)
data, err := os.ReadFile(logPath)
So(err, ShouldBeNil)
defer os.Remove(logPath) // clean up
So(string(data), ShouldContainSubstring, "overlapping sync content\":{\"Prefix\":\"infra/*")
})
Convey("Test verify with overlapping sync and retention - retention would remove tags from repo", t, func(c C) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := `{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "%s",
"gc": true,
"gcDelay": "2h",
"gcInterval": "1h",
"retention": {
"dryRun": false,
"delay": "24h",
"policies": [
{
"repositories": ["tmp/**"],
"keepTags": [{
"patterns": ["v1.*"]
}]
}
]
}
},
"http": {
"address": "127.0.0.1",
"port": "%s"
},
"log": {
"level": "debug",
"output": "%s"
},
"extensions": {
"sync": {
"enable": true,
"registries": [
{
"urls": [
"https://registry1:5000"
],
"content": [
{
"prefix": "**",
"destination": "/tmp",
"stripPrefix": true
}
]
}
]
}
}
}
`
logPath, err := runCLIWithConfig(t.TempDir(), content)
So(err, ShouldBeNil)
data, err := os.ReadFile(logPath)
So(err, ShouldBeNil)
defer os.Remove(logPath) // clean up
So(string(data), ShouldContainSubstring, "overlapping sync content\":{\"Prefix\":\"**")
})
Convey("Test verify with overlapping sync and retention - retention would remove tags from subpath", t, func(c C) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := `{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "%s",
"gc": true,
"gcDelay": "2h",
"gcInterval": "1h",
"subPaths": {
"/synced": {
"rootDirectory": "/tmp/zot2",
"dedupe": true,
"retention": {
"policies": [
{
"repositories": ["infra/*", "prod/*"],
"deleteReferrers": false,
"keepTags": [{
}]
}
]
}
}
}
},
"http": {
"address": "127.0.0.1",
"port": "%s"
},
"log": {
"level": "debug",
"output": "%s"
},
"extensions": {
"sync": {
"enable": true,
"registries": [
{
"urls": [
"https://registry1:5000"
],
"content": [
{
"prefix": "prod/*",
"destination": "/synced"
}
]
}
]
}
}
}
`
logPath, err := runCLIWithConfig(t.TempDir(), content)
So(err, ShouldBeNil)
data, err := os.ReadFile(logPath)
So(err, ShouldBeNil)
defer os.Remove(logPath) // clean up
So(string(data), ShouldContainSubstring, "overlapping sync content\":{\"Prefix\":\"prod/*")
})
}

View file

@ -7,6 +7,7 @@ import (
"net/http"
"os"
"path"
"regexp"
"strconv"
"strings"
"time"
@ -596,8 +597,8 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
config.Storage.GCDelay = 0
}
if viperInstance.Get("storage::gcdelay") == nil {
config.Storage.UntaggedImageRetentionDelay = 0
if viperInstance.Get("storage::retention::delay") == nil {
config.Storage.Retention.Delay = 0
}
if viperInstance.Get("storage::gcinterval") == nil {
@ -605,6 +606,13 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
}
}
// apply deleteUntagged default
for idx := range config.Storage.Retention.Policies {
if !viperInstance.IsSet("storage::retention::policies::" + fmt.Sprint(idx) + "::deleteUntagged") {
config.Storage.Retention.Policies[idx].DeleteUntagged = &defaultVal
}
}
// cache settings
// global storage
@ -615,7 +623,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
config.Storage.RemoteCache = true
}
// s3 dedup=false, check for previous dedup usage and set to true if cachedb found
// s3 dedup=false, check for previous dedupe usage and set to true if cachedb found
if !config.Storage.Dedupe && config.Storage.StorageDriver != nil {
cacheDir, _ := config.Storage.StorageDriver["rootdirectory"].(string)
cachePath := path.Join(cacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName)
@ -651,28 +659,31 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
// if gc is enabled
if storageConfig.GC {
// and gcReferrers is not set, it is set to default value
if !viperInstance.IsSet("storage::subpaths::" + name + "::gcreferrers") {
storageConfig.GCReferrers = true
}
// and gcDelay is not set, it is set to default value
if !viperInstance.IsSet("storage::subpaths::" + name + "::gcdelay") {
storageConfig.GCDelay = storageConstants.DefaultGCDelay
}
// and retentionDelay is not set, it is set to default value
if !viperInstance.IsSet("storage::subpaths::" + name + "::retentiondelay") {
storageConfig.UntaggedImageRetentionDelay = storageConstants.DefaultUntaggedImgeRetentionDelay
if !viperInstance.IsSet("storage::subpaths::" + name + "::retention::delay") {
storageConfig.Retention.Delay = storageConstants.DefaultRetentionDelay
}
// and gcInterval is not set, it is set to default value
if !viperInstance.IsSet("storage::subpaths::" + name + "::gcinterval") {
storageConfig.GCInterval = storageConstants.DefaultGCInterval
}
config.Storage.SubPaths[name] = storageConfig
}
// apply deleteUntagged default
for idx := range storageConfig.Retention.Policies {
deleteUntaggedKey := "storage::subpaths::" + name + "::retention::policies::" + fmt.Sprint(idx) + "::deleteUntagged"
if !viperInstance.IsSet(deleteUntaggedKey) {
storageConfig.Retention.Policies[idx].DeleteUntagged = &defaultVal
}
}
config.Storage.SubPaths[name] = storageConfig
}
// if OpenID authentication is enabled,
@ -851,6 +862,10 @@ func validateGC(config *config.Config, log zlog.Logger) error {
}
}
if err := validateGCRules(config.Storage.Retention, log); err != nil {
return err
}
// subpaths
for name, subPath := range config.Storage.SubPaths {
if subPath.GC && subPath.GCDelay <= 0 {
@ -861,6 +876,37 @@ func validateGC(config *config.Config, log zlog.Logger) error {
return zerr.ErrBadConfig
}
if err := validateGCRules(subPath.Retention, log); err != nil {
return err
}
}
return nil
}
func validateGCRules(retention config.ImageRetention, log zlog.Logger) error {
for _, policy := range retention.Policies {
for _, pattern := range policy.Repositories {
if ok := glob.ValidatePattern(pattern); !ok {
log.Error().Err(glob.ErrBadPattern).Str("pattern", pattern).
Msg("retention repo glob pattern could not be compiled")
return zerr.ErrBadConfig
}
}
for _, tagRule := range policy.KeepTags {
for _, regex := range tagRule.Patterns {
_, err := regexp.Compile(regex)
if err != nil {
log.Error().Err(glob.ErrBadPattern).Str("regex", regex).
Msg("retention tag regex could not be compiled")
return zerr.ErrBadConfig
}
}
}
}
return nil
@ -882,9 +928,20 @@ func validateSync(config *config.Config, log zlog.Logger) error {
for _, content := range regCfg.Content {
ok := glob.ValidatePattern(content.Prefix)
if !ok {
log.Error().Err(glob.ErrBadPattern).Str("prefix", content.Prefix).Msg("sync prefix could not be compiled")
log.Error().Err(glob.ErrBadPattern).Str("prefix", content.Prefix).
Msg("sync prefix could not be compiled")
return glob.ErrBadPattern
return zerr.ErrBadConfig
}
if content.Tags != nil && content.Tags.Regex != nil {
_, err := regexp.Compile(*content.Tags.Regex)
if err != nil {
log.Error().Err(glob.ErrBadPattern).Str("regex", *content.Tags.Regex).
Msg("sync content regex could not be compiled")
return zerr.ErrBadConfig
}
}
if content.StripPrefix && !strings.Contains(content.Prefix, "/*") && content.Destination == "/" {
@ -894,6 +951,9 @@ func validateSync(config *config.Config, log zlog.Logger) error {
return zerr.ErrBadConfig
}
// check sync config doesn't overlap with retention config
validateRetentionSyncOverlaps(config, content, regCfg.URLs, log)
}
}
}

View file

@ -417,6 +417,94 @@ func TestVerify(t *testing.T) {
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldNotPanic)
})
Convey("Test verify with bad gc retention repo patterns", t, func(c C) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := []byte(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot",
"gc": true,
"retention": {
"policies": [
{
"repositories": ["["],
"deleteReferrers": false
}
]
},
"subPaths":{
"/a":{
"rootDirectory":"/zot-a",
"retention": {
"policies": [
{
"repositories": ["**"],
"deleteReferrers": true
}
]
}
}
}
},
"http": {
"address": "127.0.0.1",
"port": "8080"
},
"log": {
"level": "debug"
}
}`)
_, err = tmpfile.Write(content)
So(err, ShouldBeNil)
err = tmpfile.Close()
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
})
Convey("Test verify with bad gc image retention tag regex", t, func(c C) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
content := []byte(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot",
"gc": true,
"retention": {
"dryRun": false,
"policies": [
{
"repositories": ["infra/*"],
"deleteReferrers": false,
"deleteUntagged": true,
"keepTags": [{
"names": ["["]
}]
}
]
}
},
"http": {
"address": "127.0.0.1",
"port": "8080"
},
"log": {
"level": "debug"
}
}`)
_, err = tmpfile.Write(content)
So(err, ShouldBeNil)
err = tmpfile.Close()
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
})
Convey("Test apply defaults cache db", t, func(c C) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)

View file

@ -25,7 +25,7 @@ const (
WorkerRunningTime = 60 * time.Second
)
func TestSressTooManyOpenFiles(t *testing.T) {
func TestStressTooManyOpenFiles(t *testing.T) {
oldArgs := os.Args
defer func() { os.Args = oldArgs }()

View file

@ -0,0 +1,13 @@
//go:build !sync
// +build !sync
package server
import (
"zotregistry.io/zot/pkg/api/config"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
zlog "zotregistry.io/zot/pkg/log"
)
func validateRetentionSyncOverlaps(config *config.Config, content syncconf.Content, urls []string, log zlog.Logger) {
}

View file

@ -0,0 +1,86 @@
//go:build sync
// +build sync
package server
import (
"path"
"zotregistry.io/zot/pkg/api/config"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
"zotregistry.io/zot/pkg/extensions/sync"
zlog "zotregistry.io/zot/pkg/log"
)
func validateRetentionSyncOverlaps(config *config.Config, content syncconf.Content, urls []string, log zlog.Logger) {
cm := sync.NewContentManager([]syncconf.Content{content}, log)
prefix := content.Prefix
if content.Destination != "" {
prefix = cm.GetRepoDestination(content.Prefix)
}
repoPolicy := getRepoPolicyByPrefix(config, prefix)
if repoPolicy == nil {
return
}
if content.Tags != nil && content.Tags.Regex != nil {
areTagsRetained := false
for _, tagPolicy := range repoPolicy.KeepTags {
for _, tagRegex := range tagPolicy.Patterns {
if tagRegex == *content.Tags.Regex {
areTagsRetained = true
}
}
}
if !areTagsRetained {
log.Warn().Str("repositories pattern", prefix).
Str("tags regex", *content.Tags.Regex).
Interface("sync urls", urls).
Interface("overlapping sync content", content).
Interface("overlapping repo policy", repoPolicy).
Msgf("retention policy can overlap with the sync config, "+
"make sure retention doesn't remove syncing images with next tag regex: %s", *content.Tags.Regex)
}
} else {
log.Warn().Str("repositories pattern", prefix).
Interface("sync urls", urls).
Interface("overlapping sync content", content).
Interface("overlapping repo policy", repoPolicy).
Msg("retention policy can overlap with the sync config, make sure retention doesn't remove syncing images")
}
}
func getRepoPolicyByPrefixFromStorageConfig(config config.StorageConfig, subpath string, prefix string,
) *config.RetentionPolicy {
for _, repoPolicy := range config.Retention.Policies {
for _, repo := range repoPolicy.Repositories {
if subpath != "" {
repo = path.Join(subpath, repo)[1:] // remove starting '/'
}
if repo == prefix {
return &repoPolicy
}
}
}
return nil
}
func getRepoPolicyByPrefix(config *config.Config, prefix string) *config.RetentionPolicy {
if repoPolicy := getRepoPolicyByPrefixFromStorageConfig(config.Storage.StorageConfig, "", prefix); repoPolicy != nil {
return repoPolicy
}
for subpath, subpathConfig := range config.Storage.SubPaths {
if repoPolicy := getRepoPolicyByPrefixFromStorageConfig(subpathConfig, subpath, prefix); repoPolicy != nil {
return repoPolicy
}
}
return nil
}

View file

@ -39,7 +39,7 @@ func TestCVEConvert(t *testing.T) {
Blob: ispec.DescriptorEmptyJSON.Data,
}}).DefaultConfig().Build()
err = metaDB.SetRepoReference("repo1", "0.1.0", image.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo1", "0.1.0", image.AsImageMeta())
So(err, ShouldBeNil)
repoMetaList, err := metaDB.SearchRepos(context.Background(), "")

View file

@ -770,32 +770,32 @@ func TestCVEStruct(t *testing.T) { //nolint:gocyclo
image11 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2008, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference(repo1, "0.1.0", image11.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repo1, "0.1.0", image11.AsImageMeta())
So(err, ShouldBeNil)
image12 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2009, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference(repo1, "1.0.0", image12.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repo1, "1.0.0", image12.AsImageMeta())
So(err, ShouldBeNil)
image13 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2010, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference(repo1, "1.1.0", image13.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repo1, "1.1.0", image13.AsImageMeta())
So(err, ShouldBeNil)
image14 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2011, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference(repo1, "1.0.1", image14.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repo1, "1.0.1", image14.AsImageMeta())
So(err, ShouldBeNil)
// Create metadb data for scannable image with no vulnerabilities
image61 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2016, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference(repo6, "1.0.0", image61.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repo6, "1.0.0", image61.AsImageMeta())
So(err, ShouldBeNil)
// Create metadb data for image not supporting scanning
@ -805,50 +805,50 @@ func TestCVEStruct(t *testing.T) { //nolint:gocyclo
Digest: godigest.FromBytes([]byte{10, 10, 10}),
}}).ImageConfig(ispec.Image{Created: DateRef(2009, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference(repo2, "1.0.0", image21.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repo2, "1.0.0", image21.AsImageMeta())
So(err, ShouldBeNil)
// Create metadb data for invalid images/negative tests
image := CreateRandomImage()
err = metaDB.SetRepoReference(repo3, "invalid-manifest", image.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repo3, "invalid-manifest", image.AsImageMeta())
So(err, ShouldBeNil)
image41 := CreateImageWith().DefaultLayers().
CustomConfigBlob([]byte("invalid config blob"), ispec.MediaTypeImageConfig).Build()
err = metaDB.SetRepoReference(repo4, "invalid-config", image41.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repo4, "invalid-config", image41.AsImageMeta())
So(err, ShouldBeNil)
digest51 := godigest.FromString("abc8")
randomImgData := CreateRandomImage().AsImageMeta()
randomImgData.Digest = digest51
randomImgData.Manifests[0].Digest = digest51
err = metaDB.SetRepoReference(repo5, "nonexitent-manifest", randomImgData)
err = metaDB.SetRepoReference(context.Background(), repo5, "nonexitent-manifest", randomImgData)
So(err, ShouldBeNil)
// Create metadb data for scannable image which errors during scan
image71 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2000, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference(repo7, "1.0.0", image71.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repo7, "1.0.0", image71.AsImageMeta())
So(err, ShouldBeNil)
// create multiarch image with vulnerabilities
multiarchImage := CreateRandomMultiarch()
err = metaDB.SetRepoReference(repoMultiarch, multiarchImage.Images[0].DigestStr(),
err = metaDB.SetRepoReference(context.Background(), repoMultiarch, multiarchImage.Images[0].DigestStr(),
multiarchImage.Images[0].AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repoMultiarch, multiarchImage.Images[1].DigestStr(),
err = metaDB.SetRepoReference(context.Background(), repoMultiarch, multiarchImage.Images[1].DigestStr(),
multiarchImage.Images[1].AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repoMultiarch, multiarchImage.Images[2].DigestStr(),
err = metaDB.SetRepoReference(context.Background(), repoMultiarch, multiarchImage.Images[2].DigestStr(),
multiarchImage.Images[2].AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repoMultiarch, "tagIndex", multiarchImage.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repoMultiarch, "tagIndex", multiarchImage.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoMeta("repo-with-bad-tag-digest", mTypes.RepoMeta{

View file

@ -4,6 +4,7 @@
package cveinfo_test
import (
"context"
"fmt"
"sort"
"testing"
@ -41,7 +42,7 @@ func TestCVEPagination(t *testing.T) {
Blob: ispec.DescriptorEmptyJSON.Data,
}}).ImageConfig(ispec.Image{Created: &timeStamp11}).Build()
err = metaDB.SetRepoReference("repo1", "0.1.0", image.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo1", "0.1.0", image.AsImageMeta())
So(err, ShouldBeNil)
timeStamp12 := time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC)
@ -53,7 +54,7 @@ func TestCVEPagination(t *testing.T) {
Blob: ispec.DescriptorEmptyJSON.Data,
}}).ImageConfig(ispec.Image{Created: &timeStamp12}).Build()
err = metaDB.SetRepoReference("repo1", "1.0.0", image2.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo1", "1.0.0", image2.AsImageMeta())
So(err, ShouldBeNil)
// MetaDB loaded with initial data, mock the scanner

View file

@ -74,32 +74,32 @@ func TestScanGeneratorWithMockedData(t *testing.T) { //nolint: gocyclo
image11 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2008, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference("repo1", "0.1.0", image11.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo1", "0.1.0", image11.AsImageMeta())
So(err, ShouldBeNil)
image12 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2009, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference("repo1", "1.0.0", image12.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo1", "1.0.0", image12.AsImageMeta())
So(err, ShouldBeNil)
image13 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2010, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference("repo1", "1.1.0", image13.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo1", "1.1.0", image13.AsImageMeta())
So(err, ShouldBeNil)
image14 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2011, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference("repo1", "1.0.1", image14.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo1", "1.0.1", image14.AsImageMeta())
So(err, ShouldBeNil)
// Create metadb data for scannable image with no vulnerabilities
image61 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2016, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference("repo6", "1.0.0", image61.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo6", "1.0.0", image61.AsImageMeta())
So(err, ShouldBeNil)
// Create metadb data for image not supporting scanning
@ -109,49 +109,50 @@ func TestScanGeneratorWithMockedData(t *testing.T) { //nolint: gocyclo
Digest: godigest.FromBytes([]byte{10, 10, 10}),
}}).ImageConfig(ispec.Image{Created: DateRef(2009, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference("repo2", "1.0.0", image21.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo2", "1.0.0", image21.AsImageMeta())
So(err, ShouldBeNil)
// Create metadb data for invalid images/negative tests
img := CreateRandomImage()
digest31 := img.Digest()
err = metaDB.SetRepoReference("repo3", "invalid-manifest", img.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo3", "invalid-manifest", img.AsImageMeta())
So(err, ShouldBeNil)
image41 := CreateImageWith().DefaultLayers().
CustomConfigBlob([]byte("invalid config blob"), ispec.MediaTypeImageConfig).Build()
err = metaDB.SetRepoReference("repo4", "invalid-config", image41.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo4", "invalid-config", image41.AsImageMeta())
So(err, ShouldBeNil)
image15 := CreateRandomMultiarch()
digest51 := image15.Digest()
err = metaDB.SetRepoReference("repo5", "nonexitent-manifests-for-multiarch", image15.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo5", "nonexitent-manifests-for-multiarch",
image15.AsImageMeta())
So(err, ShouldBeNil)
// Create metadb data for scannable image which errors during scan
image71 := CreateImageWith().DefaultLayers().
ImageConfig(ispec.Image{Created: DateRef(2000, 1, 1, 12, 0, 0, 0, time.UTC)}).Build()
err = metaDB.SetRepoReference("repo7", "1.0.0", image71.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo7", "1.0.0", image71.AsImageMeta())
So(err, ShouldBeNil)
// Create multiarch image with vulnerabilities
multiarchImage := CreateRandomMultiarch()
err = metaDB.SetRepoReference(repoIndex, multiarchImage.Images[0].DigestStr(),
err = metaDB.SetRepoReference(context.Background(), repoIndex, multiarchImage.Images[0].DigestStr(),
multiarchImage.Images[0].AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repoIndex, multiarchImage.Images[1].DigestStr(),
err = metaDB.SetRepoReference(context.Background(), repoIndex, multiarchImage.Images[1].DigestStr(),
multiarchImage.Images[1].AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repoIndex, multiarchImage.Images[2].DigestStr(),
err = metaDB.SetRepoReference(context.Background(), repoIndex, multiarchImage.Images[2].DigestStr(),
multiarchImage.Images[2].AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repoIndex, "tagIndex", multiarchImage.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repoIndex, "tagIndex", multiarchImage.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoMeta("repo-with-bad-tag-digest", mTypes.RepoMeta{

View file

@ -5,6 +5,7 @@ package trivy
import (
"bytes"
"context"
"encoding/json"
"os"
"path"
@ -299,7 +300,7 @@ func TestImageScannable(t *testing.T) {
Blob: ispec.DescriptorEmptyJSON.Data,
}}).ImageConfig(validConfig).Build()
err = metaDB.SetRepoReference("repo1", "valid", validImage.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo1", "valid", validImage.AsImageMeta())
if err != nil {
panic(err)
}
@ -312,7 +313,8 @@ func TestImageScannable(t *testing.T) {
Blob: ispec.DescriptorEmptyJSON.Data,
}}).ImageConfig(validConfig).Build()
err = metaDB.SetRepoReference("repo1", "unscannable-layer", imageWithUnscannableLayer.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), "repo1",
"unscannable-layer", imageWithUnscannableLayer.AsImageMeta())
if err != nil {
panic(err)
}

View file

@ -4484,7 +4484,7 @@ func TestMetaDBWhenPushingImages(t *testing.T) {
Convey("SetManifestMeta succeeds but SetRepoReference fails", func() {
ctlr.MetaDB = mocks.MetaDBMock{
SetRepoReferenceFn: func(repo, reference string, imageMeta mTypes.ImageMeta) error {
SetRepoReferenceFn: func(ctx context.Context, repo, reference string, imageMeta mTypes.ImageMeta) error {
return ErrTestError
},
}
@ -5196,7 +5196,7 @@ func TestMetaDBWhenReadingImages(t *testing.T) {
Convey("Error when incrementing", func() {
ctlr.MetaDB = mocks.MetaDBMock{
IncrementImageDownloadsFn: func(repo string, tag string) error {
UpdateStatsOnDownloadFn: func(repo string, tag string) error {
return ErrTestError
},
}
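
The controller tests above move `mocks.MetaDBMock` to the new context-aware `SetRepoReferenceFn` and the renamed `UpdateStatsOnDownloadFn`. As a reading aid, here is a minimal, self-contained sketch of the same function-field mock pattern; the types below are stand-ins, not the real `mocks`/`mTypes` packages.

```
package main

import (
	"context"
	"errors"
	"fmt"
)

// Stand-ins for the real mTypes.ImageMeta and mocks.MetaDBMock types.
type imageMeta struct{ Digest string }

type metaDBMock struct {
	SetRepoReferenceFn func(ctx context.Context, repo, reference string, meta imageMeta) error
}

func (m metaDBMock) SetRepoReference(ctx context.Context, repo, reference string, meta imageMeta) error {
	if m.SetRepoReferenceFn != nil {
		return m.SetRepoReferenceFn(ctx, repo, reference, meta)
	}

	return nil
}

func main() {
	mock := metaDBMock{
		SetRepoReferenceFn: func(_ context.Context, _, reference string, _ imageMeta) error {
			if reference == "1.0" {
				return errors.New("repo meta not found")
			}

			return nil
		},
	}

	fmt.Println(mock.SetRepoReference(context.Background(), "repo", "1.0", imageMeta{}))
}
```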

View file

@ -4,6 +4,7 @@
package sync
import (
"context"
"encoding/json"
"errors"
"fmt"
@ -164,7 +165,7 @@ func (registry *LocalRegistry) CommitImage(imageReference types.ImageReference,
}
if registry.metaDB != nil {
err = meta.SetImageMetaFromInput(repo, reference, mediaType,
err = meta.SetImageMetaFromInput(context.Background(), repo, reference, mediaType,
manifestDigest, manifestBlob, imageStore, registry.metaDB, registry.log)
if err != nil {
return fmt.Errorf("metaDB: failed to set metadata for image '%s %s': %w", repo, reference, err)
@ -222,7 +223,7 @@ func (registry *LocalRegistry) copyManifest(repo string, manifestContent []byte,
}
if registry.metaDB != nil {
err = meta.SetImageMetaFromInput(repo, reference, ispec.MediaTypeImageManifest,
err = meta.SetImageMetaFromInput(context.Background(), repo, reference, ispec.MediaTypeImageManifest,
digest, manifestContent, imageStore, registry.metaDB, registry.log)
if err != nil {
registry.log.Error().Str("errorType", common.TypeOf(err)).

View file

@ -153,7 +153,7 @@ func (ref CosignReference) SyncReferences(ctx context.Context, localRepo, remote
ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
Msg("metaDB: trying to sync cosign reference for image")
err = meta.SetImageMetaFromInput(localRepo, cosignTag, ispec.MediaTypeImageManifest,
err = meta.SetImageMetaFromInput(ctx, localRepo, cosignTag, ispec.MediaTypeImageManifest,
referenceDigest, manifestBuf, ref.storeController.GetImageStore(localRepo),
ref.metaDB, ref.log)

View file

@ -137,7 +137,7 @@ func (ref OciReferences) SyncReferences(ctx context.Context, localRepo, remoteRe
ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
Msg("metaDB: trying to add oci references for image")
err = meta.SetImageMetaFromInput(localRepo, referenceDigest.String(), referrer.MediaType,
err = meta.SetImageMetaFromInput(ctx, localRepo, referenceDigest.String(), referrer.MediaType,
referenceDigest, referenceBuf, ref.storeController.GetImageStore(localRepo),
ref.metaDB, ref.log)
if err != nil {

View file

@ -154,7 +154,8 @@ func (ref ORASReferences) SyncReferences(ctx context.Context, localRepo, remoteR
ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
Msg("metaDB: trying to sync oras artifact for image")
err := meta.SetImageMetaFromInput(localRepo, referenceDigest.String(), referrer.MediaType,
err := meta.SetImageMetaFromInput(context.Background(), localRepo, //nolint:contextcheck
referenceDigest.String(), referrer.MediaType,
referenceDigest, orasBuf, ref.storeController.GetImageStore(localRepo),
ref.metaDB, ref.log)
if err != nil {

View file

@ -113,7 +113,7 @@ func (ref TagReferences) SyncReferences(ctx context.Context, localRepo, remoteRe
ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
Msg("metaDB: trying to add oci references for image")
err = meta.SetImageMetaFromInput(localRepo, referenceDigest.String(), referrer.MediaType,
err = meta.SetImageMetaFromInput(ctx, localRepo, referenceDigest.String(), referrer.MediaType,
referenceDigest, referenceBuf, ref.storeController.GetImageStore(localRepo),
ref.metaDB, ref.log)
if err != nil {

View file

@ -337,7 +337,7 @@ func TestLocalRegistry(t *testing.T) {
Convey("trigger metaDB error on index manifest in CommitImage()", func() {
registry := NewLocalRegistry(storage.StoreController{DefaultStore: syncImgStore}, mocks.MetaDBMock{
SetRepoReferenceFn: func(repo string, reference string, imageMeta mTypes.ImageMeta) error {
SetRepoReferenceFn: func(ctx context.Context, repo string, reference string, imageMeta mTypes.ImageMeta) error {
if reference == "1.0" {
return zerr.ErrRepoMetaNotFound
}
@ -352,7 +352,7 @@ func TestLocalRegistry(t *testing.T) {
Convey("trigger metaDB error on image manifest in CommitImage()", func() {
registry := NewLocalRegistry(storage.StoreController{DefaultStore: syncImgStore}, mocks.MetaDBMock{
SetRepoReferenceFn: func(repo, reference string, imageMeta mTypes.ImageMeta) error {
SetRepoReferenceFn: func(ctx context.Context, repo, reference string, imageMeta mTypes.ImageMeta) error {
return zerr.ErrRepoMetaNotFound
},
}, log)

View file

@ -877,7 +877,7 @@ func TestOnDemand(t *testing.T) {
return nil
},
SetRepoReferenceFn: func(repo, reference string, imageMeta mTypes.ImageMeta) error {
SetRepoReferenceFn: func(ctx context.Context, repo, reference string, imageMeta mTypes.ImageMeta) error {
if strings.HasPrefix(reference, "sha256-") &&
(strings.HasSuffix(reference, remote.SignatureTagSuffix) ||
strings.HasSuffix(reference, remote.SBOMTagSuffix)) ||
@ -1017,7 +1017,7 @@ func TestOnDemand(t *testing.T) {
// metadb fails for syncReferrersTag
dctlr.MetaDB = mocks.MetaDBMock{
SetRepoReferenceFn: func(repo, reference string, imageMeta mTypes.ImageMeta) error {
SetRepoReferenceFn: func(ctx context.Context, repo, reference string, imageMeta mTypes.ImageMeta) error {
if imageMeta.Digest.String() == ociRefImage.ManifestDescriptor.Digest.String() {
return sync.ErrTestError
}

View file

@ -52,7 +52,7 @@ func NewLogger(level, output string) Logger {
return Logger{Logger: log.Hook(goroutineHook{}).With().Caller().Timestamp().Logger()}
}
func NewAuditLogger(level, audit string) *Logger {
func NewAuditLogger(level, output string) *Logger {
loggerSetTimeFormat.Do(func() {
zerolog.TimeFieldFormat = time.RFC3339Nano
})
@ -66,12 +66,16 @@ func NewAuditLogger(level, audit string) *Logger {
var auditLog zerolog.Logger
auditFile, err := os.OpenFile(audit, os.O_APPEND|os.O_WRONLY|os.O_CREATE, defaultPerms)
if err != nil {
panic(err)
}
if output == "" {
auditLog = zerolog.New(os.Stdout)
} else {
auditFile, err := os.OpenFile(output, os.O_APPEND|os.O_WRONLY|os.O_CREATE, defaultPerms)
if err != nil {
panic(err)
}
auditLog = zerolog.New(auditFile)
auditLog = zerolog.New(auditFile)
}
return &Logger{Logger: auditLog.With().Timestamp().Logger()}
}
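
The hunk above changes `NewAuditLogger` so that an empty `output` path falls back to stdout instead of calling `os.OpenFile("")` and panicking. A minimal standalone sketch of the same fallback pattern with zerolog; the `newAuditLogger` helper and file mode below are illustrative, not zot's actual package.

```
package main

import (
	"os"

	"github.com/rs/zerolog"
)

// newAuditLogger mirrors the fallback above: an empty output path selects
// stdout, otherwise the file is opened in append mode. Illustrative sketch only.
func newAuditLogger(output string) zerolog.Logger {
	if output == "" {
		return zerolog.New(os.Stdout).With().Timestamp().Logger()
	}

	auditFile, err := os.OpenFile(output, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0o600)
	if err != nil {
		panic(err)
	}

	return zerolog.New(auditFile).With().Timestamp().Logger()
}

func main() {
	newAuditLogger("").Info().Str("action", "PUT").Msg("audit entry on stdout")
}
```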

View file

@ -116,13 +116,20 @@ func (bdw *BoltDB) SetImageMeta(digest godigest.Digest, imageMeta mTypes.ImageMe
return err
}
func (bdw *BoltDB) SetRepoReference(repo string, reference string, imageMeta mTypes.ImageMeta,
func (bdw *BoltDB) SetRepoReference(ctx context.Context, repo string, reference string, imageMeta mTypes.ImageMeta,
) error {
if err := common.ValidateRepoReferenceInput(repo, reference, imageMeta.Digest); err != nil {
return err
}
err := bdw.DB.Update(func(tx *bbolt.Tx) error {
var userid string
userAc, err := reqCtx.UserAcFromContext(ctx)
if err == nil {
userid = userAc.GetUsername()
}
err = bdw.DB.Update(func(tx *bbolt.Tx) error {
repoBuck := tx.Bucket([]byte(RepoMetaBuck))
repoBlobsBuck := tx.Bucket([]byte(RepoBlobsBuck))
imageBuck := tx.Bucket([]byte(ImageMetaBuck))
@ -199,7 +206,12 @@ func (bdw *BoltDB) SetRepoReference(repo string, reference string, imageMeta mTy
}
if _, ok := protoRepoMeta.Statistics[imageMeta.Digest.String()]; !ok {
protoRepoMeta.Statistics[imageMeta.Digest.String()] = &proto_go.DescriptorStatistics{DownloadCount: 0}
protoRepoMeta.Statistics[imageMeta.Digest.String()] = &proto_go.DescriptorStatistics{
DownloadCount: 0,
LastPullTimestamp: &timestamppb.Timestamp{},
PushTimestamp: timestamppb.Now(),
PushedBy: userid,
}
}
if _, ok := protoRepoMeta.Signatures[imageMeta.Digest.String()]; !ok {
@ -219,8 +231,8 @@ func (bdw *BoltDB) SetRepoReference(repo string, reference string, imageMeta mTy
repoBlobs := &proto_go.RepoBlobs{}
if repoBlobsBytes == nil {
repoBlobs.Blobs = map[string]*proto_go.BlobInfo{}
if len(repoBlobsBytes) == 0 {
repoBlobs.Blobs = make(map[string]*proto_go.BlobInfo)
} else {
err := proto.Unmarshal(repoBlobsBytes, repoBlobs)
if err != nil {
@ -1054,7 +1066,7 @@ func (bdw *BoltDB) GetReferrersInfo(repo string, referredDigest godigest.Digest,
return referrersInfoResult, err
}
func (bdw *BoltDB) IncrementImageDownloads(repo string, reference string) error {
func (bdw *BoltDB) UpdateStatsOnDownload(repo string, reference string) error {
err := bdw.DB.Update(func(tx *bbolt.Tx) error {
buck := tx.Bucket([]byte(RepoMetaBuck))
@ -1089,6 +1101,7 @@ func (bdw *BoltDB) IncrementImageDownloads(repo string, reference string) error
}
manifestStatistics.DownloadCount++
manifestStatistics.LastPullTimestamp = timestamppb.Now()
repoMeta.Statistics[manifestDigest] = manifestStatistics
repoMetaBlob, err = proto.Marshal(&repoMeta)
@ -1271,8 +1284,8 @@ func (bdw *BoltDB) RemoveRepoReference(repo, reference string, manifestDigest go
repoBlobs := &proto_go.RepoBlobs{}
if repoBlobsBytes == nil {
repoBlobs.Blobs = map[string]*proto_go.BlobInfo{}
if len(repoBlobsBytes) == 0 {
repoBlobs.Blobs = make(map[string]*proto_go.BlobInfo)
} else {
err := proto.Unmarshal(repoBlobsBytes, repoBlobs)
if err != nil {

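The BoltDB hunks above extend the per-digest statistics at push time (`PushTimestamp`, `PushedBy`) and at pull time (`UpdateStatsOnDownload` now also records `LastPullTimestamp`). A simplified, self-contained sketch of that lifecycle; the type and helpers are stand-ins, not the proto-generated ones.

```
package main

import (
	"fmt"
	"time"
)

// Stand-in for proto_go.DescriptorStatistics with the fields added above.
type descriptorStatistics struct {
	DownloadCount     int
	LastPullTimestamp time.Time
	PushTimestamp     time.Time
	PushedBy          string
}

// recordPush initializes statistics only for digests not seen before,
// matching the `if _, ok := ...Statistics[digest]; !ok` guard above.
func recordPush(stats map[string]*descriptorStatistics, digest, user string) {
	if _, ok := stats[digest]; !ok {
		stats[digest] = &descriptorStatistics{PushTimestamp: time.Now(), PushedBy: user}
	}
}

// recordPull mirrors UpdateStatsOnDownload: bump the counter and refresh the pull time.
func recordPull(stats map[string]*descriptorStatistics, digest string) {
	if stat, ok := stats[digest]; ok {
		stat.DownloadCount++
		stat.LastPullTimestamp = time.Now()
	}
}

func main() {
	stats := map[string]*descriptorStatistics{}

	recordPush(stats, "sha256:abc", "alice")
	recordPull(stats, "sha256:abc")

	fmt.Printf("%+v\n", *stats["sha256:abc"])
}
```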
View file

@ -297,7 +297,10 @@ func GetStatisticsMap(stats map[string]*proto_go.DescriptorStatistics) map[strin
for digest, stat := range stats {
results[digest] = mTypes.DescriptorStatistics{
DownloadCount: int(stat.DownloadCount),
DownloadCount: int(stat.DownloadCount),
LastPullTimestamp: stat.LastPullTimestamp.AsTime(),
PushTimestamp: stat.PushTimestamp.AsTime(),
PushedBy: stat.PushedBy,
}
}
@ -310,7 +313,10 @@ func GetImageStatistics(stats *proto_go.DescriptorStatistics) mTypes.DescriptorS
}
return mTypes.DescriptorStatistics{
DownloadCount: int(stats.DownloadCount),
DownloadCount: int(stats.DownloadCount),
LastPullTimestamp: stats.LastPullTimestamp.AsTime(),
PushTimestamp: stats.PushTimestamp.AsTime(),
PushedBy: stats.PushedBy,
}
}

View file

@ -121,7 +121,10 @@ func GetProtoStatistics(stats map[string]mTypes.DescriptorStatistics) map[string
for digest, stat := range stats {
results[digest] = &proto_go.DescriptorStatistics{
DownloadCount: int32(stat.DownloadCount),
DownloadCount: int32(stat.DownloadCount),
LastPullTimestamp: timestamppb.New(stat.LastPullTimestamp),
PushTimestamp: timestamppb.New(stat.PushTimestamp),
PushedBy: stat.PushedBy,
}
}
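
The two converter hunks above translate between `time.Time` on the metaDB side and `*timestamppb.Timestamp` on the proto side for the new statistics fields. A small sketch of that round trip; only the protobuf well-known-types package is assumed.

```
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// time.Time -> proto, as in GetProtoStatistics.
	pushed := timestamppb.New(time.Date(2023, 11, 1, 18, 16, 18, 0, time.UTC))

	// proto -> time.Time, as in GetStatisticsMap / GetImageStatistics.
	fmt.Println(pushed.AsTime())

	// An empty &timestamppb.Timestamp{} (the initial LastPullTimestamp above)
	// converts to the Unix epoch rather than panicking.
	fmt.Println((&timestamppb.Timestamp{}).AsTime())
}
```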

View file

@ -229,20 +229,29 @@ func (dwr *DynamoDB) getProtoRepoMeta(ctx context.Context, repo string) (*proto_
return repoMeta, nil
}
func (dwr *DynamoDB) SetRepoReference(repo string, reference string, imageMeta mTypes.ImageMeta) error {
func (dwr *DynamoDB) SetRepoReference(ctx context.Context, repo string, reference string,
imageMeta mTypes.ImageMeta,
) error {
if err := common.ValidateRepoReferenceInput(repo, reference, imageMeta.Digest); err != nil {
return err
}
var userid string
userAc, err := reqCtx.UserAcFromContext(ctx)
if err == nil {
userid = userAc.GetUsername()
}
// 1. Add image data to db if needed
protoImageMeta := mConvert.GetProtoImageMeta(imageMeta)
err := dwr.SetProtoImageMeta(imageMeta.Digest, protoImageMeta)
err = dwr.SetProtoImageMeta(imageMeta.Digest, protoImageMeta) //nolint: contextcheck
if err != nil {
return err
}
repoMeta, err := dwr.getProtoRepoMeta(context.Background(), repo)
repoMeta, err := dwr.getProtoRepoMeta(ctx, repo)
if err != nil {
if !errors.Is(err, zerr.ErrRepoMetaNotFound) {
return err
@ -298,7 +307,12 @@ func (dwr *DynamoDB) SetRepoReference(repo string, reference string, imageMeta m
}
if _, ok := repoMeta.Statistics[imageMeta.Digest.String()]; !ok {
repoMeta.Statistics[imageMeta.Digest.String()] = &proto_go.DescriptorStatistics{DownloadCount: 0}
repoMeta.Statistics[imageMeta.Digest.String()] = &proto_go.DescriptorStatistics{
DownloadCount: 0,
LastPullTimestamp: &timestamppb.Timestamp{},
PushTimestamp: timestamppb.Now(),
PushedBy: userid,
}
}
if _, ok := repoMeta.Signatures[imageMeta.Digest.String()]; !ok {
@ -314,7 +328,7 @@ func (dwr *DynamoDB) SetRepoReference(repo string, reference string, imageMeta m
}
// 4. Blobs
repoBlobs, err := dwr.getRepoBlobsInfo(repo)
repoBlobs, err := dwr.getRepoBlobsInfo(repo) //nolint: contextcheck
if err != nil {
return err
}
@ -324,12 +338,12 @@ func (dwr *DynamoDB) SetRepoReference(repo string, reference string, imageMeta m
return err
}
err = dwr.setRepoBlobsInfo(repo, repoBlobs)
err = dwr.setRepoBlobsInfo(repo, repoBlobs) //nolint: contextcheck
if err != nil {
return err
}
return dwr.setProtoRepoMeta(repo, repoMeta)
return dwr.setProtoRepoMeta(repo, repoMeta) //nolint: contextcheck
}
func (dwr *DynamoDB) getRepoBlobsInfo(repo string) (*proto_go.RepoBlobs, error) {
@ -344,7 +358,7 @@ func (dwr *DynamoDB) getRepoBlobsInfo(repo string) (*proto_go.RepoBlobs, error)
}
if resp.Item == nil {
return &proto_go.RepoBlobs{Name: repo, Blobs: map[string]*proto_go.BlobInfo{"": {}}}, nil
return &proto_go.RepoBlobs{Name: repo, Blobs: make(map[string]*proto_go.BlobInfo)}, nil
}
repoBlobsBytes := []byte{}
@ -355,8 +369,8 @@ func (dwr *DynamoDB) getRepoBlobsInfo(repo string) (*proto_go.RepoBlobs, error)
}
repoBlobs := &proto_go.RepoBlobs{}
if repoBlobsBytes == nil {
repoBlobs.Blobs = map[string]*proto_go.BlobInfo{}
if len(repoBlobsBytes) == 0 {
repoBlobs.Blobs = make(map[string]*proto_go.BlobInfo)
} else {
err := proto.Unmarshal(repoBlobsBytes, repoBlobs)
if err != nil {
@ -364,6 +378,10 @@ func (dwr *DynamoDB) getRepoBlobsInfo(repo string) (*proto_go.RepoBlobs, error)
}
}
if len(repoBlobs.Blobs) == 0 {
repoBlobs.Blobs = make(map[string]*proto_go.BlobInfo)
}
return repoBlobs, nil
}
@ -926,7 +944,7 @@ func (dwr *DynamoDB) GetReferrersInfo(repo string, referredDigest godigest.Diges
return filteredResults, nil
}
func (dwr *DynamoDB) IncrementImageDownloads(repo string, reference string) error {
func (dwr *DynamoDB) UpdateStatsOnDownload(repo string, reference string) error {
repoMeta, err := dwr.getProtoRepoMeta(context.Background(), repo)
if err != nil {
return err
@ -951,6 +969,7 @@ func (dwr *DynamoDB) IncrementImageDownloads(repo string, reference string) erro
}
manifestStatistics.DownloadCount++
manifestStatistics.LastPullTimestamp = timestamppb.Now()
repoMeta.Statistics[descriptorDigest] = manifestStatistics
return dwr.setProtoRepoMeta(repo, repoMeta)
@ -1253,11 +1272,11 @@ func (dwr *DynamoDB) RemoveRepoReference(repo, reference string, manifestDigest
return err
}
err = dwr.setRepoBlobsInfo(repo, repoBlobsInfo)
err = dwr.setRepoBlobsInfo(repo, repoBlobsInfo) //nolint: contextcheck
if err != nil {
return err
}
err = dwr.setProtoRepoMeta(repo, protoRepoMeta)
err = dwr.setProtoRepoMeta(repo, protoRepoMeta) //nolint: contextcheck
return err
}
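
Both the BoltDB and DynamoDB hunks above replace `repoBlobsBytes == nil` with `len(repoBlobsBytes) == 0`, so a present-but-empty attribute is treated the same as a missing one instead of being handed to `proto.Unmarshal`. A tiny illustration of why the length check is the broader condition; the helper name is made up for this sketch.

```
package main

import "fmt"

// needsFreshBlobMap reports whether a stored payload should be replaced by an
// empty blob map; len covers both nil and zero-length slices. Illustrative helper.
func needsFreshBlobMap(raw []byte) bool {
	return len(raw) == 0
}

func main() {
	fmt.Println(needsFreshBlobMap(nil))       // true
	fmt.Println(needsFreshBlobMap([]byte{}))  // true  (a nil check alone would miss this)
	fmt.Println(needsFreshBlobMap([]byte{1})) // false
}
```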

View file

@ -65,13 +65,13 @@ func TestIterator(t *testing.T) {
So(dynamoWrapper.ResetTable(dynamoWrapper.ImageMetaTablename), ShouldBeNil)
So(dynamoWrapper.ResetTable(dynamoWrapper.RepoMetaTablename), ShouldBeNil)
err = dynamoWrapper.SetRepoReference("repo1", "tag1", CreateRandomImage().AsImageMeta())
err = dynamoWrapper.SetRepoReference(context.Background(), "repo1", "tag1", CreateRandomImage().AsImageMeta())
So(err, ShouldBeNil)
err = dynamoWrapper.SetRepoReference("repo2", "tag2", CreateRandomImage().AsImageMeta())
err = dynamoWrapper.SetRepoReference(context.Background(), "repo2", "tag2", CreateRandomImage().AsImageMeta())
So(err, ShouldBeNil)
err = dynamoWrapper.SetRepoReference("repo3", "tag3", CreateRandomImage().AsImageMeta())
err = dynamoWrapper.SetRepoReference(context.Background(), "repo3", "tag3", CreateRandomImage().AsImageMeta())
So(err, ShouldBeNil)
repoMetaAttributeIterator := mdynamodb.NewBaseDynamoAttributesIterator(

View file

@ -1,6 +1,8 @@
package meta
import (
"context"
godigest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -13,7 +15,7 @@ import (
// OnUpdateManifest is called when a new manifest is added. It updates metadb according to the type
// of image pushed (normal images, signatures, etc.). In case of any errors, it makes sure to keep
// consistency between metadb and the image store.
func OnUpdateManifest(repo, reference, mediaType string, digest godigest.Digest, body []byte,
func OnUpdateManifest(ctx context.Context, repo, reference, mediaType string, digest godigest.Digest, body []byte,
storeController storage.StoreController, metaDB mTypes.MetaDB, log log.Logger,
) error {
if zcommon.IsReferrersTag(reference) {
@ -22,7 +24,7 @@ func OnUpdateManifest(repo, reference, mediaType string, digest godigest.Digest,
imgStore := storeController.GetImageStore(repo)
err := SetImageMetaFromInput(repo, reference, mediaType, digest, body,
err := SetImageMetaFromInput(ctx, repo, reference, mediaType, digest, body,
imgStore, metaDB, log)
if err != nil {
log.Info().Str("tag", reference).Str("repository", repo).Msg("uploading image meta was unsuccessful for tag in repo")
@ -116,7 +118,7 @@ func OnGetManifest(name, reference, mediaType string, body []byte,
return nil
}
err = metaDB.IncrementImageDownloads(name, reference)
err = metaDB.UpdateStatsOnDownload(name, reference)
if err != nil {
log.Error().Err(err).Str("repository", name).Str("reference", reference).
Msg("unexpected error for image")

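The hook above now forwards the request context into `SetImageMetaFromInput`, which is what lets `SetRepoReference` record `PushedBy` for authenticated pushes (zot derives the name via `reqCtx.UserAcFromContext`, as shown in the BoltDB/DynamoDB hunks). A simplified analogue of that threading; the context key and fake metaDB below are illustrative only, not the real zot types.

```
package main

import (
	"context"
	"fmt"
)

type userKey struct{}

// fakeMetaDB stands in for mTypes.MetaDB; only the context handling matters here.
type fakeMetaDB struct{ pushedBy string }

func (db *fakeMetaDB) SetRepoReference(ctx context.Context, repo, reference string) error {
	if user, ok := ctx.Value(userKey{}).(string); ok {
		db.pushedBy = user
	}

	return nil
}

// onUpdateManifest forwards ctx instead of using context.Background(),
// mirroring the signature change above.
func onUpdateManifest(ctx context.Context, db *fakeMetaDB, repo, reference string) error {
	return db.SetRepoReference(ctx, repo, reference)
}

func main() {
	db := &fakeMetaDB{}
	ctx := context.WithValue(context.Background(), userKey{}, "alice")

	_ = onUpdateManifest(ctx, db, "repo", "1.0.0")
	fmt.Println("PushedBy:", db.pushedBy)
}
```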
View file

@ -42,7 +42,7 @@ func TestOnUpdateManifest(t *testing.T) {
err = WriteImageToFileSystem(CreateDefaultImage(), "repo", "tag1", storeController)
So(err, ShouldBeNil)
err = meta.OnUpdateManifest("repo", "tag1", ispec.MediaTypeImageManifest, image.Digest(),
err = meta.OnUpdateManifest(context.Background(), "repo", "tag1", ispec.MediaTypeImageManifest, image.Digest(),
image.ManifestDescriptor.Data, storeController, metaDB, log)
So(err, ShouldBeNil)
@ -61,7 +61,7 @@ func TestUpdateErrors(t *testing.T) {
log := log.NewLogger("debug", "")
Convey("IsReferrersTag true update", func() {
err := meta.OnUpdateManifest("repo", "sha256-123", "digest", "media", []byte("bad"),
err := meta.OnUpdateManifest(context.Background(), "repo", "sha256-123", "digest", "media", []byte("bad"),
storeController, metaDB, log)
So(err, ShouldBeNil)
})

View file

@ -559,7 +559,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
Annotations(map[string]string{ispec.AnnotationVendor: "vendor1"}).Build()
Convey("Setting a good repo", func() {
err := metaDB.SetRepoReference(repo1, tag1, imgData1)
err := metaDB.SetRepoReference(ctx, repo1, tag1, imgData1)
So(err, ShouldBeNil)
repoMeta, err := metaDB.GetRepoMeta(ctx, repo1)
@ -573,12 +573,12 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
So(err, ShouldNotBeNil)
for i := range imgMulti.Images {
err := metaDB.SetRepoReference(repo1, imgMulti.Images[i].DigestStr(),
err := metaDB.SetRepoReference(ctx, repo1, imgMulti.Images[i].DigestStr(),
imgMulti.Images[i].AsImageMeta())
So(err, ShouldBeNil)
}
err = metaDB.SetRepoReference(repo1, tag1, imgMulti.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, tag1, imgMulti.AsImageMeta())
So(err, ShouldBeNil)
image1TotalSize := multiImages[0].ManifestDescriptor.Size + multiImages[0].ConfigDescriptor.Size + 2*10
@ -596,9 +596,9 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
})
Convey("Set multiple repos", func() {
err := metaDB.SetRepoReference(repo1, tag1, imgData1)
err := metaDB.SetRepoReference(ctx, repo1, tag1, imgData1)
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo2, tag1, imgData2)
err = metaDB.SetRepoReference(ctx, repo2, tag1, imgData2)
So(err, ShouldBeNil)
repoMeta1, err := metaDB.GetRepoMeta(ctx, repo1)
@ -622,7 +622,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
layersSize := int64(2 * 10)
image1Size := imageMeta1.Manifests[0].Size + imageMeta1.Manifests[0].Manifest.Config.Size + layersSize
err := metaDB.SetRepoReference(repo1, tag1, imageMeta1)
err := metaDB.SetRepoReference(ctx, repo1, tag1, imageMeta1)
So(err, ShouldBeNil)
repoMeta, err := metaDB.GetRepoMeta(ctx, repo1)
@ -641,7 +641,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
// the layers are the same so we add them once
repoSize := image1Size + image2.ManifestDescriptor.Size + image2.ConfigDescriptor.Size
err = metaDB.SetRepoReference(repo1, tag2, imageMeta2)
err = metaDB.SetRepoReference(ctx, repo1, tag2, imageMeta2)
So(err, ShouldBeNil)
repoMeta, err = metaDB.GetRepoMeta(ctx, repo1)
@ -681,10 +681,10 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
totalRepoSize := image1Size + image2Size - layersSize
err := metaDB.SetRepoReference(repo, tag1, imageMeta1)
err := metaDB.SetRepoReference(ctx, repo, tag1, imageMeta1)
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo, tag2, imageMeta2)
err = metaDB.SetRepoReference(ctx, repo, tag2, imageMeta2)
So(err, ShouldBeNil)
Convey("Delete reference from repo", func() {
@ -765,13 +765,13 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
Build()
imageMeta2 := image2.AsImageMeta()
err := metaDB.SetRepoReference(repo1, tag1, imageMeta1)
err := metaDB.SetRepoReference(ctx, repo1, tag1, imageMeta1)
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, tag2, imageMeta2)
err = metaDB.SetRepoReference(ctx, repo1, tag2, imageMeta2)
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo2, tag2, imageMeta2)
err = metaDB.SetRepoReference(ctx, repo2, tag2, imageMeta2)
So(err, ShouldBeNil)
Convey("Get all RepoMeta", func() {
@ -805,7 +805,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
imageMeta = CreateDefaultImage().AsImageMeta()
)
err := metaDB.SetRepoReference(repo1, tag1, imageMeta)
err := metaDB.SetRepoReference(ctx, repo1, tag1, imageMeta)
So(err, ShouldBeNil)
err = metaDB.IncrementRepoStars(repo1)
@ -837,7 +837,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
imageMeta = CreateDefaultImage().AsImageMeta()
)
err := metaDB.SetRepoReference(repo1, tag1, imageMeta)
err := metaDB.SetRepoReference(ctx, repo1, tag1, imageMeta)
So(err, ShouldBeNil)
err = metaDB.IncrementRepoStars(repo1)
@ -871,7 +871,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
tag1 = "0.0.1"
)
err := metaDB.SetRepoReference(repo1, tag1, CreateDefaultImage().AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, tag1, CreateDefaultImage().AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.IncrementRepoStars(repo1)
@ -929,10 +929,10 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
// anonymous user
ctx3 := userAc.DeriveContext(ctx)
err := metaDB.SetRepoReference(repo1, tag1, CreateDefaultImage().AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, tag1, CreateDefaultImage().AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo2, tag1, CreateDefaultImage().AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo2, tag1, CreateDefaultImage().AsImageMeta())
So(err, ShouldBeNil)
repos, err := metaDB.GetStarredRepos(ctx1)
@ -1089,6 +1089,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
So(len(repos), ShouldEqual, 0)
})
//nolint: contextcheck
Convey("Test repo bookmarks for user", func() {
var (
repo1 = "repo1"
@ -1126,49 +1127,49 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
// anonymous user
ctx3 := userAc.DeriveContext(context.Background())
err := metaDB.SetRepoReference(repo1, tag1, image1)
err := metaDB.SetRepoReference(ctx, repo1, tag1, image1)
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo2, tag1, image1)
err = metaDB.SetRepoReference(ctx, repo2, tag1, image1)
So(err, ShouldBeNil)
repos, err := metaDB.GetBookmarkedRepos(ctx1)
repos, err := metaDB.GetBookmarkedRepos(ctx1) //nolint: contextcheck
So(err, ShouldBeNil)
So(len(repos), ShouldEqual, 0)
repos, err = metaDB.GetBookmarkedRepos(ctx2)
repos, err = metaDB.GetBookmarkedRepos(ctx2) //nolint: contextcheck
So(err, ShouldBeNil)
So(len(repos), ShouldEqual, 0)
// anonymous cannot use bookmarks
repos, err = metaDB.GetBookmarkedRepos(ctx3)
repos, err = metaDB.GetBookmarkedRepos(ctx3) //nolint: contextcheck
So(err, ShouldBeNil)
So(len(repos), ShouldEqual, 0)
toggleState, err := metaDB.ToggleBookmarkRepo(ctx3, repo1)
toggleState, err := metaDB.ToggleBookmarkRepo(ctx3, repo1) //nolint: contextcheck
So(err, ShouldNotBeNil)
So(toggleState, ShouldEqual, mTypes.NotChanged)
repos, err = metaDB.GetBookmarkedRepos(ctx3)
repos, err = metaDB.GetBookmarkedRepos(ctx3) //nolint: contextcheck
So(err, ShouldBeNil)
So(len(repos), ShouldEqual, 0)
// User 1 bookmarks repo 1, User 2 has no bookmarks
toggleState, err = metaDB.ToggleBookmarkRepo(ctx1, repo1)
toggleState, err = metaDB.ToggleBookmarkRepo(ctx1, repo1) //nolint: contextcheck
So(err, ShouldBeNil)
So(toggleState, ShouldEqual, mTypes.Added)
repos, err = metaDB.GetBookmarkedRepos(ctx1)
repos, err = metaDB.GetBookmarkedRepos(ctx1) //nolint: contextcheck
So(err, ShouldBeNil)
So(len(repos), ShouldEqual, 1)
So(repos, ShouldContain, repo1)
repos, err = metaDB.GetBookmarkedRepos(ctx2)
repos, err = metaDB.GetBookmarkedRepos(ctx2) //nolint: contextcheck
So(err, ShouldBeNil)
So(len(repos), ShouldEqual, 0)
// User 1 and User 2 bookmark only repo 1
toggleState, err = metaDB.ToggleBookmarkRepo(ctx2, repo1)
toggleState, err = metaDB.ToggleBookmarkRepo(ctx2, repo1) //nolint: contextcheck
So(err, ShouldBeNil)
So(toggleState, ShouldEqual, mTypes.Added)
@ -1233,17 +1234,17 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
So(len(repos), ShouldEqual, 0)
})
Convey("Test IncrementImageDownloads", func() {
Convey("Test UpdateStatsOnDownload", func() {
var (
repo1 = "repo1"
tag1 = "0.0.1"
image1 = CreateRandomImage().AsImageMeta()
)
err := metaDB.SetRepoReference(repo1, tag1, image1)
err := metaDB.SetRepoReference(ctx, repo1, tag1, image1)
So(err, ShouldBeNil)
err = metaDB.IncrementImageDownloads(repo1, tag1)
err = metaDB.UpdateStatsOnDownload(repo1, tag1)
So(err, ShouldBeNil)
repoMeta, err := metaDB.GetRepoMeta(ctx, repo1)
@ -1251,13 +1252,14 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
So(repoMeta.Statistics[image1.Digest.String()].DownloadCount, ShouldEqual, 1)
err = metaDB.IncrementImageDownloads(repo1, tag1)
err = metaDB.UpdateStatsOnDownload(repo1, tag1)
So(err, ShouldBeNil)
repoMeta, err = metaDB.GetRepoMeta(ctx, repo1)
So(err, ShouldBeNil)
So(repoMeta.Statistics[image1.Digest.String()].DownloadCount, ShouldEqual, 2)
So(time.Now(), ShouldHappenAfter, repoMeta.Statistics[image1.Digest.String()].LastPullTimestamp)
})
Convey("Test AddImageSignature", func() {
@ -1267,7 +1269,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
image1 = CreateRandomImage().AsImageMeta()
)
err := metaDB.SetRepoReference(repo1, tag1, image1)
err := metaDB.SetRepoReference(ctx, repo1, tag1, image1)
So(err, ShouldBeNil)
err = metaDB.AddManifestSignature(repo1, image1.Digest, mTypes.SignatureMetadata{
@ -1299,7 +1301,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
image1 = CreateRandomImage()
)
err := metaDB.SetRepoReference(repo1, tag1, image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, tag1, image1.AsImageMeta())
So(err, ShouldBeNil)
layerInfo := mTypes.LayerInfo{LayerDigest: "", LayerContent: []byte{}, SignatureKey: ""}
@ -1321,12 +1323,14 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
So(repoData.Signatures[image1.DigestStr()]["cosign"][0].LayersInfo[0].Date,
ShouldBeZeroValue)
})
//nolint: contextcheck
Convey("trusted signature", func() {
image1 := CreateRandomImage()
repo := "repo1"
tag := "0.0.1"
err := metaDB.SetRepoReference(repo, tag, image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo, tag, image1.AsImageMeta())
So(err, ShouldBeNil)
mediaType := jws.MediaTypeEnvelope
@ -1431,7 +1435,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
})
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, tag1, image1.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, tag1, image1.AsImageMeta())
So(err, ShouldBeNil)
repoMeta, err := metaDB.GetRepoMeta(ctx, repo1)
@ -1447,7 +1451,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
image1 = CreateRandomImage()
)
err := metaDB.SetRepoReference(repo1, tag1, image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, tag1, image1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.AddManifestSignature(repo1, image1.Digest(), mTypes.SignatureMetadata{
@ -1493,11 +1497,11 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
)
_ = repo3
Convey("Search all repos", func() {
err := metaDB.SetRepoReference(repo1, tag1, image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, tag1, image1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, tag2, image2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, tag2, image2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo2, tag3, image3.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo2, tag3, image3.AsImageMeta())
So(err, ShouldBeNil)
repoMetaList, err := metaDB.SearchRepos(ctx, "")
@ -1510,7 +1514,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
})
Convey("Search a repo by name", func() {
err := metaDB.SetRepoReference(repo1, tag1, image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, tag1, image1.AsImageMeta())
So(err, ShouldBeNil)
repoMetaList, err := metaDB.SearchRepos(ctx, repo1)
@ -1520,10 +1524,10 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
})
Convey("Search non-existing repo by name", func() {
err := metaDB.SetRepoReference(repo1, tag1, image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, tag1, image1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, tag2, image2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, tag2, image2.AsImageMeta())
So(err, ShouldBeNil)
repoMetaList, err := metaDB.SearchRepos(ctx, "RepoThatDoesntExist")
@ -1532,11 +1536,11 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
})
Convey("Search with partial match", func() {
err := metaDB.SetRepoReference("alpine", tag1, image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, "alpine", tag1, image1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("pine", tag2, image2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "pine", tag2, image2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("golang", tag3, image3.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "golang", tag3, image3.AsImageMeta())
So(err, ShouldBeNil)
repoMetaList, err := metaDB.SearchRepos(ctx, "pine")
@ -1545,11 +1549,11 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
})
Convey("Search multiple repos that share manifests", func() {
err := metaDB.SetRepoReference("alpine", tag1, image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, "alpine", tag1, image1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("pine", tag2, image1.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "pine", tag2, image1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("golang", tag3, image1.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "golang", tag3, image1.AsImageMeta())
So(err, ShouldBeNil)
repoMetaList, err := metaDB.SearchRepos(ctx, "")
@ -1558,11 +1562,11 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
})
Convey("Search repos with access control", func() {
err := metaDB.SetRepoReference(repo1, tag1, image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, tag1, image1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo2, tag2, image2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo2, tag2, image2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo3, tag3, image3.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo3, tag3, image3.AsImageMeta())
So(err, ShouldBeNil)
userAc := reqCtx.NewUserAccessControl()
@ -1572,9 +1576,9 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
repo2: true,
})
ctx := userAc.DeriveContext(context.Background())
ctx := userAc.DeriveContext(context.Background()) //nolint: contextcheck
repoMetaList, err := metaDB.SearchRepos(ctx, "repo")
repoMetaList, err := metaDB.SearchRepos(ctx, "repo") //nolint: contextcheck
So(err, ShouldBeNil)
So(len(repoMetaList), ShouldEqual, 2)
for _, k := range repoMetaList {
@ -1593,14 +1597,14 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
image1 = CreateRandomImage()
)
err := metaDB.SetRepoReference("repo", subImage1.DigestStr(), subImage1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, "repo", subImage1.DigestStr(), subImage1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", subImage2.DigestStr(), subImage2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", subImage2.DigestStr(), subImage2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", tag4, multiarch.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", tag4, multiarch.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", tag5, image1.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", tag5, image1.AsImageMeta())
So(err, ShouldBeNil)
repoMetaList, err := metaDB.SearchRepos(ctx, "repo")
@ -1625,17 +1629,17 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
ctx = context.Background()
)
err := metaDB.SetRepoReference(repo1, "0.0.1", image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, "0.0.1", image1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, "0.0.2", image3.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, "0.0.2", image3.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, "0.1.0", image2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, "0.1.0", image2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, "1.0.0", image2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, "1.0.0", image2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, "1.0.1", image2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, "1.0.1", image2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo2, "0.0.1", image3.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo2, "0.0.1", image3.AsImageMeta())
So(err, ShouldBeNil)
Convey("With exact match", func() {
@ -1740,17 +1744,17 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
image6 = CreateRandomImage()
)
err = metaDB.SetRepoReference("repo", subImage1.DigestStr(), subImage1.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", subImage1.DigestStr(), subImage1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", subImage2.DigestStr(), subImage2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", subImage2.DigestStr(), subImage2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", tag4, multiarch.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", tag4, multiarch.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", tag5, image5.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", tag5, image5.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", tag6, image6.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", tag6, image6.AsImageMeta())
So(err, ShouldBeNil)
fullImageMetaList, err := metaDB.SearchTags(ctx, "repo:0.0")
@ -1790,7 +1794,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
Convey("With referrer", func() {
refImage := CreateRandomImageWith().Subject(image1.DescriptorRef()).Build()
err := metaDB.SetRepoReference(repo1, "ref-tag", refImage.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, "ref-tag", refImage.AsImageMeta())
So(err, ShouldBeNil)
fullImageMetaList, err := metaDB.SearchTags(ctx, "repo1:0.0.1")
@ -1815,24 +1819,24 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
ctx = context.Background()
)
err := metaDB.SetRepoReference(repo1, subImage1.DigestStr(), subImage1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo1, subImage1.DigestStr(), subImage1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, subImage2.DigestStr(), subImage2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, subImage2.DigestStr(), subImage2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, "2.0.0", multiarch.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, "2.0.0", multiarch.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, "0.0.1", image1.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, "0.0.1", image1.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, "0.0.2", image3.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, "0.0.2", image3.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, "0.1.0", image2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, "0.1.0", image2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, "1.0.0", image2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, "1.0.0", image2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo1, "1.0.1", image2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo1, "1.0.1", image2.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo2, "0.0.1", image3.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo2, "0.0.1", image3.AsImageMeta())
So(err, ShouldBeNil)
Convey("Return all tags", func() {
@ -1939,7 +1943,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
Convey("Test Referrers", func() {
image1 := CreateRandomImage()
err := metaDB.SetRepoReference("repo", "tag", image1.AsImageMeta())
err := metaDB.SetRepoReference(ctx, "repo", "tag", image1.AsImageMeta())
So(err, ShouldBeNil)
// Artifact 1 with artifact type in Manifest
@ -1949,7 +1953,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
Subject(image1.DescriptorRef()).
Build()
err = metaDB.SetRepoReference("repo", artifact1.DigestStr(), artifact1.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", artifact1.DigestStr(), artifact1.AsImageMeta())
So(err, ShouldBeNil)
// Artifact 2 with artifact type in Config media type
@ -1959,7 +1963,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
Subject(image1.DescriptorRef()).
Build()
err = metaDB.SetRepoReference("repo", artifact2.DigestStr(), artifact2.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", artifact2.DigestStr(), artifact2.AsImageMeta())
So(err, ShouldBeNil)
// GetReferrers
@ -2004,13 +2008,13 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
image := CreateRandomImage()
referrer := CreateRandomImageWith().Subject(image.DescriptorRef()).Build()
err = metaDB.SetRepoReference("repo", tag, image.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", tag, image.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", refTag, referrer.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", refTag, referrer.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", referrer.DigestStr(), referrer.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", referrer.DigestStr(), referrer.AsImageMeta())
So(err, ShouldBeNil)
repoMeta, err := metaDB.GetRepoMeta(ctx, "repo")
@ -2042,13 +2046,13 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
image := CreateRandomImage()
referrer := CreateRandomImageWith().Subject(image.DescriptorRef()).Build()
err = metaDB.SetRepoReference("repo", tag, image.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", tag, image.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", refTag, referrer.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", refTag, referrer.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", referrer.DigestStr(), referrer.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", referrer.DigestStr(), referrer.AsImageMeta())
So(err, ShouldBeNil)
repoMeta, err := metaDB.GetRepoMeta(ctx, "repo")
@ -2072,13 +2076,13 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
image := CreateRandomImage()
referrer := CreateRandomImageWith().Subject(image.DescriptorRef()).Build()
err = metaDB.SetRepoReference("repo", tag, image.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", tag, image.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", referrer.DigestStr(), referrer.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", referrer.DigestStr(), referrer.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference("repo", referrer.DigestStr(), referrer.AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", referrer.DigestStr(), referrer.AsImageMeta())
So(err, ShouldBeNil)
repoMeta, err := metaDB.GetRepoMeta(ctx, "repo")
@ -2091,7 +2095,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
tag := "tag"
image := CreateRandomImage()
err := metaDB.SetRepoReference(repo, tag, image.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo, tag, image.AsImageMeta())
So(err, ShouldBeNil)
referrerWantedType := CreateRandomImageWith().
@ -2102,9 +2106,11 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
ArtifactType("not-wanted-type").
Subject(image.DescriptorRef()).Build()
err = metaDB.SetRepoReference(repo, referrerWantedType.DigestStr(), referrerWantedType.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo, referrerWantedType.DigestStr(),
referrerWantedType.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo, referrerNotWantedType.DigestStr(), referrerNotWantedType.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo, referrerNotWantedType.DigestStr(),
referrerNotWantedType.AsImageMeta())
So(err, ShouldBeNil)
referrerInfo, err := metaDB.GetReferrersInfo("repo", image.Digest(), []string{"wanted-type"})
@ -2120,7 +2126,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
Convey("Just manifests", func() {
image := CreateRandomImage()
err := metaDB.SetRepoReference(repo, tag, image.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo, tag, image.AsImageMeta())
So(err, ShouldBeNil)
imageMeta, err := metaDB.FilterImageMeta(ctx, []string{image.DigestStr()})
@ -2136,13 +2142,14 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
digests := []string{}
for i := range multi.Images {
err := metaDB.SetRepoReference(repo, multi.Images[i].DigestStr(), multi.Images[i].AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo, multi.Images[i].DigestStr(),
multi.Images[i].AsImageMeta())
So(err, ShouldBeNil)
digests = append(digests, multi.Images[i].DigestStr())
}
err := metaDB.SetRepoReference(repo, tag, multi.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo, tag, multi.AsImageMeta())
So(err, ShouldBeNil)
imageMeta, err := metaDB.FilterImageMeta(ctx, []string{multi.DigestStr()})
@ -2160,9 +2167,9 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
image := CreateRandomImage()
referrer := CreateRandomImageWith().Subject(image.DescriptorRef()).Build()
err := metaDB.SetRepoReference(repo, tag, image.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo, tag, image.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo, tag, referrer.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo, tag, referrer.AsImageMeta())
So(err, ShouldBeNil)
repoMeta, err := metaDB.GetRepoMeta(ctx, repo)
@ -2184,7 +2191,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
tag2 := "tag2"
image := CreateImageWith().DefaultLayers().PlatformConfig("image-platform", "image-os").Build()
err := metaDB.SetRepoReference(repo, tag1, image.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo, tag1, image.AsImageMeta())
So(err, ShouldBeNil)
multiarch := CreateMultiarchWith().
@ -2194,13 +2201,14 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
}).Build()
for _, img := range multiarch.Images {
err := metaDB.SetRepoReference(repo, img.DigestStr(), img.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo, img.DigestStr(), img.AsImageMeta())
So(err, ShouldBeNil)
}
err = metaDB.SetRepoReference(repo, tag2, multiarch.AsImageMeta())
err = metaDB.SetRepoReference(ctx, repo, tag2, multiarch.AsImageMeta())
So(err, ShouldBeNil)
//nolint: contextcheck
repoMetaList, err := metaDB.FilterRepos(context.Background(), mTypes.AcceptAllRepoNames,
mTypes.AcceptAllRepoMeta)
So(err, ShouldBeNil)
@ -2223,7 +2231,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
image := CreateRandomImage()
err := metaDB.SetRepoReference(repo99, "tag", image.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo99, "tag", image.AsImageMeta())
So(err, ShouldBeNil)
repoMetaList, err := metaDB.SearchRepos(ctx, repo99)
@ -2293,7 +2301,7 @@ func RunMetaDBTests(t *testing.T, metaDB mTypes.MetaDB, preparationFuncs ...func
ctx := userAc.DeriveContext(context.Background())
err = metaDB.SetRepoReference("repo", "tag", CreateDefaultImage().AsImageMeta())
err = metaDB.SetRepoReference(ctx, "repo", "tag", CreateDefaultImage().AsImageMeta())
So(err, ShouldBeNil)
_, err = metaDB.ToggleBookmarkRepo(ctx, "repo")

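Among the assertions added above is `So(time.Now(), ShouldHappenAfter, ...LastPullTimestamp)`, which checks that `UpdateStatsOnDownload` stamped a pull time in the past. A standalone GoConvey sketch of that assertion; the test name and stand-in timestamp are illustrative.

```
package stats_test

import (
	"testing"
	"time"

	. "github.com/smartystreets/goconvey/convey"
)

func TestLastPullTimestamp(t *testing.T) {
	Convey("pull timestamp is recorded in the past", t, func() {
		// Stand-in for repoMeta.Statistics[digest].LastPullTimestamp.
		lastPull := time.Now().Add(-time.Second)

		So(time.Now(), ShouldHappenAfter, lastPull)
	})
}
```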
View file

@ -1,6 +1,7 @@
package meta
import (
"context"
"encoding/json"
"errors"
"time"
@ -106,7 +107,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController storage.StoreC
reference = manifest.Digest.String()
}
err = SetImageMetaFromInput(repo, reference, manifest.MediaType, manifest.Digest, manifestBlob,
err = SetImageMetaFromInput(context.Background(), repo, reference, manifest.MediaType, manifest.Digest, manifestBlob,
imageStore, metaDB, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("tag", tag).
@ -248,7 +249,7 @@ func getNotationSignatureLayersInfo(
// SetImageMetaFromInput tries to set manifest metadata and update repo metadata by adding the current tag
// (in case the reference is a tag). The function expects image manifests and indexes (multi-arch images).
func SetImageMetaFromInput(repo, reference, mediaType string, digest godigest.Digest, blob []byte,
func SetImageMetaFromInput(ctx context.Context, repo, reference, mediaType string, digest godigest.Digest, blob []byte,
imageStore storageTypes.ImageStore, metaDB mTypes.MetaDB, log log.Logger,
) error {
var imageMeta mTypes.ImageMeta
@ -260,6 +261,8 @@ func SetImageMetaFromInput(repo, reference, mediaType string, digest godigest.Di
err := json.Unmarshal(blob, &manifestContent)
if err != nil {
log.Error().Err(err).Msg("metadb: error while getting image data")
return err
}
@ -321,9 +324,9 @@ func SetImageMetaFromInput(repo, reference, mediaType string, digest godigest.Di
return nil
}
err := metaDB.SetRepoReference(repo, reference, imageMeta)
err := metaDB.SetRepoReference(ctx, repo, reference, imageMeta)
if err != nil {
log.Error().Err(err).Msg("metadb: error while putting repo meta")
log.Error().Err(err).Msg("metadb: error while setting repo meta")
return err
}

View file

@ -8,6 +8,7 @@ import (
"os"
"path"
"testing"
"time"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -116,7 +117,7 @@ func TestParseStorageErrors(t *testing.T) {
}
Convey("metaDB.SetRepoReference", func() {
metaDB.SetRepoReferenceFn = func(repo, reference string, imageMeta mTypes.ImageMeta) error {
metaDB.SetRepoReferenceFn = func(ctx context.Context, repo, reference string, imageMeta mTypes.ImageMeta) error {
return ErrTestError
}
@ -332,16 +333,16 @@ func RunParseStorageTests(rootDir string, metaDB mTypes.MetaDB) {
err := WriteImageToFileSystem(image, repo, "tag", storeController)
So(err, ShouldBeNil)
err = metaDB.SetRepoReference(repo, "tag", image.AsImageMeta())
err = metaDB.SetRepoReference(context.Background(), repo, "tag", image.AsImageMeta())
So(err, ShouldBeNil)
err = metaDB.IncrementRepoStars(repo)
So(err, ShouldBeNil)
err = metaDB.IncrementImageDownloads(repo, "tag")
err = metaDB.UpdateStatsOnDownload(repo, "tag")
So(err, ShouldBeNil)
err = metaDB.IncrementImageDownloads(repo, "tag")
err = metaDB.UpdateStatsOnDownload(repo, "tag")
So(err, ShouldBeNil)
err = metaDB.IncrementImageDownloads(repo, "tag")
err = metaDB.UpdateStatsOnDownload(repo, "tag")
So(err, ShouldBeNil)
repoMeta, err := metaDB.GetRepoMeta(context.Background(), repo)
@ -349,6 +350,7 @@ func RunParseStorageTests(rootDir string, metaDB mTypes.MetaDB) {
So(repoMeta.Statistics[image.DigestStr()].DownloadCount, ShouldEqual, 3)
So(repoMeta.StarCount, ShouldEqual, 1)
So(time.Now(), ShouldHappenAfter, repoMeta.Statistics[image.DigestStr()].LastPullTimestamp)
err = meta.ParseStorage(metaDB, storeController, log.NewLogger("debug", ""))
So(err, ShouldBeNil)

View file

@ -628,7 +628,10 @@ type DescriptorStatistics struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
DownloadCount int32 `protobuf:"varint,1,opt,name=DownloadCount,proto3" json:"DownloadCount,omitempty"`
DownloadCount int32 `protobuf:"varint,1,opt,name=DownloadCount,proto3" json:"DownloadCount,omitempty"`
LastPullTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=LastPullTimestamp,proto3" json:"LastPullTimestamp,omitempty"`
PushTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=PushTimestamp,proto3" json:"PushTimestamp,omitempty"`
PushedBy string `protobuf:"bytes,4,opt,name=PushedBy,proto3" json:"PushedBy,omitempty"`
}
func (x *DescriptorStatistics) Reset() {
@ -670,6 +673,27 @@ func (x *DescriptorStatistics) GetDownloadCount() int32 {
return 0
}
func (x *DescriptorStatistics) GetLastPullTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.LastPullTimestamp
}
return nil
}
func (x *DescriptorStatistics) GetPushTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.PushTimestamp
}
return nil
}
func (x *DescriptorStatistics) GetPushedBy() string {
if x != nil {
return x.PushedBy
}
return ""
}
type ReferrersInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -1165,67 +1189,77 @@ var file_meta_meta_proto_rawDesc = []byte{
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64,
0x61, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x4c, 0x61, 0x73, 0x74,
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x22, 0x3c, 0x0a, 0x14, 0x44, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12,
0x24, 0x0a, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74,
0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64,
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x3a, 0x0a, 0x0d, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65,
0x72, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x31, 0x2e, 0x52,
0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x6c, 0x69, 0x73,
0x74, 0x22, 0x9c, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x49, 0x6e,
0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x06, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6f,
0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74,
0x12, 0x1c, 0x0a, 0x09, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20,
0x01, 0x28, 0x09, 0x52, 0x09, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x22,
0x0a, 0x0c, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x04,
0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x54, 0x79,
0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x65,
0x74, 0x61, 0x5f, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x49, 0x6e,
0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x0b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45,
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x22, 0xe4, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x73, 0x63,
0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73,
0x12, 0x24, 0x0a, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x75, 0x6e,
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61,
0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x11, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x75,
0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x4c,
0x61, 0x73, 0x74, 0x50, 0x75, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
0x12, 0x40, 0x0a, 0x0d, 0x50, 0x75, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x52, 0x0d, 0x50, 0x75, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x75, 0x73, 0x68, 0x65, 0x64, 0x42, 0x79, 0x18, 0x04,
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x75, 0x73, 0x68, 0x65, 0x64, 0x42, 0x79, 0x22, 0x3a,
0x0a, 0x0d, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12,
0x29, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72,
0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x9c, 0x02, 0x0a, 0x0c, 0x52,
0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x44,
0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x69, 0x67,
0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01,
0x28, 0x03, 0x52, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x4d, 0x65, 0x64,
0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4d, 0x65,
0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x72, 0x74, 0x69, 0x66,
0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41,
0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x53,
0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12,
0x48, 0x0a, 0x0b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x31, 0x2e, 0x52,
0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f,
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x41, 0x6e,
0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e,
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9d, 0x01, 0x0a, 0x12, 0x4d, 0x61,
0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
0x12, 0x36, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74,
0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x4f, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
0x22, 0x9d, 0x01, 0x0a, 0x12, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x53, 0x69, 0x67,
0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x31, 0x2e, 0x4d,
0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
0x73, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a,
0x4f, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d,
0x65, 0x74, 0x61, 0x5f, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
0x22, 0x3c, 0x0a, 0x0e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x49, 0x6e,
0x66, 0x6f, 0x12, 0x2a, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x16, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
0x74, 0x75, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x7e,
0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
0x38, 0x0a, 0x17, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x4d, 0x61, 0x6e, 0x69,
0x66, 0x65, 0x73, 0x74, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x17, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x4d, 0x61, 0x6e, 0x69, 0x66,
0x65, 0x73, 0x74, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x0a, 0x4c, 0x61, 0x79,
0x65, 0x72, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e,
0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x49, 0x6e,
0x66, 0x6f, 0x52, 0x0a, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xbe,
0x01, 0x0a, 0x0a, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x0a,
0x0b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12,
0x22, 0x0a, 0x0c, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74,
0x65, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
0x4b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x61,
0x74, 0x75, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x69, 0x67, 0x6e, 0x65,
0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12,
0x2e, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x44, 0x61, 0x74, 0x65, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x31, 0x2e,
0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3c, 0x0a, 0x0e, 0x53, 0x69, 0x67,
0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2a, 0x0a, 0x04, 0x6c,
0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x65, 0x74, 0x61,
0x5f, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x7e, 0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, 0x61,
0x74, 0x75, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x38, 0x0a, 0x17, 0x53, 0x69, 0x67, 0x6e,
0x61, 0x74, 0x75, 0x72, 0x65, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x44, 0x69, 0x67,
0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x53, 0x69, 0x67, 0x6e, 0x61,
0x74, 0x75, 0x72, 0x65, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x44, 0x69, 0x67, 0x65,
0x73, 0x74, 0x12, 0x33, 0x0a, 0x0a, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x66, 0x6f,
0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x76, 0x31,
0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x4c, 0x61, 0x79,
0x65, 0x72, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xbe, 0x01, 0x0a, 0x0a, 0x4c, 0x61, 0x79, 0x65,
0x72, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x0a, 0x0b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x44,
0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x4c, 0x61, 0x79,
0x65, 0x72, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x4c, 0x61, 0x79, 0x65,
0x72, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c,
0x4c, 0x61, 0x79, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c,
0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x4b, 0x65, 0x79,
0x12, 0x16, 0x0a, 0x06, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
0x52, 0x06, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x65,
0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x52, 0x04, 0x44, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -1286,23 +1320,25 @@ var file_meta_meta_proto_depIdxs = []int32{
19, // 12: meta_v1.RepoBlobs.Blobs:type_name -> meta_v1.RepoBlobs.BlobsEntry
26, // 13: meta_v1.BlobInfo.Platforms:type_name -> oci_v1.Platform
25, // 14: meta_v1.BlobInfo.LastUpdated:type_name -> google.protobuf.Timestamp
10, // 15: meta_v1.ReferrersInfo.list:type_name -> meta_v1.ReferrerInfo
20, // 16: meta_v1.ReferrerInfo.Annotations:type_name -> meta_v1.ReferrerInfo.AnnotationsEntry
21, // 17: meta_v1.ManifestSignatures.map:type_name -> meta_v1.ManifestSignatures.MapEntry
13, // 18: meta_v1.SignaturesInfo.list:type_name -> meta_v1.SignatureInfo
14, // 19: meta_v1.SignatureInfo.LayersInfo:type_name -> meta_v1.LayersInfo
25, // 20: meta_v1.LayersInfo.Date:type_name -> google.protobuf.Timestamp
0, // 21: meta_v1.RepoMeta.TagsEntry.value:type_name -> meta_v1.TagDescriptor
8, // 22: meta_v1.RepoMeta.StatisticsEntry.value:type_name -> meta_v1.DescriptorStatistics
11, // 23: meta_v1.RepoMeta.SignaturesEntry.value:type_name -> meta_v1.ManifestSignatures
9, // 24: meta_v1.RepoMeta.ReferrersEntry.value:type_name -> meta_v1.ReferrersInfo
7, // 25: meta_v1.RepoBlobs.BlobsEntry.value:type_name -> meta_v1.BlobInfo
12, // 26: meta_v1.ManifestSignatures.MapEntry.value:type_name -> meta_v1.SignaturesInfo
27, // [27:27] is the sub-list for method output_type
27, // [27:27] is the sub-list for method input_type
27, // [27:27] is the sub-list for extension type_name
27, // [27:27] is the sub-list for extension extendee
0, // [0:27] is the sub-list for field type_name
25, // 15: meta_v1.DescriptorStatistics.LastPullTimestamp:type_name -> google.protobuf.Timestamp
25, // 16: meta_v1.DescriptorStatistics.PushTimestamp:type_name -> google.protobuf.Timestamp
10, // 17: meta_v1.ReferrersInfo.list:type_name -> meta_v1.ReferrerInfo
20, // 18: meta_v1.ReferrerInfo.Annotations:type_name -> meta_v1.ReferrerInfo.AnnotationsEntry
21, // 19: meta_v1.ManifestSignatures.map:type_name -> meta_v1.ManifestSignatures.MapEntry
13, // 20: meta_v1.SignaturesInfo.list:type_name -> meta_v1.SignatureInfo
14, // 21: meta_v1.SignatureInfo.LayersInfo:type_name -> meta_v1.LayersInfo
25, // 22: meta_v1.LayersInfo.Date:type_name -> google.protobuf.Timestamp
0, // 23: meta_v1.RepoMeta.TagsEntry.value:type_name -> meta_v1.TagDescriptor
8, // 24: meta_v1.RepoMeta.StatisticsEntry.value:type_name -> meta_v1.DescriptorStatistics
11, // 25: meta_v1.RepoMeta.SignaturesEntry.value:type_name -> meta_v1.ManifestSignatures
9, // 26: meta_v1.RepoMeta.ReferrersEntry.value:type_name -> meta_v1.ReferrersInfo
7, // 27: meta_v1.RepoBlobs.BlobsEntry.value:type_name -> meta_v1.BlobInfo
12, // 28: meta_v1.ManifestSignatures.MapEntry.value:type_name -> meta_v1.SignaturesInfo
29, // [29:29] is the sub-list for method output_type
29, // [29:29] is the sub-list for method input_type
29, // [29:29] is the sub-list for extension type_name
29, // [29:29] is the sub-list for extension extendee
0, // [0:29] is the sub-list for field type_name
}
func init() { file_meta_meta_proto_init() }

View file

@ -76,6 +76,9 @@ message BlobInfo {
message DescriptorStatistics {
int32 DownloadCount = 1;
google.protobuf.Timestamp LastPullTimestamp = 2;
google.protobuf.Timestamp PushTimestamp = 3;
string PushedBy = 4;
}
message ReferrersInfo {
@ -112,4 +115,4 @@ message LayersInfo {
string Signer = 4;
google.protobuf.Timestamp Date = 5;
}
}

View file

@ -64,7 +64,7 @@ type MetaDB interface { //nolint:interfacebloat
SetImageMeta(digest godigest.Digest, imageMeta ImageMeta) error
// SetRepoReference sets the given image data to the repo metadata.
SetRepoReference(repo string, reference string, imageMeta ImageMeta) error
SetRepoReference(ctx context.Context, repo string, reference string, imageMeta ImageMeta) error
// SearchRepos searches for repos given a search string
SearchRepos(ctx context.Context, searchText string) ([]RepoMeta, error)
@ -116,8 +116,8 @@ type MetaDB interface { //nolint:interfacebloat
// artifact types.
GetReferrersInfo(repo string, referredDigest godigest.Digest, artifactTypes []string) ([]ReferrerInfo, error)
// IncrementImageDownloads adds 1 to the download count of an image
IncrementImageDownloads(repo string, reference string) error
// UpdateStatsOnDownload adds 1 to the download count of an image and sets the timestamp of download
UpdateStatsOnDownload(repo string, reference string) error
// FilterImageMeta returns the image data for the given digests
FilterImageMeta(ctx context.Context, digests []string) (map[string]ImageMeta, error)
@ -274,7 +274,10 @@ type Descriptor struct {
}
type DescriptorStatistics struct {
DownloadCount int
DownloadCount int
LastPullTimestamp time.Time
PushTimestamp time.Time
PushedBy string
}
type ManifestSignatures map[string][]SignatureInfo

View file

@ -0,0 +1,29 @@
package retention
import (
mTypes "zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/retention/types"
)
func GetCandidates(repoMeta mTypes.RepoMeta) []*types.Candidate {
candidates := make([]*types.Candidate, 0)
// collect all statistics of the repo's manifests
for tag, desc := range repoMeta.Tags {
for digestStr, stats := range repoMeta.Statistics {
if digestStr == desc.Digest {
candidate := &types.Candidate{
MediaType: desc.MediaType,
DigestStr: digestStr,
Tag: tag,
PushTimestamp: stats.PushTimestamp,
PullTimestamp: stats.LastPullTimestamp,
}
candidates = append(candidates, candidate)
}
}
}
return candidates
}
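For illustration, a minimal, hypothetical sketch of what GetCandidates produces. The RepoMeta shape (Name, Tags, Statistics and the Descriptor/DescriptorStatistics fields) follows the loops above; the repo name, tag, digest and timestamps are made up:

```
package main

import (
	"fmt"
	"time"

	godigest "github.com/opencontainers/go-digest"
	ispec "github.com/opencontainers/image-spec/specs-go/v1"

	mTypes "zotregistry.io/zot/pkg/meta/types"
	"zotregistry.io/zot/pkg/retention"
)

func main() {
	dgst := godigest.FromString("v1.0 manifest").String()

	// RepoMeta assembled by hand; in zot it would come from metaDB.GetRepoMeta().
	repoMeta := mTypes.RepoMeta{
		Name: "prod/app",
		Tags: map[string]mTypes.Descriptor{
			"v1.0": {Digest: dgst, MediaType: ispec.MediaTypeImageManifest},
		},
		Statistics: map[string]mTypes.DescriptorStatistics{
			dgst: {
				DownloadCount:     3,
				LastPullTimestamp: time.Now().Add(-2 * time.Hour),
				PushTimestamp:     time.Now().Add(-48 * time.Hour),
				PushedBy:          "ci-bot",
			},
		},
	}

	// One candidate per tagged manifest, carrying the pull/push timestamps
	// that the retention rules evaluate.
	for _, candidate := range retention.GetCandidates(repoMeta) {
		fmt.Println(candidate.Tag, candidate.DigestStr, candidate.PushTimestamp, candidate.PullTimestamp)
	}
}
```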

39
pkg/retention/matcher.go Normal file
View file

@ -0,0 +1,39 @@
package retention
import "regexp"
type RegexMatcher struct {
compiled map[string]*regexp.Regexp
}
func NewRegexMatcher() *RegexMatcher {
return &RegexMatcher{
make(map[string]*regexp.Regexp, 0),
}
}
// MatchesListOfRegex is used by retention; it returns true if the list of regexes is empty.
func (r *RegexMatcher) MatchesListOfRegex(name string, regexes []string) bool {
if len(regexes) == 0 {
// empty regexes matches everything in retention logic
return true
}
for _, regex := range regexes {
if tagReg, ok := r.compiled[regex]; ok {
if tagReg.MatchString(name) {
return true
}
} else {
// all are compilable because they are checked at startup
if tagReg, err := regexp.Compile(regex); err == nil {
r.compiled[regex] = tagReg
if tagReg.MatchString(name) {
return true
}
}
}
}
return false
}
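A short, hypothetical usage sketch of the matcher, reflecting how KeepTags patterns are evaluated: an empty pattern list retains everything, otherwise at least one regex must match (compiled regexes are cached in the map above):

```
package main

import (
	"fmt"

	"zotregistry.io/zot/pkg/retention"
)

func main() {
	matcher := retention.NewRegexMatcher()

	// No patterns configured: retention treats this as "match every tag".
	fmt.Println(matcher.MatchesListOfRegex("v1.0.2", nil)) // true

	// With patterns, the tag must match at least one of the regexes.
	patterns := []string{"v2.*", ".*-prod"}
	fmt.Println(matcher.MatchesListOfRegex("v2.3", patterns))        // true
	fmt.Println(matcher.MatchesListOfRegex("nightly-123", patterns)) // false
}
```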

272
pkg/retention/retention.go Normal file
View file

@ -0,0 +1,272 @@
package retention
import (
"fmt"
glob "github.com/bmatcuk/doublestar/v4"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/api/config"
zcommon "zotregistry.io/zot/pkg/common"
zlog "zotregistry.io/zot/pkg/log"
mTypes "zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/retention/types"
)
const (
// reasons for gc.
filteredByTagRules = "didn't meet any tag retention rule"
filteredByTagNames = "didn't meet any tag 'patterns' rules"
// reasons for retention.
retainedStrFormat = "retained by %s policy"
)
type candidatesRules struct {
candidates []*types.Candidate
// tag retention rules
rules []types.Rule
}
type policyManager struct {
config config.ImageRetention
regex *RegexMatcher
log zlog.Logger
auditLog *zlog.Logger
}
func NewPolicyManager(config config.ImageRetention, log zlog.Logger, auditLog *zlog.Logger) policyManager {
return policyManager{
config: config,
regex: NewRegexMatcher(),
log: log,
auditLog: auditLog,
}
}
func (p policyManager) HasDeleteUntagged(repo string) bool {
if policy, err := p.getRepoPolicy(repo); err == nil {
if policy.DeleteUntagged != nil {
return *policy.DeleteUntagged
}
return true
}
// default
return false
}
func (p policyManager) HasDeleteReferrer(repo string) bool {
if policy, err := p.getRepoPolicy(repo); err == nil {
return policy.DeleteReferrers
}
// default
return false
}
func (p policyManager) HasTagRetention(repo string) bool {
if policy, err := p.getRepoPolicy(repo); err == nil {
return len(policy.KeepTags) > 0
}
// default
return false
}
func (p policyManager) getRules(tagPolicy config.KeepTagsPolicy) []types.Rule {
rules := make([]types.Rule, 0)
if tagPolicy.MostRecentlyPulledCount != 0 {
rules = append(rules, NewLatestPull(tagPolicy.MostRecentlyPulledCount))
}
if tagPolicy.MostRecentlyPushedCount != 0 {
rules = append(rules, NewLatestPush(tagPolicy.MostRecentlyPushedCount))
}
if tagPolicy.PulledWithin != nil {
rules = append(rules, NewDaysPull(*tagPolicy.PulledWithin))
}
if tagPolicy.PushedWithin != nil {
rules = append(rules, NewDaysPush(*tagPolicy.PushedWithin))
}
return rules
}
func (p policyManager) GetRetainedTags(repoMeta mTypes.RepoMeta, index ispec.Index) []string {
repo := repoMeta.Name
matchedByName := make([]string, 0)
candidates := GetCandidates(repoMeta)
retainTags := make([]string, 0)
// we need to make sure tags for which we cannot find statistics in metaDB are not removed
actualTags := getIndexTags(index)
// find tags which are not in the candidates list; if they are not in metaDB we want to keep them
for _, tag := range actualTags {
found := false
for _, candidate := range candidates {
if candidate.Tag == tag {
found = true
}
}
if !found {
p.log.Info().Str("module", "retention").
Bool("dry-run", p.config.DryRun).
Str("repository", repo).
Str("tag", tag).
Str("decision", "keep").
Str("reason", "tag statistics not found").Msg("will keep tag")
retainTags = append(retainTags, tag)
}
}
// group all tags by tag policy
grouped := p.groupCandidatesByTagPolicy(repo, candidates)
for _, candidates := range grouped {
retainCandidates := candidates.candidates // copy
// tag rules
rules := candidates.rules
for _, retainedByName := range retainCandidates {
matchedByName = append(matchedByName, retainedByName.Tag)
}
rulesCandidates := make([]*types.Candidate, 0)
// we retain candidates if any of the below rules are met (OR logic between rules)
for _, rule := range rules {
ruleCandidates := rule.Perform(retainCandidates)
rulesCandidates = append(rulesCandidates, ruleCandidates...)
}
// if we applied any rule
if len(rules) > 0 {
retainCandidates = rulesCandidates
} // else we retain all candidates that matched the tag name patterns
for _, retainCandidate := range retainCandidates {
// there may be duplicates
if !zcommon.Contains(retainTags, retainCandidate.Tag) {
// format reason log msg
reason := fmt.Sprintf(retainedStrFormat, retainCandidate.RetainedBy)
logAction(repo, "keep", reason, retainCandidate, p.config.DryRun, &p.log)
retainTags = append(retainTags, retainCandidate.Tag)
}
}
}
// log tags which will be removed
for _, candidateInfo := range candidates {
if !zcommon.Contains(retainTags, candidateInfo.Tag) {
var reason string
if zcommon.Contains(matchedByName, candidateInfo.Tag) {
reason = filteredByTagRules
} else {
reason = filteredByTagNames
}
logAction(repo, "delete", reason, candidateInfo, p.config.DryRun, &p.log)
if p.auditLog != nil {
logAction(repo, "delete", reason, candidateInfo, p.config.DryRun, p.auditLog)
}
}
}
return retainTags
}
func (p policyManager) getRepoPolicy(repo string) (config.RetentionPolicy, error) {
for _, policy := range p.config.Policies {
for _, pattern := range policy.Repositories {
matched, err := glob.Match(pattern, repo)
if err == nil && matched {
return policy, nil
}
}
}
return config.RetentionPolicy{}, zerr.ErrRetentionPolicyNotFound
}
func (p policyManager) getTagPolicy(tag string, tagPolicies []config.KeepTagsPolicy,
) (config.KeepTagsPolicy, int, error) {
for idx, tagPolicy := range tagPolicies {
if p.regex.MatchesListOfRegex(tag, tagPolicy.Patterns) {
return tagPolicy, idx, nil
}
}
return config.KeepTagsPolicy{}, -1, zerr.ErrRetentionPolicyNotFound
}
// groupCandidatesByTagPolicy groups candidates by tag policy; tags which don't match any policy are automatically excluded from this map.
func (p policyManager) groupCandidatesByTagPolicy(repo string, candidates []*types.Candidate,
) map[int]candidatesRules {
candidatesByTagPolicy := make(map[int]candidatesRules)
// no need to check for an error; at this point we have both a repo policy for this repo and a non-nil tags policy
repoPolicy, _ := p.getRepoPolicy(repo)
for _, candidateInfo := range candidates {
tagPolicy, tagPolicyID, err := p.getTagPolicy(candidateInfo.Tag, repoPolicy.KeepTags)
if err != nil {
// no tag policy found for the current candidate, skip it (will be gc'ed)
continue
}
candidateInfo.RetainedBy = "patterns"
if _, ok := candidatesByTagPolicy[tagPolicyID]; !ok {
candidatesRules := candidatesRules{candidates: []*types.Candidate{candidateInfo}}
candidatesRules.rules = p.getRules(tagPolicy)
candidatesByTagPolicy[tagPolicyID] = candidatesRules
} else {
candidatesRules := candidatesByTagPolicy[tagPolicyID]
candidatesRules.candidates = append(candidatesRules.candidates, candidateInfo)
candidatesByTagPolicy[tagPolicyID] = candidatesRules
}
}
return candidatesByTagPolicy
}
func logAction(repo, decision, reason string, candidate *types.Candidate, dryRun bool, log *zlog.Logger) {
log.Info().Str("module", "retention").
Bool("dry-run", dryRun).
Str("repository", repo).
Str("mediaType", candidate.MediaType).
Str("digest", candidate.DigestStr).
Str("tag", candidate.Tag).
Str("lastPullTimestamp", candidate.PullTimestamp.String()).
Str("pushTimestamp", candidate.PushTimestamp.String()).
Str("decision", decision).
Str("reason", reason).Msg("applied policy")
}
func getIndexTags(index ispec.Index) []string {
tags := make([]string, 0)
for _, desc := range index.Manifests {
tag, ok := desc.Annotations[ispec.AnnotationRefName]
if ok {
tags = append(tags, tag)
}
}
return tags
}
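Putting the pieces together, a hedged sketch of how a retention configuration drives GetRetainedTags. The config field names mirror the test code further down; the repo metadata and index would normally come from metaDB and the repo's index.json, and the values here are illustrative only:

```
package main

import (
	"time"

	ispec "github.com/opencontainers/image-spec/specs-go/v1"

	"zotregistry.io/zot/pkg/api/config"
	zlog "zotregistry.io/zot/pkg/log"
	mTypes "zotregistry.io/zot/pkg/meta/types"
	"zotregistry.io/zot/pkg/retention"
)

func main() {
	week := 168 * time.Hour

	cfg := config.ImageRetention{
		DryRun: false,
		Delay:  24 * time.Hour,
		Policies: []config.RetentionPolicy{
			{
				Repositories:    []string{"prod/**"},
				DeleteReferrers: true,
				KeepTags: []config.KeepTagsPolicy{
					{
						Patterns:     []string{"v1.*"},
						PulledWithin: &week, // keep v1.* tags pulled within the last week
					},
				},
			},
		},
	}

	log := zlog.NewLogger("debug", "")
	policyMgr := retention.NewPolicyManager(cfg, log, nil) // nil audit log is allowed

	var repoMeta mTypes.RepoMeta // would come from metaDB.GetRepoMeta()
	var index ispec.Index        // would come from the repo's index.json

	// Tags not returned here become candidates for removal by the garbage collector.
	_ = policyMgr.GetRetainedTags(repoMeta, index)
}
```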

140
pkg/retention/rules.go Normal file
View file

@ -0,0 +1,140 @@
package retention
import (
"fmt"
"sort"
"time"
"zotregistry.io/zot/pkg/retention/types"
)
const (
// rules name.
daysPullName = "pulledWithin"
daysPushName = "pushedWithin"
latestPullName = "mostRecentlyPulledCount"
latestPushName = "mostRecentlyPushedCount"
)
// rules implementations.
type DaysPull struct {
duration time.Duration
}
func NewDaysPull(duration time.Duration) DaysPull {
return DaysPull{duration: duration}
}
func (dp DaysPull) Name() string {
return fmt.Sprintf("%s:%d", daysPullName, dp.duration)
}
func (dp DaysPull) Perform(candidates []*types.Candidate) []*types.Candidate {
filtered := make([]*types.Candidate, 0)
timestamp := time.Now().Add(-dp.duration)
for _, candidate := range candidates {
// we also check PushTimestamp because we don't want to delete tags pushed after the cutoff timestamp
// i.e.: if the tag doesn't meet PulledWithin: "3days" but the image is only 1 day old, then do not remove it!
if candidate.PullTimestamp.After(timestamp) || candidate.PushTimestamp.After(timestamp) {
candidate.RetainedBy = dp.Name()
filtered = append(filtered, candidate)
}
}
return filtered
}
type DaysPush struct {
duration time.Duration
}
func NewDaysPush(duration time.Duration) DaysPush {
return DaysPush{duration: duration}
}
func (dp DaysPush) Name() string {
return fmt.Sprintf("%s:%d", daysPushName, dp.duration)
}
func (dp DaysPush) Perform(candidates []*types.Candidate) []*types.Candidate {
filtered := make([]*types.Candidate, 0)
timestamp := time.Now().Add(-dp.duration)
for _, candidate := range candidates {
if candidate.PushTimestamp.After(timestamp) {
candidate.RetainedBy = dp.Name()
filtered = append(filtered, candidate)
}
}
return filtered
}
type latestPull struct {
count int
}
func NewLatestPull(count int) latestPull {
return latestPull{count: count}
}
func (lp latestPull) Name() string {
return fmt.Sprintf("%s:%d", latestPullName, lp.count)
}
func (lp latestPull) Perform(candidates []*types.Candidate) []*types.Candidate {
sort.Slice(candidates, func(i, j int) bool {
return candidates[i].PullTimestamp.After(candidates[j].PullTimestamp)
})
// take top count candidates
upper := lp.count
if lp.count > len(candidates) {
upper = len(candidates)
}
candidates = candidates[:upper]
for _, candidate := range candidates {
candidate.RetainedBy = lp.Name()
}
return candidates
}
type latestPush struct {
count int
}
func NewLatestPush(count int) latestPush {
return latestPush{count: count}
}
func (lp latestPush) Name() string {
return fmt.Sprintf("%s:%d", latestPushName, lp.count)
}
func (lp latestPush) Perform(candidates []*types.Candidate) []*types.Candidate {
sort.Slice(candidates, func(i, j int) bool {
return candidates[i].PushTimestamp.After(candidates[j].PushTimestamp)
})
// take top count candidates
upper := lp.count
if lp.count > len(candidates) {
upper = len(candidates)
}
candidates = candidates[:upper]
for _, candidate := range candidates {
candidate.RetainedBy = lp.Name()
}
return candidates
}
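A small, hypothetical illustration of the rule semantics using the constructors above: mostRecentlyPushedCount keeps the newest N pushes, while pushedWithin keeps anything pushed inside the given window (the candidates and timestamps are made up):

```
package main

import (
	"fmt"
	"time"

	"zotregistry.io/zot/pkg/retention"
	"zotregistry.io/zot/pkg/retention/types"
)

func main() {
	now := time.Now()

	candidates := []*types.Candidate{
		{Tag: "v1.0", PushTimestamp: now.Add(-30 * 24 * time.Hour)},
		{Tag: "v1.1", PushTimestamp: now.Add(-7 * 24 * time.Hour)},
		{Tag: "v1.2", PushTimestamp: now.Add(-1 * time.Hour)},
	}

	// Keep only the single most recently pushed tag: v1.2.
	for _, c := range retention.NewLatestPush(1).Perform(candidates) {
		fmt.Println(c.Tag, c.RetainedBy) // v1.2 mostRecentlyPushedCount:1
	}

	// Keep anything pushed within the last two weeks: v1.1 and v1.2.
	for _, c := range retention.NewDaysPush(14 * 24 * time.Hour).Perform(candidates) {
		fmt.Println(c.Tag, c.RetainedBy)
	}
}
```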

View file

@ -0,0 +1,30 @@
package types
import (
"time"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
mTypes "zotregistry.io/zot/pkg/meta/types"
)
type Candidate struct {
DigestStr string
MediaType string
Tag string
PushTimestamp time.Time
PullTimestamp time.Time
RetainedBy string
}
type PolicyManager interface {
HasDeleteReferrer(repo string) bool
HasDeleteUntagged(repo string) bool
HasTagRetention(repo string) bool
GetRetainedTags(repoMeta mTypes.RepoMeta, index ispec.Index) []string
}
type Rule interface {
Name() string
Perform(candidates []*Candidate) []*Candidate
}

View file

@ -6,22 +6,22 @@ import (
const (
// BlobUploadDir defines the upload directory for blob uploads.
BlobUploadDir = ".uploads"
SchemaVersion = 2
DefaultFilePerms = 0o600
DefaultDirPerms = 0o700
RLOCK = "RLock"
RWLOCK = "RWLock"
BlobsCache = "blobs"
DuplicatesBucket = "duplicates"
OriginalBucket = "original"
DBExtensionName = ".db"
DBCacheLockCheckTimeout = 10 * time.Second
BoltdbName = "cache"
DynamoDBDriverName = "dynamodb"
DefaultGCDelay = 1 * time.Hour
DefaultUntaggedImgeRetentionDelay = 24 * time.Hour
DefaultGCInterval = 1 * time.Hour
S3StorageDriverName = "s3"
LocalStorageDriverName = "local"
BlobUploadDir = ".uploads"
SchemaVersion = 2
DefaultFilePerms = 0o600
DefaultDirPerms = 0o700
RLOCK = "RLock"
RWLOCK = "RWLock"
BlobsCache = "blobs"
DuplicatesBucket = "duplicates"
OriginalBucket = "original"
DBExtensionName = ".db"
DBCacheLockCheckTimeout = 10 * time.Second
BoltdbName = "cache"
DynamoDBDriverName = "dynamodb"
DefaultGCDelay = 1 * time.Hour
DefaultRetentionDelay = 24 * time.Hour
DefaultGCInterval = 1 * time.Hour
S3StorageDriverName = "s3"
LocalStorageDriverName = "local"
)

View file

@ -15,9 +15,12 @@ import (
oras "github.com/oras-project/artifacts-spec/specs-go/v1"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/api/config"
zcommon "zotregistry.io/zot/pkg/common"
zlog "zotregistry.io/zot/pkg/log"
mTypes "zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/retention"
rTypes "zotregistry.io/zot/pkg/retention/types"
"zotregistry.io/zot/pkg/scheduler"
"zotregistry.io/zot/pkg/storage"
common "zotregistry.io/zot/pkg/storage/common"
@ -30,28 +33,31 @@ const (
)
type Options struct {
// will garbage collect referrers with missing subject older than Delay
Referrers bool
// will garbage collect blobs older than Delay
Delay time.Duration
// will garbage collect untagged manifests older than RetentionDelay
RetentionDelay time.Duration
ImageRetention config.ImageRetention
}
type GarbageCollect struct {
imgStore types.ImageStore
opts Options
metaDB mTypes.MetaDB
log zlog.Logger
imgStore types.ImageStore
opts Options
metaDB mTypes.MetaDB
policyMgr rTypes.PolicyManager
auditLog *zlog.Logger
log zlog.Logger
}
func NewGarbageCollect(imgStore types.ImageStore, metaDB mTypes.MetaDB, opts Options, log zlog.Logger,
func NewGarbageCollect(imgStore types.ImageStore, metaDB mTypes.MetaDB, opts Options,
auditLog *zlog.Logger, log zlog.Logger,
) GarbageCollect {
return GarbageCollect{
imgStore: imgStore,
metaDB: metaDB,
opts: opts,
log: log,
imgStore: imgStore,
metaDB: metaDB,
opts: opts,
policyMgr: retention.NewPolicyManager(opts.ImageRetention, log, auditLog),
auditLog: auditLog,
log: log,
}
}
@ -75,17 +81,20 @@ It also gc referrers with missing subject if the Referrer Option is enabled
It also gc untagged manifests.
*/
func (gc GarbageCollect) CleanRepo(repo string) error {
gc.log.Info().Msg(fmt.Sprintf("executing GC of orphaned blobs for %s", path.Join(gc.imgStore.RootDir(), repo)))
gc.log.Info().Str("module", "gc").
Msg(fmt.Sprintf("executing GC of orphaned blobs for %s", path.Join(gc.imgStore.RootDir(), repo)))
if err := gc.cleanRepo(repo); err != nil {
errMessage := fmt.Sprintf("error while running GC for %s", path.Join(gc.imgStore.RootDir(), repo))
gc.log.Error().Err(err).Msg(errMessage)
gc.log.Info().Msg(fmt.Sprintf("GC unsuccessfully completed for %s", path.Join(gc.imgStore.RootDir(), repo)))
gc.log.Error().Err(err).Str("module", "gc").Msg(errMessage)
gc.log.Info().Str("module", "gc").
Msg(fmt.Sprintf("GC unsuccessfully completed for %s", path.Join(gc.imgStore.RootDir(), repo)))
return err
}
gc.log.Info().Msg(fmt.Sprintf("GC successfully completed for %s", path.Join(gc.imgStore.RootDir(), repo)))
gc.log.Info().Str("module", "gc").
Msg(fmt.Sprintf("GC successfully completed for %s", path.Join(gc.imgStore.RootDir(), repo)))
return nil
}
@ -112,28 +121,39 @@ func (gc GarbageCollect) cleanRepo(repo string) error {
*/
index, err := common.GetIndex(gc.imgStore, repo, gc.log)
if err != nil {
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("unable to read index.json in repo")
return err
}
// apply tags retention
if err := gc.removeTagsPerRetentionPolicy(repo, &index); err != nil {
return err
}
// gc referrers manifests with missing subject and untagged manifests
if err := gc.cleanManifests(repo, &index); err != nil {
if err := gc.removeManifestsPerRepoPolicy(repo, &index); err != nil {
return err
}
// update repos's index.json in storage
if err := gc.imgStore.PutIndexContent(repo, index); err != nil {
return err
if !gc.opts.ImageRetention.DryRun {
/* this will update the index.json with the manifests deleted above;
their blobs will be removed by gc.removeUnreferencedBlobs() */
if err := gc.imgStore.PutIndexContent(repo, index); err != nil {
return err
}
}
// gc unreferenced blobs
if err := gc.cleanBlobs(repo, index, gc.opts.Delay, gc.log); err != nil {
if err := gc.removeUnreferencedBlobs(repo, gc.opts.Delay, gc.log); err != nil {
return err
}
return nil
}
func (gc GarbageCollect) cleanManifests(repo string, index *ispec.Index) error {
func (gc GarbageCollect) removeManifestsPerRepoPolicy(repo string, index *ispec.Index) error {
var err error
/* gc all manifests that have a missing subject, stop when neither gc(referrer and untagged)
@ -142,32 +162,36 @@ func (gc GarbageCollect) cleanManifests(repo string, index *ispec.Index) error {
for !stop {
var gcedReferrer bool
if gc.opts.Referrers {
gc.log.Debug().Str("repository", repo).Msg("gc: manifests with missing referrers")
var gcedUntagged bool
gcedReferrer, err = gc.cleanIndexReferrers(repo, index, *index)
if gc.policyMgr.HasDeleteReferrer(repo) {
gc.log.Debug().Str("module", "gc").Str("repository", repo).Msg("manifests with missing referrers")
gcedReferrer, err = gc.removeIndexReferrers(repo, index, *index)
if err != nil {
return err
}
}
referenced := make(map[godigest.Digest]bool, 0)
if gc.policyMgr.HasDeleteUntagged(repo) {
referenced := make(map[godigest.Digest]bool, 0)
/* gather all manifests referenced in multiarch images/by other manifests
so that we can skip them in cleanUntaggedManifests */
if err := gc.identifyManifestsReferencedInIndex(*index, repo, referenced); err != nil {
return err
}
/* gather all manifests referenced in multiarch images/by other manifests
so that we can skip them in cleanUntaggedManifests */
if err := gc.identifyManifestsReferencedInIndex(*index, repo, referenced); err != nil {
return err
}
// apply image retention policy
gcedManifest, err := gc.cleanUntaggedManifests(repo, index, referenced)
if err != nil {
return err
// apply image retention policy
gcedUntagged, err = gc.removeUntaggedManifests(repo, index, referenced)
if err != nil {
return err
}
}
/* if we gced any manifest then loop again and gc manifests with
a subject pointing to the last ones which were gced. */
stop = !gcedReferrer && !gcedManifest
stop = !gcedReferrer && !gcedUntagged
}
return nil
@ -179,7 +203,7 @@ garbageCollectIndexReferrers will gc all referrers with a missing subject recurs
rootIndex is indexJson, need to pass it down to garbageCollectReferrer()
rootIndex is the place we look for referrers.
*/
func (gc GarbageCollect) cleanIndexReferrers(repo string, rootIndex *ispec.Index, index ispec.Index,
func (gc GarbageCollect) removeIndexReferrers(repo string, rootIndex *ispec.Index, index ispec.Index,
) (bool, error) {
var count int
@ -190,13 +214,13 @@ func (gc GarbageCollect) cleanIndexReferrers(repo string, rootIndex *ispec.Index
case ispec.MediaTypeImageIndex:
indexImage, err := common.GetImageIndex(gc.imgStore, repo, desc.Digest, gc.log)
if err != nil {
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("gc: failed to read multiarch(index) image")
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("failed to read multiarch(index) image")
return false, err
}
gced, err := gc.cleanReferrer(repo, rootIndex, desc, indexImage.Subject, indexImage.ArtifactType)
gced, err := gc.removeReferrer(repo, rootIndex, desc, indexImage.Subject, indexImage.ArtifactType)
if err != nil {
return false, err
}
@ -208,7 +232,7 @@ func (gc GarbageCollect) cleanIndexReferrers(repo string, rootIndex *ispec.Index
return true, nil
}
gced, err = gc.cleanIndexReferrers(repo, rootIndex, indexImage)
gced, err = gc.removeIndexReferrers(repo, rootIndex, indexImage)
if err != nil {
return false, err
}
@ -219,15 +243,15 @@ func (gc GarbageCollect) cleanIndexReferrers(repo string, rootIndex *ispec.Index
case ispec.MediaTypeImageManifest, oras.MediaTypeArtifactManifest:
image, err := common.GetImageManifest(gc.imgStore, repo, desc.Digest, gc.log)
if err != nil {
gc.log.Error().Err(err).Str("repo", repo).Str("digest", desc.Digest.String()).
Msg("gc: failed to read manifest image")
gc.log.Error().Err(err).Str("module", "gc").Str("repo", repo).Str("digest", desc.Digest.String()).
Msg("failed to read manifest image")
return false, err
}
artifactType := zcommon.GetManifestArtifactType(image)
gced, err := gc.cleanReferrer(repo, rootIndex, desc, image.Subject, artifactType)
gced, err := gc.removeReferrer(repo, rootIndex, desc, image.Subject, artifactType)
if err != nil {
return false, err
}
@ -241,7 +265,7 @@ func (gc GarbageCollect) cleanIndexReferrers(repo string, rootIndex *ispec.Index
return count > 0, err
}
func (gc GarbageCollect) cleanReferrer(repo string, index *ispec.Index, manifestDesc ispec.Descriptor,
func (gc GarbageCollect) removeReferrer(repo string, index *ispec.Index, manifestDesc ispec.Descriptor,
subject *ispec.Descriptor, artifactType string,
) (bool, error) {
var gced bool
@ -259,18 +283,35 @@ func (gc GarbageCollect) cleanReferrer(repo string, index *ispec.Index, manifest
}
if !referenced {
gced, err = gc.gcManifest(repo, index, manifestDesc, signatureType, subject.Digest, gc.opts.Delay)
gced, err = gc.gcManifest(repo, index, manifestDesc, signatureType, subject.Digest, gc.opts.ImageRetention.Delay)
if err != nil {
return false, err
}
if gced {
gc.log.Info().Str("module", "gc").
Str("repository", repo).
Str("reference", manifestDesc.Digest.String()).
Str("subject", subject.Digest.String()).
Str("decision", "delete").
Str("reason", "deleteReferrers").Msg("removed manifest without reference")
if gc.auditLog != nil {
gc.auditLog.Info().Str("module", "gc").
Str("repository", repo).
Str("reference", manifestDesc.Digest.String()).
Str("subject", subject.Digest.String()).
Str("decision", "delete").
Str("reason", "deleteReferrers").Msg("removed manifest without reference")
}
}
}
}
// cosign
tag, ok := manifestDesc.Annotations[ispec.AnnotationRefName]
tag, ok := getDescriptorTag(manifestDesc)
if ok {
if strings.HasPrefix(tag, "sha256-") && (strings.HasSuffix(tag, cosignSignatureTagSuffix) ||
strings.HasSuffix(tag, SBOMTagSuffix)) {
if isCosignTag(tag) {
subjectDigest := getSubjectFromCosignTag(tag)
referenced := isManifestReferencedInIndex(index, subjectDigest)
@ -279,6 +320,26 @@ func (gc GarbageCollect) cleanReferrer(repo string, index *ispec.Index, manifest
if err != nil {
return false, err
}
if gced {
gc.log.Info().Str("module", "gc").
Bool("dry-run", gc.opts.ImageRetention.DryRun).
Str("repository", repo).
Str("reference", tag).
Str("subject", subjectDigest.String()).
Str("decision", "delete").
Str("reason", "deleteReferrers").Msg("removed cosign manifest without reference")
if gc.auditLog != nil {
gc.auditLog.Info().Str("module", "gc").
Bool("dry-run", gc.opts.ImageRetention.DryRun).
Str("repository", repo).
Str("reference", tag).
Str("subject", subjectDigest.String()).
Str("decision", "delete").
Str("reason", "deleteReferrers").Msg("removed cosign manifest without reference")
}
}
}
}
}
@ -286,6 +347,36 @@ func (gc GarbageCollect) cleanReferrer(repo string, index *ispec.Index, manifest
return gced, nil
}
func (gc GarbageCollect) removeTagsPerRetentionPolicy(repo string, index *ispec.Index) error {
if !gc.policyMgr.HasTagRetention(repo) {
return nil
}
repoMeta, err := gc.metaDB.GetRepoMeta(context.Background(), repo)
if err != nil {
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("can't retrieve repoMeta for repo")
return err
}
retainTags := gc.policyMgr.GetRetainedTags(repoMeta, *index)
// remove
for _, desc := range index.Manifests {
// check tag
tag, ok := getDescriptorTag(desc)
if ok && !zcommon.Contains(retainTags, tag) {
// remove tags which should not be retained
_, err := gc.removeManifest(repo, index, desc, tag, "", "")
if err != nil && !errors.Is(err, zerr.ErrManifestNotFound) {
return err
}
}
}
return nil
}
// gcManifest removes a manifest entry from an index and syncs metaDB accordingly if the blob is older than gc.Delay.
func (gc GarbageCollect) gcManifest(repo string, index *ispec.Index, desc ispec.Descriptor,
signatureType string, subjectDigest godigest.Digest, delay time.Duration,
@ -294,14 +385,14 @@ func (gc GarbageCollect) gcManifest(repo string, index *ispec.Index, desc ispec.
canGC, err := isBlobOlderThan(gc.imgStore, repo, desc.Digest, delay, gc.log)
if err != nil {
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Str("delay", gc.opts.Delay.String()).Msg("gc: failed to check if blob is older than delay")
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", desc.Digest.String()).
Str("delay", delay.String()).Msg("failed to check if blob is older than delay")
return false, err
}
if canGC {
if gced, err = gc.removeManifest(repo, index, desc, signatureType, subjectDigest); err != nil {
if gced, err = gc.removeManifest(repo, index, desc, desc.Digest.String(), signatureType, subjectDigest); err != nil {
return false, err
}
}
@ -311,12 +402,9 @@ func (gc GarbageCollect) gcManifest(repo string, index *ispec.Index, desc ispec.
// removeManifest removes a manifest entry from an index and syncs metaDB accordingly.
func (gc GarbageCollect) removeManifest(repo string, index *ispec.Index,
desc ispec.Descriptor, signatureType string, subjectDigest godigest.Digest,
desc ispec.Descriptor, reference string, signatureType string, subjectDigest godigest.Digest,
) (bool, error) {
gc.log.Debug().Str("repository", repo).Str("digest", desc.Digest.String()).Msg("gc: removing manifest")
// remove from index
_, err := common.RemoveManifestDescByReference(index, desc.Digest.String(), true)
_, err := common.RemoveManifestDescByReference(index, reference, true)
if err != nil {
if errors.Is(err, zerr.ErrManifestConflict) {
return false, nil
@ -325,6 +413,10 @@ func (gc GarbageCollect) removeManifest(repo string, index *ispec.Index,
return false, err
}
if gc.opts.ImageRetention.DryRun {
return true, nil
}
// sync metaDB
if gc.metaDB != nil {
if signatureType != "" {
@ -333,14 +425,14 @@ func (gc GarbageCollect) removeManifest(repo string, index *ispec.Index,
SignatureType: signatureType,
})
if err != nil {
gc.log.Error().Err(err).Msg("gc,metadb: unable to remove signature in metaDB")
gc.log.Error().Err(err).Str("module", "gc").Msg("metadb: unable to remove signature in metaDB")
return false, err
}
} else {
err := gc.metaDB.RemoveRepoReference(repo, desc.Digest.String(), desc.Digest)
err := gc.metaDB.RemoveRepoReference(repo, reference, desc.Digest)
if err != nil {
gc.log.Error().Err(err).Msg("gc, metadb: unable to remove repo reference in metaDB")
gc.log.Error().Err(err).Str("module", "gc").Msg("metadb: unable to remove repo reference in metaDB")
return false, err
}
@ -350,16 +442,15 @@ func (gc GarbageCollect) removeManifest(repo string, index *ispec.Index,
return true, nil
}
func (gc GarbageCollect) cleanUntaggedManifests(repo string, index *ispec.Index,
func (gc GarbageCollect) removeUntaggedManifests(repo string, index *ispec.Index,
referenced map[godigest.Digest]bool,
) (bool, error) {
var gced bool
var err error
gc.log.Debug().Str("repository", repo).Msg("gc: manifests without tags")
gc.log.Debug().Str("module", "gc").Str("repository", repo).Msg("manifests without tags")
// first gather manifests part of image indexes and referrers, we want to skip checking them
for _, desc := range index.Manifests {
// skip manifests referenced in image indexes
if _, referenced := referenced[desc.Digest]; referenced {
@ -368,12 +459,30 @@ func (gc GarbageCollect) cleanUntaggedManifests(repo string, index *ispec.Index,
// remove untagged images
if desc.MediaType == ispec.MediaTypeImageManifest || desc.MediaType == ispec.MediaTypeImageIndex {
_, ok := desc.Annotations[ispec.AnnotationRefName]
_, ok := getDescriptorTag(desc)
if !ok {
gced, err = gc.gcManifest(repo, index, desc, "", "", gc.opts.RetentionDelay)
gced, err = gc.gcManifest(repo, index, desc, "", "", gc.opts.ImageRetention.Delay)
if err != nil {
return false, err
}
if gced {
gc.log.Info().Str("module", "gc").
Bool("dry-run", gc.opts.ImageRetention.DryRun).
Str("repository", repo).
Str("reference", desc.Digest.String()).
Str("decision", "delete").
Str("reason", "deleteUntagged").Msg("removed untagged manifest")
if gc.auditLog != nil {
gc.auditLog.Info().Str("module", "gc").
Bool("dry-run", gc.opts.ImageRetention.DryRun).
Str("repository", repo).
Str("reference", desc.Digest.String()).
Str("decision", "delete").
Str("reason", "deleteUntagged").Msg("removed untagged manifest")
}
}
}
}
}
@ -390,8 +499,8 @@ func (gc GarbageCollect) identifyManifestsReferencedInIndex(index ispec.Index, r
case ispec.MediaTypeImageIndex:
indexImage, err := common.GetImageIndex(gc.imgStore, repo, desc.Digest, gc.log)
if err != nil {
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("gc: failed to read multiarch(index) image")
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).
Str("digest", desc.Digest.String()).Msg("failed to read multiarch(index) image")
return err
}
@ -410,8 +519,8 @@ func (gc GarbageCollect) identifyManifestsReferencedInIndex(index ispec.Index, r
case ispec.MediaTypeImageManifest, oras.MediaTypeArtifactManifest:
image, err := common.GetImageManifest(gc.imgStore, repo, desc.Digest, gc.log)
if err != nil {
gc.log.Error().Err(err).Str("repo", repo).Str("digest", desc.Digest.String()).
Msg("gc: failed to read manifest image")
gc.log.Error().Err(err).Str("module", "gc").Str("repo", repo).
Str("digest", desc.Digest.String()).Msg("failed to read manifest image")
return err
}
@ -425,17 +534,23 @@ func (gc GarbageCollect) identifyManifestsReferencedInIndex(index ispec.Index, r
return nil
}
// cleanBlobs gc all blobs which are not referenced by any manifest found in repo's index.json.
func (gc GarbageCollect) cleanBlobs(repo string, index ispec.Index,
delay time.Duration, log zlog.Logger,
// removeUnreferencedBlobs gc all blobs which are not referenced by any manifest found in repo's index.json.
func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duration, log zlog.Logger,
) error {
gc.log.Debug().Str("repository", repo).Msg("gc: blobs")
gc.log.Debug().Str("module", "gc").Str("repository", repo).Msg("cleaning orphan blobs")
refBlobs := map[string]bool{}
err := gc.addIndexBlobsToReferences(repo, index, refBlobs)
index, err := common.GetIndex(gc.imgStore, repo, gc.log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Msg("gc: unable to get referenced blobs in repo")
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("unable to read index.json in repo")
return err
}
err = gc.addIndexBlobsToReferences(repo, index, refBlobs)
if err != nil {
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("unable to get referenced blobs in repo")
return err
}
@ -447,7 +562,7 @@ func (gc GarbageCollect) cleanBlobs(repo string, index ispec.Index,
return nil
}
log.Error().Err(err).Str("repository", repo).Msg("gc: unable to get all blobs")
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("unable to get all blobs")
return err
}
@ -457,7 +572,8 @@ func (gc GarbageCollect) cleanBlobs(repo string, index ispec.Index,
for _, blob := range allBlobs {
digest := godigest.NewDigestFromEncoded(godigest.SHA256, blob)
if err = digest.Validate(); err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", blob).Msg("gc: unable to parse digest")
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", blob).
Msg("unable to parse digest")
return err
}
@ -465,7 +581,8 @@ func (gc GarbageCollect) cleanBlobs(repo string, index ispec.Index,
if _, ok := refBlobs[digest.String()]; !ok {
canGC, err := isBlobOlderThan(gc.imgStore, repo, digest, delay, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", blob).Msg("gc: unable to determine GC delay")
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", blob).
Msg("unable to determine GC delay")
return err
}
@ -484,11 +601,13 @@ func (gc GarbageCollect) cleanBlobs(repo string, index ispec.Index,
return err
}
log.Info().Str("repository", repo).Int("count", reaped).Msg("gc: garbage collected blobs")
log.Info().Str("module", "gc").Str("repository", repo).Int("count", reaped).
Msg("garbage collected blobs")
return nil
}
// used by removeUnreferencedBlobs()
// addIndexBlobsToReferences adds referenced blobs found in referenced manifests (index.json) in refblobs map.
func (gc GarbageCollect) addIndexBlobsToReferences(repo string, index ispec.Index, refBlobs map[string]bool,
) error {
@ -496,22 +615,22 @@ func (gc GarbageCollect) addIndexBlobsToReferences(repo string, index ispec.Inde
switch desc.MediaType {
case ispec.MediaTypeImageIndex:
if err := gc.addImageIndexBlobsToReferences(repo, desc.Digest, refBlobs); err != nil {
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("gc: failed to read blobs in multiarch(index) image")
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).
Str("digest", desc.Digest.String()).Msg("failed to read blobs in multiarch(index) image")
return err
}
case ispec.MediaTypeImageManifest:
if err := gc.addImageManifestBlobsToReferences(repo, desc.Digest, refBlobs); err != nil {
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("gc: failed to read blobs in image manifest")
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).
Str("digest", desc.Digest.String()).Msg("failed to read blobs in image manifest")
return err
}
case oras.MediaTypeArtifactManifest:
if err := gc.addORASImageManifestBlobsToReferences(repo, desc.Digest, refBlobs); err != nil {
gc.log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("gc: failed to read blobs in ORAS image manifest")
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).
Str("digest", desc.Digest.String()).Msg("failed to read blobs in ORAS image manifest")
return err
}
@ -525,8 +644,8 @@ func (gc GarbageCollect) addImageIndexBlobsToReferences(repo string, mdigest god
) error {
index, err := common.GetImageIndex(gc.imgStore, repo, mdigest, gc.log)
if err != nil {
gc.log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", mdigest.String()).
Msg("failed to read manifest image")
return err
}
@ -550,8 +669,8 @@ func (gc GarbageCollect) addImageManifestBlobsToReferences(repo string, mdigest
) error {
manifestContent, err := common.GetImageManifest(gc.imgStore, repo, mdigest, gc.log)
if err != nil {
gc.log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).
Str("digest", mdigest.String()).Msg("failed to read manifest image")
return err
}
@ -576,8 +695,8 @@ func (gc GarbageCollect) addORASImageManifestBlobsToReferences(repo string, mdig
) error {
manifestContent, err := common.GetOrasManifestByDigest(gc.imgStore, repo, mdigest, gc.log)
if err != nil {
gc.log.Error().Err(err).Str("repository", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).
Str("digest", mdigest.String()).Msg("failed to read manifest image")
return err
}
@ -611,8 +730,8 @@ func isBlobOlderThan(imgStore types.ImageStore, repo string,
) (bool, error) {
_, _, modtime, err := imgStore.StatBlob(repo, digest)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).
Msg("gc: failed to stat blob")
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", digest.String()).
Msg("failed to stat blob")
return false, err
}
@ -631,6 +750,22 @@ func getSubjectFromCosignTag(tag string) godigest.Digest {
return godigest.NewDigestFromEncoded(godigest.Algorithm(alg), encoded)
}
func getDescriptorTag(desc ispec.Descriptor) (string, bool) {
tag, ok := desc.Annotations[ispec.AnnotationRefName]
return tag, ok
}
// isCosignTag checks whether the tag is a cosign tag (signature or SBOM).
func isCosignTag(tag string) bool {
if strings.HasPrefix(tag, "sha256-") &&
(strings.HasSuffix(tag, cosignSignatureTagSuffix) || strings.HasSuffix(tag, SBOMTagSuffix)) {
return true
}
return false
}
/*
GCTaskGenerator takes all repositories found in the storage.imagestore
@ -704,5 +839,5 @@ func NewGCTask(imgStore types.ImageStore, gc GarbageCollect, repo string,
func (gct *gcTask) DoWork(ctx context.Context) error {
// run task
return gct.gc.CleanRepo(gct.repo)
return gct.gc.CleanRepo(gct.repo) //nolint: contextcheck
}
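For completeness, a hedged sketch of how the collector is wired with retention options, mirroring the test setup below; the image store and metaDB construction are left to the caller, and the helper name is illustrative:

```
package gcwiring // hypothetical package, not part of this change

import (
	"zotregistry.io/zot/pkg/api/config"
	zlog "zotregistry.io/zot/pkg/log"
	mTypes "zotregistry.io/zot/pkg/meta/types"
	storageConstants "zotregistry.io/zot/pkg/storage/constants"
	"zotregistry.io/zot/pkg/storage/gc"
	storageTypes "zotregistry.io/zot/pkg/storage/types"
)

// runGC shows the wiring only; zot itself drives this through the scheduler.
func runGC(imgStore storageTypes.ImageStore, metaDB mTypes.MetaDB, repo string) error {
	log := zlog.NewLogger("info", "")
	audit := zlog.NewAuditLogger("info", "")

	opts := gc.Options{
		Delay: storageConstants.DefaultGCDelay,
		ImageRetention: config.ImageRetention{
			Delay: storageConstants.DefaultRetentionDelay,
			Policies: []config.RetentionPolicy{
				{
					Repositories:    []string{"**"},
					DeleteReferrers: true,
				},
			},
		},
	}

	collector := gc.NewGarbageCollect(imgStore, metaDB, opts, audit, log)

	// Applies retention, removes referrers with missing subjects, untagged
	// manifests and unreferenced blobs for the given repository.
	return collector.CleanRepo(repo)
}
```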

View file

@ -2,6 +2,7 @@ package gc
import (
"bytes"
"context"
"encoding/json"
"errors"
"os"
@ -12,12 +13,12 @@ import (
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
"github.com/rs/zerolog"
. "github.com/smartystreets/goconvey/convey"
"zotregistry.io/zot/pkg/api/config"
zcommon "zotregistry.io/zot/pkg/common"
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
zlog "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
@ -37,8 +38,11 @@ func TestGarbageCollectManifestErrors(t *testing.T) {
Convey("Make imagestore and upload manifest", t, func(c C) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.NewLogger("debug", "")
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
@ -47,10 +51,17 @@ func TestGarbageCollectManifestErrors(t *testing.T) {
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
Referrers: true,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
},
},
},
}, audit, log)
Convey("trigger repo not found in addImageIndexBlobsToReferences()", func() {
err := gc.addIndexBlobsToReferences(repoName, ispec.Index{
@ -164,7 +175,9 @@ func TestGarbageCollectIndexErrors(t *testing.T) {
Convey("Make imagestore and upload manifest", t, func(c C) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.NewLogger("debug", "")
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -174,10 +187,17 @@ func TestGarbageCollectIndexErrors(t *testing.T) {
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
Referrers: true,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
},
},
},
}, audit, log)
content := []byte("this is a blob")
bdgst := godigest.FromBytes(content)
@ -270,8 +290,85 @@ func TestGarbageCollectIndexErrors(t *testing.T) {
}
func TestGarbageCollectWithMockedImageStore(t *testing.T) {
trueVal := true
Convey("Cover gc error paths", t, func(c C) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.NewLogger("debug", "")
audit := zlog.NewAuditLogger("debug", "")
gcOptions := Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
},
},
},
}
Convey("Error on GetIndex in gc.cleanRepo()", func() {
gc := NewGarbageCollect(mocks.MockedImageStore{}, mocks.MetaDBMock{
GetRepoMetaFn: func(ctx context.Context, repo string) (types.RepoMeta, error) {
return types.RepoMeta{}, errGC
},
}, gcOptions, audit, log)
err := gc.cleanRepo(repoName)
So(err, ShouldNotBeNil)
})
Convey("Error on GetIndex in gc.removeUnreferencedBlobs()", func() {
gc := NewGarbageCollect(mocks.MockedImageStore{}, mocks.MetaDBMock{
GetRepoMetaFn: func(ctx context.Context, repo string) (types.RepoMeta, error) {
return types.RepoMeta{}, errGC
},
}, gcOptions, audit, log)
err := gc.removeUnreferencedBlobs("repo", time.Hour, log)
So(err, ShouldNotBeNil)
})
Convey("Error on gc.removeManifest()", func() {
gc := NewGarbageCollect(mocks.MockedImageStore{}, mocks.MetaDBMock{
GetRepoMetaFn: func(ctx context.Context, repo string) (types.RepoMeta, error) {
return types.RepoMeta{}, errGC
},
}, gcOptions, audit, log)
_, err := gc.removeManifest("", &ispec.Index{}, ispec.DescriptorEmptyJSON, "tag", "", "")
So(err, ShouldNotBeNil)
})
Convey("Error on metaDB in gc.cleanRepo()", func() {
gcOptions := Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{".*"},
},
},
},
},
},
}
gc := NewGarbageCollect(mocks.MockedImageStore{}, mocks.MetaDBMock{
GetRepoMetaFn: func(ctx context.Context, repo string) (types.RepoMeta, error) {
return types.RepoMeta{}, errGC
},
}, gcOptions, audit, log)
err := gc.removeTagsPerRetentionPolicy("name", &ispec.Index{})
So(err, ShouldNotBeNil)
})
Convey("Error on PutIndexContent in gc.cleanRepo()", func() {
returnedIndexJSON := ispec.Index{}
@ -288,11 +385,7 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
},
}
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
Referrers: true,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gcOptions, audit, log)
err = gc.cleanRepo(repoName)
So(err, ShouldNotBeNil)
@ -316,11 +409,7 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
},
}
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
Referrers: true,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gcOptions, audit, log)
err = gc.cleanRepo(repoName)
So(err, ShouldNotBeNil)
@ -333,11 +422,7 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
},
}
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
Referrers: true,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gcOptions, audit, log)
err := gc.cleanRepo(repoName)
So(err, ShouldNotBeNil)
@ -369,13 +454,17 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
},
}
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
Referrers: false,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
gcOptions.ImageRetention = config.ImageRetention{
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteUntagged: &trueVal,
},
},
}
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gcOptions, audit, log)
err = gc.cleanManifests(repoName, &ispec.Index{
err = gc.removeManifestsPerRepoPolicy(repoName, &ispec.Index{
Manifests: []ispec.Descriptor{
{
MediaType: ispec.MediaTypeImageIndex,
@ -393,13 +482,18 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
},
}
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
Referrers: false,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
gcOptions.ImageRetention = config.ImageRetention{
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteUntagged: &trueVal,
},
},
}
err := gc.cleanManifests(repoName, &ispec.Index{
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gcOptions, audit, log)
err := gc.removeManifestsPerRepoPolicy(repoName, &ispec.Index{
Manifests: []ispec.Descriptor{
{
MediaType: ispec.MediaTypeImageManifest,
@ -430,13 +524,17 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
},
}
gc := NewGarbageCollect(imgStore, metaDB, Options{
Referrers: false,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
gcOptions.ImageRetention = config.ImageRetention{
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteUntagged: &trueVal,
},
},
}
gc := NewGarbageCollect(imgStore, metaDB, gcOptions, audit, log)
err = gc.cleanManifests(repoName, &ispec.Index{
err = gc.removeManifestsPerRepoPolicy(repoName, &ispec.Index{
Manifests: []ispec.Descriptor{
{
MediaType: ispec.MediaTypeImageManifest,
@ -467,11 +565,8 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
},
}
gc := NewGarbageCollect(imgStore, metaDB, Options{
Referrers: false,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
gcOptions.ImageRetention = config.ImageRetention{}
gc := NewGarbageCollect(imgStore, metaDB, gcOptions, audit, log)
desc := ispec.Descriptor{
MediaType: ispec.MediaTypeImageManifest,
@ -481,7 +576,7 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
index := &ispec.Index{
Manifests: []ispec.Descriptor{desc},
}
_, err = gc.removeManifest(repoName, index, desc, storage.NotationType,
_, err = gc.removeManifest(repoName, index, desc, desc.Digest.String(), storage.NotationType,
godigest.FromBytes([]byte("digest2")))
So(err, ShouldNotBeNil)
@ -515,13 +610,9 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
},
}
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
Referrers: true,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gcOptions, audit, log)
err = gc.cleanManifests(repoName, &returnedIndexImage)
err = gc.removeManifestsPerRepoPolicy(repoName, &returnedIndexImage)
So(err, ShouldNotBeNil)
})
@ -550,13 +641,9 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
},
}
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, Options{
Referrers: true,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
gc := NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gcOptions, audit, log)
err = gc.cleanManifests(repoName, &ispec.Index{
err = gc.removeManifestsPerRepoPolicy(repoName, &ispec.Index{
Manifests: []ispec.Descriptor{
manifestDesc,
},

pkg/storage/gc/gc_test.go (new file, 863 lines)
View file

@ -0,0 +1,863 @@
package gc_test
import (
"context"
"fmt"
"os"
"path"
"testing"
"time"
"github.com/docker/distribution/registry/storage/driver/factory"
_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
guuid "github.com/gofrs/uuid"
. "github.com/smartystreets/goconvey/convey"
"gopkg.in/resty.v1"
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/extensions/monitoring"
zlog "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta"
"zotregistry.io/zot/pkg/meta/boltdb"
"zotregistry.io/zot/pkg/meta/dynamodb"
mTypes "zotregistry.io/zot/pkg/meta/types"
"zotregistry.io/zot/pkg/storage"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/gc"
"zotregistry.io/zot/pkg/storage/local"
"zotregistry.io/zot/pkg/storage/s3"
storageTypes "zotregistry.io/zot/pkg/storage/types"
. "zotregistry.io/zot/pkg/test/image-utils"
tskip "zotregistry.io/zot/pkg/test/skip"
)
const (
region = "us-east-2"
)
//nolint:gochecknoglobals
var testCases = []struct {
testCaseName string
storageType string
}{
{
testCaseName: "S3APIs",
storageType: storageConstants.S3StorageDriverName,
},
{
testCaseName: "LocalAPIs",
storageType: storageConstants.LocalStorageDriverName,
},
}
func TestGarbageCollectAndRetention(t *testing.T) {
log := zlog.NewLogger("info", "")
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
trueVal := true
for _, testcase := range testCases {
testcase := testcase
t.Run(testcase.testCaseName, func(t *testing.T) {
var imgStore storageTypes.ImageStore
var metaDB mTypes.MetaDB
if testcase.storageType == storageConstants.S3StorageDriverName {
tskip.SkipDynamo(t)
tskip.SkipS3(t)
uuid, err := guuid.NewV4()
if err != nil {
panic(err)
}
rootDir := path.Join("/oci-repo-test", uuid.String())
cacheDir := t.TempDir()
bucket := "zot-storage-test"
storageDriverParams := map[string]interface{}{
"rootDir": rootDir,
"name": "s3",
"region": region,
"bucket": bucket,
"regionendpoint": os.Getenv("S3MOCK_ENDPOINT"),
"accesskey": "minioadmin",
"secretkey": "minioadmin",
"secure": false,
"skipverify": false,
}
storeName := fmt.Sprintf("%v", storageDriverParams["name"])
store, err := factory.Create(storeName, storageDriverParams)
if err != nil {
panic(err)
}
defer store.Delete(context.Background(), rootDir) //nolint: errcheck
// create the bucket if it doesn't exist
_, err = resty.R().Put("http://" + os.Getenv("S3MOCK_ENDPOINT") + "/" + bucket)
if err != nil {
panic(err)
}
uuid, err = guuid.NewV4()
if err != nil {
panic(err)
}
params := dynamodb.DBDriverParameters{ //nolint:contextcheck
Endpoint: os.Getenv("DYNAMODBMOCK_ENDPOINT"),
Region: region,
RepoMetaTablename: "repo" + uuid.String(),
RepoBlobsInfoTablename: "repoblobsinfo" + uuid.String(),
ImageMetaTablename: "imagemeta" + uuid.String(),
UserDataTablename: "user" + uuid.String(),
APIKeyTablename: "apiKey" + uuid.String(),
VersionTablename: "version" + uuid.String(),
}
client, err := dynamodb.GetDynamoClient(params)
if err != nil {
panic(err)
}
metaDB, err = dynamodb.New(client, params, log)
if err != nil {
panic(err)
}
imgStore = s3.NewImageStore(rootDir, cacheDir, true, false, log, metrics, nil, store, nil)
} else {
// Create temporary directory
rootDir := t.TempDir()
// Create ImageStore
imgStore = local.NewImageStore(rootDir, false, false, log, metrics, nil, nil)
// init metaDB
params := boltdb.DBParameters{
RootDir: rootDir,
}
boltDriver, err := boltdb.GetBoltDriver(params)
if err != nil {
panic(err)
}
metaDB, err = boltdb.New(boltDriver, log)
if err != nil {
panic(err)
}
}
storeController := storage.StoreController{}
storeController.DefaultStore = imgStore
Convey("setup gc images", t, func() {
// for gc testing
// basic images
gcTest1 := CreateRandomImage()
err := WriteImageToFileSystem(gcTest1, "gc-test1", "0.0.1", storeController)
So(err, ShouldBeNil)
// also add the same image (same digest) under another tag
err = WriteImageToFileSystem(gcTest1, "gc-test1", "0.0.2", storeController)
So(err, ShouldBeNil)
gcTest2 := CreateRandomImage()
err = WriteImageToFileSystem(gcTest2, "gc-test2", "0.0.1", storeController)
So(err, ShouldBeNil)
gcTest3 := CreateRandomImage()
err = WriteImageToFileSystem(gcTest3, "gc-test3", "0.0.1", storeController)
So(err, ShouldBeNil)
// referrers
ref1 := CreateRandomImageWith().Subject(gcTest1.DescriptorRef()).Build()
err = WriteImageToFileSystem(ref1, "gc-test1", ref1.DigestStr(), storeController)
So(err, ShouldBeNil)
ref2 := CreateRandomImageWith().Subject(gcTest2.DescriptorRef()).Build()
err = WriteImageToFileSystem(ref2, "gc-test2", ref2.DigestStr(), storeController)
So(err, ShouldBeNil)
ref3 := CreateRandomImageWith().Subject(gcTest3.DescriptorRef()).Build()
err = WriteImageToFileSystem(ref3, "gc-test3", ref3.DigestStr(), storeController)
So(err, ShouldBeNil)
// referrers pointing to referrers
refOfRef1 := CreateRandomImageWith().Subject(ref1.DescriptorRef()).Build()
err = WriteImageToFileSystem(refOfRef1, "gc-test1", refOfRef1.DigestStr(), storeController)
So(err, ShouldBeNil)
refOfRef2 := CreateRandomImageWith().Subject(ref2.DescriptorRef()).Build()
err = WriteImageToFileSystem(refOfRef2, "gc-test2", refOfRef2.DigestStr(), storeController)
So(err, ShouldBeNil)
refOfRef3 := CreateRandomImageWith().Subject(ref3.DescriptorRef()).Build()
err = WriteImageToFileSystem(refOfRef3, "gc-test3", refOfRef3.DigestStr(), storeController)
So(err, ShouldBeNil)
// untagged images
gcUntagged1 := CreateRandomImage()
err = WriteImageToFileSystem(gcUntagged1, "gc-test1", gcUntagged1.DigestStr(), storeController)
So(err, ShouldBeNil)
gcUntagged2 := CreateRandomImage()
err = WriteImageToFileSystem(gcUntagged2, "gc-test2", gcUntagged2.DigestStr(), storeController)
So(err, ShouldBeNil)
gcUntagged3 := CreateRandomImage()
err = WriteImageToFileSystem(gcUntagged3, "gc-test3", gcUntagged3.DigestStr(), storeController)
So(err, ShouldBeNil)
// for image retention testing
// old images
gcOld1 := CreateRandomImage()
err = WriteImageToFileSystem(gcOld1, "retention", "0.0.1", storeController)
So(err, ShouldBeNil)
gcOld2 := CreateRandomImage()
err = WriteImageToFileSystem(gcOld2, "retention", "0.0.2", storeController)
So(err, ShouldBeNil)
gcOld3 := CreateRandomImage()
err = WriteImageToFileSystem(gcOld3, "retention", "0.0.3", storeController)
So(err, ShouldBeNil)
// new images
gcNew1 := CreateRandomImage()
err = WriteImageToFileSystem(gcNew1, "retention", "0.0.4", storeController)
So(err, ShouldBeNil)
gcNew2 := CreateRandomImage()
err = WriteImageToFileSystem(gcNew2, "retention", "0.0.5", storeController)
So(err, ShouldBeNil)
gcNew3 := CreateRandomImage()
err = WriteImageToFileSystem(gcNew3, "retention", "0.0.6", storeController)
So(err, ShouldBeNil)
err = meta.ParseStorage(metaDB, storeController, log)
So(err, ShouldBeNil)
retentionMeta, err := metaDB.GetRepoMeta(context.Background(), "retention")
So(err, ShouldBeNil)
// update timestamps for image retention
gcOld1Stats := retentionMeta.Statistics[gcOld1.DigestStr()]
gcOld1Stats.PushTimestamp = time.Now().Add(-10 * 24 * time.Hour)
gcOld1Stats.LastPullTimestamp = time.Now().Add(-10 * 24 * time.Hour)
gcOld2Stats := retentionMeta.Statistics[gcOld2.DigestStr()]
gcOld2Stats.PushTimestamp = time.Now().Add(-11 * 24 * time.Hour)
gcOld2Stats.LastPullTimestamp = time.Now().Add(-11 * 24 * time.Hour)
gcOld3Stats := retentionMeta.Statistics[gcOld3.DigestStr()]
gcOld3Stats.PushTimestamp = time.Now().Add(-12 * 24 * time.Hour)
gcOld3Stats.LastPullTimestamp = time.Now().Add(-12 * 24 * time.Hour)
gcNew1Stats := retentionMeta.Statistics[gcNew1.DigestStr()]
gcNew1Stats.PushTimestamp = time.Now().Add(-1 * 24 * time.Hour)
gcNew1Stats.LastPullTimestamp = time.Now().Add(-1 * 24 * time.Hour)
gcNew2Stats := retentionMeta.Statistics[gcNew2.DigestStr()]
gcNew2Stats.PushTimestamp = time.Now().Add(-2 * 24 * time.Hour)
gcNew2Stats.LastPullTimestamp = time.Now().Add(-2 * 24 * time.Hour)
gcNew3Stats := retentionMeta.Statistics[gcNew3.DigestStr()]
gcNew3Stats.PushTimestamp = time.Now().Add(-3 * 24 * time.Hour)
gcNew3Stats.LastPullTimestamp = time.Now().Add(-2 * 24 * time.Hour)
retentionMeta.Statistics[gcOld1.DigestStr()] = gcOld1Stats
retentionMeta.Statistics[gcOld2.DigestStr()] = gcOld2Stats
retentionMeta.Statistics[gcOld3.DigestStr()] = gcOld3Stats
retentionMeta.Statistics[gcNew1.DigestStr()] = gcNew1Stats
retentionMeta.Statistics[gcNew2.DigestStr()] = gcNew2Stats
retentionMeta.Statistics[gcNew3.DigestStr()] = gcNew3Stats
// update repo meta
err = metaDB.SetRepoMeta("retention", retentionMeta)
So(err, ShouldBeNil)
Convey("should not gc anything", func() {
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{},
},
},
},
},
}, audit, log)
err := gc.CleanRepo("gc-test1")
So(err, ShouldBeNil)
err = gc.CleanRepo("gc-test2")
So(err, ShouldBeNil)
err = gc.CleanRepo("gc-test3")
So(err, ShouldBeNil)
err = gc.CleanRepo("retention")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", gcTest1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", gcUntagged1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", ref1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", refOfRef1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test2", gcTest2.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test2", gcUntagged2.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test2", ref2.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test2", refOfRef2.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test3", gcTest3.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test3", gcUntagged3.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test3", ref3.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test3", refOfRef3.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.1")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.2")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.3")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.4")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.5")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.6")
So(err, ShouldBeNil)
})
Convey("gc untagged manifests", func() {
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: 1 * time.Millisecond,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{},
},
},
},
}, audit, log)
err := gc.CleanRepo("gc-test1")
So(err, ShouldBeNil)
err = gc.CleanRepo("gc-test2")
So(err, ShouldBeNil)
err = gc.CleanRepo("gc-test3")
So(err, ShouldBeNil)
err = gc.CleanRepo("retention")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", gcTest1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", gcUntagged1.DigestStr())
So(err, ShouldNotBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", ref1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", refOfRef1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test2", gcTest2.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test2", gcUntagged2.DigestStr())
So(err, ShouldNotBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test2", ref2.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test2", refOfRef2.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test3", gcTest3.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test3", gcUntagged3.DigestStr())
So(err, ShouldNotBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test3", ref3.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test3", refOfRef3.DigestStr())
So(err, ShouldBeNil)
})
Convey("gc all tags, untagged, and afterwards referrers", func() {
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: 1 * time.Millisecond,
ImageRetention: config.ImageRetention{
Delay: 1 * time.Millisecond,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"gc-test1"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{"v1"}, // should not match any tag
},
},
},
},
},
}, audit, log)
err := gc.CleanRepo("gc-test1")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", gcUntagged1.DigestStr())
So(err, ShouldNotBeNil)
// although we have two tags, both should be deleted
_, _, _, err = imgStore.GetImageManifest("gc-test1", gcTest1.DigestStr())
So(err, ShouldNotBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", ref1.DigestStr())
So(err, ShouldNotBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", refOfRef1.DigestStr())
So(err, ShouldNotBeNil)
// now repo should get gc'ed
repos, err := imgStore.GetRepositories()
So(err, ShouldBeNil)
So(repos, ShouldNotContain, "gc-test1")
So(repos, ShouldContain, "gc-test2")
So(repos, ShouldContain, "gc-test3")
So(repos, ShouldContain, "retention")
})
Convey("gc with dry-run all tags, untagged, and afterwards referrers", func() {
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: 1 * time.Millisecond,
ImageRetention: config.ImageRetention{
Delay: 1 * time.Millisecond,
DryRun: true,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"gc-test1"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{"v1"}, // should not match any tag
},
},
},
},
},
}, audit, log)
err := gc.CleanRepo("gc-test1")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", gcUntagged1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", ref1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", refOfRef1.DigestStr())
So(err, ShouldBeNil)
// now repo should not be gc'ed
repos, err := imgStore.GetRepositories()
So(err, ShouldBeNil)
So(repos, ShouldContain, "gc-test1")
So(repos, ShouldContain, "gc-test2")
So(repos, ShouldContain, "gc-test3")
So(repos, ShouldContain, "retention")
tags, err := imgStore.GetImageTags("gc-test1")
So(err, ShouldBeNil)
So(tags, ShouldContain, "0.0.1")
So(tags, ShouldContain, "0.0.2")
})
Convey("all tags matches for retention", func() {
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{"0.0.*"},
},
},
},
},
},
}, audit, log)
err = gc.CleanRepo("retention")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", "0.0.1")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", "0.0.2")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test2", "0.0.1")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test3", "0.0.1")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.1")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.2")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.3")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.4")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.5")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.6")
So(err, ShouldBeNil)
})
Convey("retain new tags", func() {
sevenDays := 7 * 24 * time.Hour
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{".*"},
PulledWithin: &sevenDays,
PushedWithin: &sevenDays,
},
},
},
},
},
}, audit, log)
err = gc.CleanRepo("retention")
So(err, ShouldBeNil)
tags, err := imgStore.GetImageTags("retention")
So(err, ShouldBeNil)
So(tags, ShouldContain, "0.0.4")
So(tags, ShouldContain, "0.0.5")
So(tags, ShouldContain, "0.0.6")
So(tags, ShouldNotContain, "0.0.1")
So(tags, ShouldNotContain, "0.0.2")
So(tags, ShouldNotContain, "0.0.3")
})
Convey("retain 3 most recently pushed images", func() {
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{".*"},
MostRecentlyPushedCount: 3,
},
},
},
},
},
}, audit, log)
err = gc.CleanRepo("retention")
So(err, ShouldBeNil)
tags, err := imgStore.GetImageTags("retention")
So(err, ShouldBeNil)
So(tags, ShouldContain, "0.0.4")
So(tags, ShouldContain, "0.0.5")
So(tags, ShouldContain, "0.0.6")
So(tags, ShouldNotContain, "0.0.1")
So(tags, ShouldNotContain, "0.0.2")
So(tags, ShouldNotContain, "0.0.3")
})
Convey("retain 3 most recently pulled images", func() {
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{".*"},
MostRecentlyPulledCount: 3,
},
},
},
},
},
}, audit, log)
err = gc.CleanRepo("retention")
So(err, ShouldBeNil)
tags, err := imgStore.GetImageTags("retention")
So(err, ShouldBeNil)
So(tags, ShouldContain, "0.0.4")
So(tags, ShouldContain, "0.0.5")
So(tags, ShouldContain, "0.0.6")
So(tags, ShouldNotContain, "0.0.1")
So(tags, ShouldNotContain, "0.0.2")
So(tags, ShouldNotContain, "0.0.3")
})
Convey("retain 3 most recently pulled OR 4 most recently pushed images", func() {
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{".*"},
MostRecentlyPulledCount: 3,
MostRecentlyPushedCount: 4,
},
},
},
},
},
}, audit, log)
err = gc.CleanRepo("retention")
So(err, ShouldBeNil)
tags, err := imgStore.GetImageTags("retention")
So(err, ShouldBeNil)
So(tags, ShouldContain, "0.0.1")
So(tags, ShouldContain, "0.0.4")
So(tags, ShouldContain, "0.0.5")
So(tags, ShouldContain, "0.0.6")
So(tags, ShouldNotContain, "0.0.2")
So(tags, ShouldNotContain, "0.0.3")
})
Convey("test if first match rule logic works", func() {
twoDays := 2 * 24 * time.Hour
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{"0.0.1"},
},
{
Patterns: []string{"0.0.2"},
},
{
Patterns: []string{".*"},
PulledWithin: &twoDays,
},
},
},
},
},
}, audit, log)
err = gc.CleanRepo("retention")
So(err, ShouldBeNil)
tags, err := imgStore.GetImageTags("retention")
So(err, ShouldBeNil)
t.Log(tags)
So(tags, ShouldContain, "0.0.1")
So(tags, ShouldContain, "0.0.2")
So(tags, ShouldContain, "0.0.4")
So(tags, ShouldNotContain, "0.0.3")
So(tags, ShouldNotContain, "0.0.5")
So(tags, ShouldNotContain, "0.0.6")
})
Convey("gc - do not match any repo", func() {
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: 1 * time.Millisecond,
ImageRetention: config.ImageRetention{
Delay: 1 * time.Millisecond,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"no-match"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
},
},
},
}, audit, log)
err := gc.CleanRepo("gc-test1")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", gcUntagged1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", ref1.DigestStr())
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("gc-test1", refOfRef1.DigestStr())
So(err, ShouldBeNil)
repos, err := imgStore.GetRepositories()
So(err, ShouldBeNil)
So(repos, ShouldContain, "gc-test1")
So(repos, ShouldContain, "gc-test2")
So(repos, ShouldContain, "gc-test3")
So(repos, ShouldContain, "retention")
})
Convey("remove one tag because it didn't match, preserve tags without statistics in metaDB", func() {
// add a new tag in the retention repo which cannot be found in metaDB; it should always be retained
err = WriteImageToFileSystem(CreateRandomImage(), "retention", "0.0.7", storeController)
So(err, ShouldBeNil)
gc := gc.NewGarbageCollect(imgStore, metaDB, gc.Options{
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
KeepTags: []config.KeepTagsPolicy{
{
Patterns: []string{"0.0.[1-5]"},
},
},
},
},
},
}, audit, log)
err = gc.CleanRepo("retention")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.1")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.2")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.3")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.4")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.5")
So(err, ShouldBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.6")
So(err, ShouldNotBeNil)
_, _, _, err = imgStore.GetImageManifest("retention", "0.0.7")
So(err, ShouldBeNil)
})
})
})
}
}

View file

@ -1167,7 +1167,7 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6
// Check blobs in cache
dstRecord, err := is.checkCacheBlob(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found")
is.log.Debug().Err(err).Str("digest", digest.String()).Msg("cache: not found")
return false, -1, zerr.ErrBlobNotFound
}
@ -1213,7 +1213,7 @@ func (is *ImageStore) StatBlob(repo string, digest godigest.Digest) (bool, int64
// Check blobs in cache
dstRecord, err := is.checkCacheBlob(digest)
if err != nil {
is.log.Error().Err(err).Str("digest", digest.String()).Msg("cache: not found")
is.log.Debug().Err(err).Str("digest", digest.String()).Msg("cache: not found")
return false, -1, time.Time{}, zerr.ErrBlobNotFound
}
@ -1540,7 +1540,8 @@ func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe
count := 0
for _, digest := range blobs {
is.log.Debug().Str("repository", repo).Str("digest", digest.String()).Msg("perform GC on blob")
is.log.Debug().Str("repository", repo).
Str("digest", digest.String()).Msg("perform GC on blob")
if err := is.deleteBlob(repo, digest); err != nil {
if errors.Is(err, zerr.ErrBlobReferenced) {
@ -1572,6 +1573,8 @@ func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe
// if the removeRepo flag is true, all blobs were cleaned up, and there are no blobs currently being uploaded.
if removeRepo && count == len(blobs) && count > 0 && len(blobUploads) == 0 {
is.log.Info().Str("repository", repo).Msg("removed all blobs, removing repo")
if err := is.storeDriver.Delete(path.Join(is.rootDir, repo)); err != nil {
is.log.Error().Err(err).Str("repository", repo).Msg("unable to remove repo")

View file

@ -293,7 +293,7 @@ func (driver *Driver) Link(src, dest string) error {
/* also update the modtime, so that gc won't remove recently linked blobs
otherwise ifBlobOlderThan(gcDelay) will return the modtime of the inode */
currentTime := time.Now().Local() //nolint: gosmopolitan
currentTime := time.Now() //nolint: gosmopolitan
if err := os.Chtimes(dest, currentTime, currentTime); err != nil {
return driver.formatErr(err)
}

View file

@ -29,7 +29,7 @@ import (
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/common"
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
zlog "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/scheduler"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
@ -47,10 +47,23 @@ const (
repoName = "test"
)
var trueVal bool = true //nolint: gochecknoglobals
var DeleteReferrers = config.ImageRetention{ //nolint: gochecknoglobals
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
},
},
}
var errCache = errors.New("new cache error")
func runAndGetScheduler() (*scheduler.Scheduler, context.CancelFunc) {
taskScheduler := scheduler.NewScheduler(config.New(), log.Logger{})
taskScheduler := scheduler.NewScheduler(config.New(), zlog.Logger{})
taskScheduler.RateLimit = 50 * time.Millisecond
ctx, cancel := context.WithCancel(context.Background())
@ -62,7 +75,7 @@ func runAndGetScheduler() (*scheduler.Scheduler, context.CancelFunc) {
func TestStorageFSAPIs(t *testing.T) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -199,7 +212,7 @@ func TestStorageFSAPIs(t *testing.T) {
func TestGetOrasReferrers(t *testing.T) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -258,7 +271,7 @@ func FuzzNewBlobUpload(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
t.Logf("Input argument is %s", data)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -284,7 +297,8 @@ func FuzzPutBlobChunk(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
t.Logf("Input argument is %s", data)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -317,7 +331,7 @@ func FuzzPutBlobChunkStreamed(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
t.Logf("Input argument is %s", data)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -349,7 +363,7 @@ func FuzzGetBlobUpload(f *testing.F) {
f.Fuzz(func(t *testing.T, data1 string, data2 string) {
dir := t.TempDir()
defer os.RemoveAll(dir)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -371,7 +385,7 @@ func FuzzGetBlobUpload(f *testing.F) {
func FuzzTestPutGetImageManifest(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
dir := t.TempDir()
@ -422,7 +436,7 @@ func FuzzTestPutGetImageManifest(f *testing.F) {
func FuzzTestPutDeleteImageManifest(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
dir := t.TempDir()
@ -480,7 +494,7 @@ func FuzzTestPutDeleteImageManifest(f *testing.F) {
// no integration with PutImageManifest, just throw fuzz data.
func FuzzTestDeleteImageManifest(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
dir := t.TempDir()
@ -515,7 +529,7 @@ func FuzzDirExists(f *testing.F) {
func FuzzInitRepo(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
dir := t.TempDir()
@ -539,7 +553,7 @@ func FuzzInitRepo(f *testing.F) {
func FuzzInitValidateRepo(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
dir := t.TempDir()
@ -570,7 +584,7 @@ func FuzzInitValidateRepo(f *testing.F) {
func FuzzGetImageTags(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
dir := t.TempDir()
@ -594,7 +608,7 @@ func FuzzGetImageTags(f *testing.F) {
func FuzzBlobUploadPath(f *testing.F) {
f.Fuzz(func(t *testing.T, repo, uuid string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
dir := t.TempDir()
@ -613,7 +627,7 @@ func FuzzBlobUploadPath(f *testing.F) {
func FuzzBlobUploadInfo(f *testing.F) {
f.Fuzz(func(t *testing.T, data string, uuid string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
dir := t.TempDir()
@ -642,7 +656,7 @@ func FuzzTestGetImageManifest(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -670,7 +684,7 @@ func FuzzFinishBlobUpload(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -714,7 +728,7 @@ func FuzzFinishBlobUpload(f *testing.F) {
func FuzzFullBlobUpload(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
repoName := "test"
@ -745,7 +759,7 @@ func FuzzFullBlobUpload(f *testing.F) {
func TestStorageCacheErrors(t *testing.T) {
Convey("get error in DedupeBlob() when cache.Put() deduped blob", t, func() {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
dir := t.TempDir()
@ -787,7 +801,7 @@ func TestStorageCacheErrors(t *testing.T) {
func FuzzDedupeBlob(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
dir := t.TempDir()
@ -827,7 +841,7 @@ func FuzzDedupeBlob(f *testing.F) {
func FuzzDeleteBlobUpload(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
repoName := data
@ -858,7 +872,7 @@ func FuzzDeleteBlobUpload(f *testing.F) {
func FuzzBlobPath(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
repoName := data
@ -879,7 +893,7 @@ func FuzzBlobPath(f *testing.F) {
func FuzzCheckBlob(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
repoName := data
@ -910,7 +924,7 @@ func FuzzCheckBlob(f *testing.F) {
func FuzzGetBlob(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
repoName := data
@ -948,7 +962,7 @@ func FuzzGetBlob(f *testing.F) {
func FuzzDeleteBlob(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
repoName := data
@ -983,7 +997,7 @@ func FuzzDeleteBlob(f *testing.F) {
func FuzzGetIndexContent(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
repoName := data
@ -1018,7 +1032,7 @@ func FuzzGetIndexContent(f *testing.F) {
func FuzzGetBlobContent(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
repoName := data
@ -1053,7 +1067,7 @@ func FuzzGetBlobContent(f *testing.F) {
func FuzzGetOrasReferrers(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := &log.Logger{Logger: zerolog.New(os.Stdout)}
log := &zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, *log)
dir := t.TempDir()
@ -1116,7 +1130,9 @@ func FuzzGetOrasReferrers(f *testing.F) {
func FuzzRunGCRepo(f *testing.F) {
f.Fuzz(func(t *testing.T, data string) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.NewLogger("debug", "")
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
dir := t.TempDir()
defer os.RemoveAll(dir)
@ -1129,10 +1145,9 @@ func FuzzRunGCRepo(f *testing.F) {
imgStore := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
ImageRetention: DeleteReferrers,
}, audit, log)
if err := gc.CleanRepo(data); err != nil {
t.Error(err)
@ -1155,7 +1170,7 @@ func TestDedupeLinks(t *testing.T) {
},
}
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
for _, testCase := range testCases {
@ -1520,7 +1535,7 @@ func TestDedupe(t *testing.T) {
Convey("Valid ImageStore", func() {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -1540,7 +1555,7 @@ func TestNegativeCases(t *testing.T) {
Convey("Invalid root dir", t, func(c C) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -1563,7 +1578,7 @@ func TestNegativeCases(t *testing.T) {
Convey("Invalid init repo", t, func(c C) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -1613,7 +1628,7 @@ func TestNegativeCases(t *testing.T) {
Convey("Invalid validate repo", t, func(c C) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -1725,7 +1740,7 @@ func TestNegativeCases(t *testing.T) {
Convey("Invalid get image tags", t, func(c C) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -1750,7 +1765,7 @@ func TestNegativeCases(t *testing.T) {
Convey("Invalid get image manifest", t, func(c C) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -1797,7 +1812,7 @@ func TestNegativeCases(t *testing.T) {
Convey("Invalid new blob upload", t, func(c C) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -1968,7 +1983,7 @@ func TestInjectWriteFile(t *testing.T) {
Convey("writeFile without commit", t, func() {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -1994,7 +2009,9 @@ func TestGarbageCollectForImageStore(t *testing.T) {
defer os.Remove(logFile.Name()) // clean up
log := log.NewLogger("debug", logFile.Name())
log := zlog.NewLogger("debug", logFile.Name())
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2006,10 +2023,9 @@ func TestGarbageCollectForImageStore(t *testing.T) {
repoName := "gc-all-repos-short"
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: 1 * time.Second,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
ImageRetention: DeleteReferrers,
}, audit, log)
image := CreateDefaultVulnerableImage()
err := WriteImageToFileSystem(image, repoName, "0.0.1", storage.StoreController{
@ -2039,7 +2055,9 @@ func TestGarbageCollectForImageStore(t *testing.T) {
defer os.Remove(logFile.Name()) // clean up
log := log.NewLogger("debug", logFile.Name())
log := zlog.NewLogger("debug", logFile.Name())
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2051,10 +2069,9 @@ func TestGarbageCollectForImageStore(t *testing.T) {
repoName := "gc-all-repos-short"
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: 1 * time.Second,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
ImageRetention: DeleteReferrers,
}, audit, log)
image := CreateDefaultVulnerableImage()
err := WriteImageToFileSystem(image, repoName, "0.0.1", storage.StoreController{
@ -2081,7 +2098,9 @@ func TestGarbageCollectForImageStore(t *testing.T) {
defer os.Remove(logFile.Name()) // clean up
log := log.NewLogger("debug", logFile.Name())
log := zlog.NewLogger("debug", logFile.Name())
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2092,10 +2111,9 @@ func TestGarbageCollectForImageStore(t *testing.T) {
repoName := "gc-sig"
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: 1 * time.Second,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
ImageRetention: DeleteReferrers,
}, audit, log)
storeController := storage.StoreController{DefaultStore: imgStore}
img := CreateRandomImage()
@ -2146,7 +2164,9 @@ func TestGarbageCollectImageUnknownManifest(t *testing.T) {
Convey("Garbage collect with short delay", t, func() {
dir := t.TempDir()
log := log.NewLogger("debug", "")
log := zlog.NewLogger("debug", "")
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2161,10 +2181,9 @@ func TestGarbageCollectImageUnknownManifest(t *testing.T) {
}
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: 1 * time.Second,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
ImageRetention: DeleteReferrers,
}, audit, log)
unsupportedMediaType := "application/vnd.oci.artifact.manifest.v1+json"
@ -2324,7 +2343,9 @@ func TestGarbageCollectErrors(t *testing.T) {
Convey("Make image store", t, func(c C) {
dir := t.TempDir()
log := log.NewLogger("debug", "")
log := zlog.NewLogger("debug", "")
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2336,10 +2357,9 @@ func TestGarbageCollectErrors(t *testing.T) {
repoName := "gc-index"
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: 500 * time.Millisecond,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
ImageRetention: DeleteReferrers,
}, audit, log)
// create a blob/layer
upload, err := imgStore.NewBlobUpload(repoName)
@ -2538,7 +2558,7 @@ func TestGarbageCollectErrors(t *testing.T) {
err = gc.CleanRepo(repoName)
So(err, ShouldBeNil)
// blob shouldn't be gc'ed
// blob shouldn't be gc'ed //TODO check this one
found, _, err := imgStore.CheckBlob(repoName, digest)
So(err, ShouldBeNil)
So(found, ShouldEqual, true)
@ -2566,7 +2586,7 @@ func TestInitRepo(t *testing.T) {
Convey("Get error when creating BlobUploadDir subdir on initRepo", t, func() {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2588,7 +2608,7 @@ func TestValidateRepo(t *testing.T) {
Convey("Get error when unable to read directory", t, func() {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2608,7 +2628,7 @@ func TestValidateRepo(t *testing.T) {
Convey("Get error when repo name is not compliant with repo spec", t, func() {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2653,7 +2673,7 @@ func TestGetRepositories(t *testing.T) {
Convey("Verify errors and repos returned by GetRepositories()", t, func() {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2748,7 +2768,7 @@ func TestGetRepositories(t *testing.T) {
Convey("Verify GetRepositories() doesn't return '.' when having an oci layout as root directory ", t, func() {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2794,7 +2814,7 @@ func TestGetRepositories(t *testing.T) {
err := os.Mkdir(rootDir, 0o755)
So(err, ShouldBeNil)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: rootDir,
@ -2838,7 +2858,7 @@ func TestGetRepositories(t *testing.T) {
func TestGetNextRepository(t *testing.T) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2891,7 +2911,7 @@ func TestPutBlobChunkStreamed(t *testing.T) {
Convey("Get error on opening file", t, func() {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -2918,7 +2938,7 @@ func TestPullRange(t *testing.T) {
Convey("Repo layout", t, func(c C) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
Convey("Negative cases", func() {
@ -2968,7 +2988,7 @@ func TestPullRange(t *testing.T) {
func TestStorageDriverErr(t *testing.T) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,

View file

@ -28,8 +28,9 @@ import (
"gopkg.in/resty.v1"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
zlog "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
storageCommon "zotregistry.io/zot/pkg/storage/common"
@ -44,6 +45,19 @@ import (
tskip "zotregistry.io/zot/pkg/test/skip"
)
var trueVal bool = true //nolint: gochecknoglobals
var DeleteReferrers = config.ImageRetention{ //nolint: gochecknoglobals
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
},
},
}
func cleanupStorage(store driver.StorageDriver, name string) {
_ = store.Delete(context.Background(), name)
}
@ -78,7 +92,7 @@ func createObjectsStore(rootDir string, cacheDir string) (
panic(err)
}
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
@ -129,7 +143,7 @@ func TestStorageAPIs(t *testing.T) {
} else {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
@ -741,7 +755,7 @@ func TestMandatoryAnnotations(t *testing.T) {
var testDir, tdir string
var store driver.StorageDriver
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
if testcase.storageType == storageConstants.S3StorageDriverName {
@ -865,7 +879,7 @@ func TestDeleteBlobsInUse(t *testing.T) {
var testDir, tdir string
var store driver.StorageDriver
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
if testcase.storageType == storageConstants.S3StorageDriverName {
@ -1165,7 +1179,7 @@ func TestReuploadCorruptedBlob(t *testing.T) {
var store driver.StorageDriver
var driver storageTypes.Driver
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
if testcase.storageType == storageConstants.S3StorageDriverName {
@ -1403,7 +1417,7 @@ func TestStorageHandler(t *testing.T) {
secondRootDir = t.TempDir()
thirdRootDir = t.TempDir()
log := log.NewLogger("debug", "")
log := zlog.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
@ -1462,7 +1476,9 @@ func TestGarbageCollectImageManifest(t *testing.T) {
for _, testcase := range testCases {
testcase := testcase
t.Run(testcase.testCaseName, func(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.NewLogger("debug", "")
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
Convey("Repo layout", t, func(c C) {
@ -1497,10 +1513,17 @@ func TestGarbageCollectImageManifest(t *testing.T) {
}
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
Delay: storageConstants.DefaultGCDelay,
ImageRetention: config.ImageRetention{
Delay: storageConstants.DefaultRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
},
},
},
}, audit, log)
repoName := "gc-long"
@ -1660,10 +1683,18 @@ func TestGarbageCollectImageManifest(t *testing.T) {
}
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: gcDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
Delay: gcDelay,
ImageRetention: config.ImageRetention{ //nolint: gochecknoglobals
Delay: gcDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
},
},
},
}, audit, log)
// upload orphan blob
upload, err := imgStore.NewBlobUpload(repoName)
@ -1970,10 +2001,9 @@ func TestGarbageCollectImageManifest(t *testing.T) {
}
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: gcDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
ImageRetention: DeleteReferrers,
}, audit, log)
// first upload an image to the first repo and wait for GC timeout
@ -2171,7 +2201,9 @@ func TestGarbageCollectImageIndex(t *testing.T) {
for _, testcase := range testCases {
testcase := testcase
t.Run(testcase.testCaseName, func(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.NewLogger("debug", "")
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
Convey("Repo layout", t, func(c C) {
@ -2206,10 +2238,9 @@ func TestGarbageCollectImageIndex(t *testing.T) {
}
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: storageConstants.DefaultGCDelay,
RetentionDelay: storageConstants.DefaultUntaggedImgeRetentionDelay,
}, log)
ImageRetention: DeleteReferrers,
}, audit, log)
repoName := "gc-long"
@ -2336,10 +2367,18 @@ func TestGarbageCollectImageIndex(t *testing.T) {
}
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: gcDelay,
RetentionDelay: imageRetentionDelay,
}, log)
Delay: gcDelay,
ImageRetention: config.ImageRetention{ //nolint: gochecknoglobals
Delay: imageRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
},
},
},
}, audit, log)
// upload orphan blob
upload, err := imgStore.NewBlobUpload(repoName)
@ -2608,7 +2647,9 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
for _, testcase := range testCases {
testcase := testcase
t.Run(testcase.testCaseName, func(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
log := zlog.NewLogger("debug", "")
audit := zlog.NewAuditLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
Convey("Garbage collect with short delay", t, func() {
@ -2646,10 +2687,18 @@ func TestGarbageCollectChainedImageIndexes(t *testing.T) {
}
gc := gc.NewGarbageCollect(imgStore, mocks.MetaDBMock{}, gc.Options{
Referrers: true,
Delay: gcDelay,
RetentionDelay: imageRetentionDelay,
}, log)
Delay: gcDelay,
ImageRetention: config.ImageRetention{ //nolint: gochecknoglobals
Delay: imageRetentionDelay,
Policies: []config.RetentionPolicy{
{
Repositories: []string{"**"},
DeleteReferrers: true,
DeleteUntagged: &trueVal,
},
},
},
}, audit, log)
// upload orphan blob
upload, err := imgStore.NewBlobUpload(repoName)

View file

@ -81,17 +81,24 @@ func TestUploadImage(t *testing.T) {
conf.HTTP.Port = port
conf.Storage.RootDirectory = tempDir
err := os.Chmod(tempDir, 0o400)
if err != nil {
t.Fatal(err)
}
ctlr := api.NewController(conf)
ctlrManager := tcommon.NewControllerManager(ctlr)
ctlrManager.StartAndWait(port)
defer ctlrManager.StopServer()
err := os.Chmod(tempDir, 0o400)
if err != nil {
t.Fatal(err)
}
defer func() {
err := os.Chmod(tempDir, 0o700)
if err != nil {
t.Fatal(err)
}
}()
img := Image{
Layers: make([][]byte, 10),
}

View file

@ -45,7 +45,7 @@ type MetaDBMock struct {
SetImageTrustStoreFn func(mTypes.ImageTrustStore)
SetRepoReferenceFn func(repo string, reference string, imageMeta mTypes.ImageMeta) error
SetRepoReferenceFn func(ctx context.Context, repo string, reference string, imageMeta mTypes.ImageMeta) error
SearchReposFn func(ctx context.Context, searchText string,
) ([]mTypes.RepoMeta, error)
@ -74,7 +74,7 @@ type MetaDBMock struct {
GetReferrersInfoFn func(repo string, referredDigest godigest.Digest, artifactTypes []string,
) ([]mTypes.ReferrerInfo, error)
IncrementImageDownloadsFn func(repo string, reference string) error
UpdateStatsOnDownloadFn func(repo string, reference string) error
UpdateSignaturesValidityFn func(repo string, manifestDigest godigest.Digest) error
@ -259,9 +259,11 @@ func (sdm MetaDBMock) SetImageMeta(digest godigest.Digest, imageMeta mTypes.Imag
return nil
}
func (sdm MetaDBMock) SetRepoReference(repo string, reference string, imageMeta mTypes.ImageMeta) error {
func (sdm MetaDBMock) SetRepoReference(ctx context.Context, repo string, reference string,
imageMeta mTypes.ImageMeta,
) error {
if sdm.SetRepoReferenceFn != nil {
return sdm.SetRepoReferenceFn(repo, reference, imageMeta)
return sdm.SetRepoReferenceFn(ctx, repo, reference, imageMeta)
}
return nil
@ -362,9 +364,9 @@ func (sdm MetaDBMock) GetReferrersInfo(repo string, referredDigest godigest.Dige
return []mTypes.ReferrerInfo{}, nil
}
func (sdm MetaDBMock) IncrementImageDownloads(repo string, reference string) error {
if sdm.IncrementImageDownloadsFn != nil {
return sdm.IncrementImageDownloadsFn(repo, reference)
func (sdm MetaDBMock) UpdateStatsOnDownload(repo string, reference string) error {
if sdm.UpdateStatsOnDownloadFn != nil {
return sdm.UpdateStatsOnDownloadFn(repo, reference)
}
return nil
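
A small usage sketch of the updated mock may also help: SetRepoReferenceFn is now context-aware and IncrementImageDownloadsFn has been replaced by UpdateStatsOnDownloadFn, as the hunks above show. The import paths and the sample repo/reference values are assumptions.

```
package main

import (
	"context"
	"fmt"

	// Import paths assumed from the zot module layout at the time of this commit.
	mTypes "zotregistry.io/zot/pkg/meta/types"
	"zotregistry.io/zot/pkg/test/mocks"
)

func main() {
	metaDB := mocks.MetaDBMock{
		// Matches the new context-aware signature from the hunk above.
		SetRepoReferenceFn: func(ctx context.Context, repo, reference string, imageMeta mTypes.ImageMeta) error {
			fmt.Printf("set %s:%s\n", repo, reference)
			return nil
		},
		// Replaces the former IncrementImageDownloadsFn.
		UpdateStatsOnDownloadFn: func(repo, reference string) error {
			fmt.Printf("downloaded %s:%s\n", repo, reference)
			return nil
		},
	}

	// Hypothetical calls; "a-repo" and "v1.0" are made-up values.
	_ = metaDB.SetRepoReference(context.Background(), "a-repo", "v1.0", mTypes.ImageMeta{})
	_ = metaDB.UpdateStatsOnDownload("a-repo", "v1.0")
}
```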

View file

@ -46,7 +46,7 @@ func InitializeTestMetaDB(ctx context.Context, metaDB mTypes.MetaDB, repos ...Re
statistics := map[string]mTypes.DescriptorStatistics{"": {}}
for _, image := range repo.Images {
err := metaDB.SetRepoReference(repo.Name, image.Reference, image.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo.Name, image.Reference, image.AsImageMeta())
if err != nil {
return uacContext, err
}
@ -56,7 +56,7 @@ func InitializeTestMetaDB(ctx context.Context, metaDB mTypes.MetaDB, repos ...Re
for _, multiArch := range repo.MultiArchImages {
for _, image := range multiArch.Images {
err := metaDB.SetRepoReference(repo.Name, image.DigestStr(), image.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo.Name, image.DigestStr(), image.AsImageMeta())
if err != nil {
return uacContext, err
}
@ -64,7 +64,7 @@ func InitializeTestMetaDB(ctx context.Context, metaDB mTypes.MetaDB, repos ...Re
statistics[image.DigestStr()] = multiArch.ImageStatistics[image.DigestStr()]
}
err := metaDB.SetRepoReference(repo.Name, multiArch.Reference, multiArch.AsImageMeta())
err := metaDB.SetRepoReference(ctx, repo.Name, multiArch.Reference, multiArch.AsImageMeta())
if err != nil {
return uacContext, err
}

View file

@ -34,10 +34,18 @@ function setup_file() {
"storage": {
"rootDirectory": "${zot_root_dir}",
"gc": true,
"gcReferrers": true,
"gcDelay": "30s",
"untaggedImageRetentionDelay": "40s",
"gcInterval": "1s"
"gcInterval": "1s",
"retention": {
"delay": "40s",
"policies": [
{
"repositories": ["**"],
"deleteReferrers": true,
"deleteUntagged": true
}
]
}
},
"http": {
"address": "0.0.0.0",

View file

@ -2,7 +2,7 @@
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot",
"gc": false,
"gc": true,
"dedupe": false,
"storageDriver": {
"name": "s3",

View file

@ -3,9 +3,7 @@
"storage": {
"rootDirectory": "/tmp/zot/local",
"gc": true,
"gcReferrers": false,
"gcDelay": "20s",
"untaggedImageRetentionDelay": "20s",
"gcInterval": "1s"
},
"http": {

View file

@ -3,9 +3,7 @@
"storage": {
"rootDirectory": "/tmp/zot/s3",
"gc": true,
"gcReferrers": false,
"gcDelay": "50m",
"untaggedImageRetentionDelay": "50m",
"gcInterval": "2m",
"storageDriver": {
"name": "s3",
@ -20,7 +18,13 @@
"name": "dynamodb",
"endpoint": "http://localhost:4566",
"region": "us-east-2",
"cacheTablename": "BlobTable"
"cacheTablename": "BlobTable",
"repoMetaTablename": "RepoMetadataTable",
"indexDataTablename": "IndexDataTable",
"manifestDataTablename": "ManifestDataTable",
"apikeytablename": "ApiKeyDataTable",
"userdatatablename": "UserDataTable",
"versionTablename": "VersionTable"
}
},
"http": {

View file

@ -3,9 +3,7 @@
"storage": {
"rootDirectory": "/tmp/zot/s3",
"gc": true,
"gcReferrers": false,
"gcDelay": "4m",
"untaggedImageRetentionDelay": "4m",
"gcInterval": "1s",
"storageDriver": {
"name": "s3",
@ -22,7 +20,13 @@
"name": "dynamodb",
"endpoint": "http://localhost:4566",
"region": "us-east-2",
"cacheTablename": "BlobTable"
"cacheTablename": "BlobTable",
"repoMetaTablename": "RepoMetadataTable",
"indexDataTablename": "IndexDataTable",
"manifestDataTablename": "ManifestDataTable",
"apikeytablename": "ApiKeyDataTable",
"userdatatablename": "UserDataTable",
"versionTablename": "VersionTable"
}
},
"http": {

View file

@ -3,10 +3,17 @@
"storage": {
"rootDirectory": "/tmp/zot/local",
"gc": true,
"gcReferrers": true,
"gcDelay": "20s",
"untaggedImageRetentionDelay": "20s",
"gcInterval": "1s"
"gcInterval": "1s",
"retention": {
"delay": "20s",
"policies": [
{
"repositories": ["**"],
"deleteReferrers": true
}
]
}
},
"http": {
"address": "127.0.0.1",

View file

@ -3,10 +3,17 @@
"storage": {
"rootDirectory": "/tmp/zot/s3",
"gc": true,
"gcReferrers": true,
"gcDelay": "50m",
"untaggedImageRetentionDelay": "50m",
"gcInterval": "2m",
"retention": {
"delay": "50m",
"policies": [
{
"repositories": ["**"],
"deleteReferrers": true
}
]
},
"storageDriver": {
"name": "s3",
"rootdirectory": "/zot",
@ -20,7 +27,13 @@
"name": "dynamodb",
"endpoint": "http://localhost:4566",
"region": "us-east-2",
"cacheTablename": "BlobTable"
"cacheTablename": "BlobTable",
"repoMetaTablename": "RepoMetadataTable",
"indexDataTablename": "IndexDataTable",
"manifestDataTablename": "ManifestDataTable",
"apikeytablename": "ApiKeyDataTable",
"userdatatablename": "UserDataTable",
"versionTablename": "VersionTable"
}
},
"http": {

View file

@ -3,10 +3,17 @@
"storage": {
"rootDirectory": "/tmp/zot/s3",
"gc": true,
"gcReferrers": true,
"gcDelay": "4m",
"untaggedImageRetentionDelay": "4m",
"gcInterval": "1s",
"retention": {
"delay": "4m",
"policies": [
{
"repositories": ["**"],
"deleteReferrers": true
}
]
},
"storageDriver": {
"name": "s3",
"rootdirectory": "/zot",
@ -22,7 +29,13 @@
"name": "dynamodb",
"endpoint": "http://localhost:4566",
"region": "us-east-2",
"cacheTablename": "BlobTable"
"cacheTablename": "BlobTable",
"repoMetaTablename": "RepoMetadataTable",
"indexDataTablename": "IndexDataTable",
"manifestDataTablename": "ManifestDataTable",
"apikeytablename": "ApiKeyDataTable",
"userdatatablename": "UserDataTable",
"versionTablename": "VersionTable"
}
},
"http": {