
refactor(cache): rewrote/refactored cachedb functionality to use interface (#667)

Moved boltdb to a driver implementation of this interface
Added CreateCacheDatabaseDriver in controller
Fixed default directory creation (boltDB will only create the file, not the dir)
Added coverage tests
Added example config for boltdb
Re-added caching on subpaths, rewrote CreateCacheDatabaseDriver
Fixed tests
Made cacheDriver argument mandatory for NewImageStore, added more validation, added defaults
Moved cache interface to own file, removed useRelPaths from config
Got rid of cache config, refactored
Moved cache to own package and folder
Renamed + removed cache factory to backend, replaced CloudCache with RemoteCache
Moved storage constants back to storage package
Moved cache interface and factory to storage package, changed remoteCache defaulting

Signed-off-by: Catalin Hofnar <catalin.hofnar@gmail.com>
This commit is contained in:
Catalin-George Hofnar 2022-11-03 00:53:08 +02:00 committed by GitHub
parent e6539290d4
commit 4170d2adbc
31 changed files with 1204 additions and 468 deletions
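The net effect of the refactor: callers now build a cache driver through the storage factory and pass it explicitly to the image store constructor, instead of the store creating its own boltdb cache whenever dedupe is enabled. Below is a minimal sketch of that wiring, assembled from the test code in this commit; the /tmp/zot root directory and the driver parameter values are illustrative.

package main

import (
	"zotregistry.io/zot/pkg/extensions/monitoring"
	zlog "zotregistry.io/zot/pkg/log"
	"zotregistry.io/zot/pkg/storage"
	"zotregistry.io/zot/pkg/storage/cache"
	"zotregistry.io/zot/pkg/storage/local"
)

func main() {
	logger := zlog.NewLogger("debug", "")
	metrics := monitoring.NewMetricsServer(false, logger)

	// Build the cache driver explicitly through the storage factory;
	// before this change the image store created its own boltdb cache when dedupe was on.
	cacheDriver, err := storage.Create("boltdb", cache.BoltDBDriverParameters{
		RootDir:     "/tmp/zot", // directory in which the cache db file is created
		Name:        "cache",    // file name; the ".db" extension is appended
		UseRelPaths: true,       // relative blob paths for filesystem storage, false for s3
	}, logger)
	if err != nil || cacheDriver == nil {
		panic("unable to create cache driver")
	}

	// The cache driver is now an explicit (last) argument of NewImageStore;
	// the linter argument is nil here, as in the tests of this commit.
	imgStore := local.NewImageStore("/tmp/zot", false, storage.DefaultGCDelay,
		true, false, logger, metrics, nil, cacheDriver)
	_ = imgStore
}

Passing nil for the cache driver leaves the store running without dedupe caching, which is what CreateCacheDatabaseDriver returns when dedupe is disabled or a remote cache is requested.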

View file

@ -0,0 +1,15 @@
{
"distSpecVersion": "1.0.1-dev",
"storage": {
"rootDirectory": "/tmp/zot",
"dedupe": true,
"remoteCache": false
},
"http": {
"address": "127.0.0.1",
"port": "8080"
},
"log": {
"level": "debug"
}
}

View file

@ -22,8 +22,9 @@ var (
type StorageConfig struct {
RootDirectory string
GC bool
Dedupe bool
RemoteCache bool
GC bool
Commit bool
GCDelay time.Duration
GCInterval time.Duration
@ -95,13 +96,7 @@ type LogConfig struct {
}
type GlobalStorageConfig struct {
Dedupe bool
GC bool
Commit bool
GCDelay time.Duration
GCInterval time.Duration
RootDirectory string
StorageDriver map[string]interface{} `mapstructure:",omitempty"`
StorageConfig `mapstructure:",squash"`
SubPaths map[string]StorageConfig
}
@ -143,9 +138,11 @@ func New() *Config {
Commit: Commit,
ReleaseTag: ReleaseTag,
BinaryType: BinaryType,
Storage: GlobalStorageConfig{GC: true, GCDelay: storage.DefaultGCDelay, Dedupe: true},
HTTP: HTTPConfig{Address: "127.0.0.1", Port: "8080", Auth: &AuthConfig{FailDelay: 0}},
Log: &LogConfig{Level: "debug"},
Storage: GlobalStorageConfig{
StorageConfig: StorageConfig{GC: true, GCDelay: storage.DefaultGCDelay, Dedupe: true},
},
HTTP: HTTPConfig{Address: "127.0.0.1", Port: "8080", Auth: &AuthConfig{FailDelay: 0}},
Log: &LogConfig{Level: "debug"},
}
}

View file

@ -26,6 +26,8 @@ import (
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/scheduler"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/local"
"zotregistry.io/zot/pkg/storage/s3"
)
@ -270,6 +272,7 @@ func (c *Controller) InitImageStore(reloadCtx context.Context) error {
defaultStore = local.NewImageStore(c.Config.Storage.RootDirectory,
c.Config.Storage.GC, c.Config.Storage.GCDelay,
c.Config.Storage.Dedupe, c.Config.Storage.Commit, c.Log, c.Metrics, linter,
CreateCacheDatabaseDriver(c.Config.Storage.StorageConfig, c.Log),
)
} else {
storeName := fmt.Sprintf("%v", c.Config.Storage.StorageDriver["name"])
@ -296,7 +299,8 @@ func (c *Controller) InitImageStore(reloadCtx context.Context) error {
//nolint: typecheck
defaultStore = s3.NewImageStore(rootDir, c.Config.Storage.RootDirectory,
c.Config.Storage.GC, c.Config.Storage.GCDelay, c.Config.Storage.Dedupe,
c.Config.Storage.Commit, c.Log, c.Metrics, linter, store)
c.Config.Storage.Commit, c.Log, c.Metrics, linter, store,
CreateCacheDatabaseDriver(c.Config.Storage.StorageConfig, c.Log))
}
c.StoreController.DefaultStore = defaultStore
@ -374,7 +378,8 @@ func (c *Controller) getSubStore(subPaths map[string]config.StorageConfig,
// Create a new image store and assign it to imgStoreMap
if isUnique {
imgStoreMap[storageConfig.RootDirectory] = local.NewImageStore(storageConfig.RootDirectory,
storageConfig.GC, storageConfig.GCDelay, storageConfig.Dedupe, storageConfig.Commit, c.Log, c.Metrics, linter)
storageConfig.GC, storageConfig.GCDelay, storageConfig.Dedupe,
storageConfig.Commit, c.Log, c.Metrics, linter, CreateCacheDatabaseDriver(storageConfig, c.Log))
subImageStore[route] = imgStoreMap[storageConfig.RootDirectory]
}
@ -404,6 +409,7 @@ func (c *Controller) getSubStore(subPaths map[string]config.StorageConfig,
subImageStore[route] = s3.NewImageStore(rootDir, storageConfig.RootDirectory,
storageConfig.GC, storageConfig.GCDelay,
storageConfig.Dedupe, storageConfig.Commit, c.Log, c.Metrics, linter, store,
CreateCacheDatabaseDriver(storageConfig, c.Log),
)
}
}
@ -421,6 +427,29 @@ func compareImageStore(root1, root2 string) bool {
return isSameFile
}
func getUseRelPaths(storageConfig *config.StorageConfig) bool {
return storageConfig.StorageDriver == nil
}
func CreateCacheDatabaseDriver(storageConfig config.StorageConfig, log log.Logger) cache.Cache {
if storageConfig.Dedupe {
if !storageConfig.RemoteCache {
params := cache.BoltDBDriverParameters{}
params.RootDir = storageConfig.RootDirectory
params.Name = storageConstants.BoltdbName
params.UseRelPaths = getUseRelPaths(&storageConfig)
driver, _ := storage.Create("boltdb", params, log)
return driver
}
// used for tests, dynamodb when it comes
return nil
}
return nil
}
func (c *Controller) LoadNewConfig(reloadCtx context.Context, config *config.Config) {
// reload access control config
c.Config.AccessControl = config.AccessControl

View file

@ -49,6 +49,7 @@ import (
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/api/constants"
extconf "zotregistry.io/zot/pkg/extensions/config"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/local"
"zotregistry.io/zot/pkg/test"
@ -106,6 +107,33 @@ func TestNew(t *testing.T) {
})
}
func TestCreateCacheDatabaseDriver(t *testing.T) {
Convey("Test CreateCacheDatabaseDriver", t, func() {
log := log.NewLogger("debug", "")
// fail create db, no perm
dir := t.TempDir()
conf := config.New()
conf.Storage.RootDirectory = dir
conf.Storage.Dedupe = true
conf.Storage.RemoteCache = false
err := os.Chmod(dir, 0o000)
if err != nil {
panic(err)
}
driver := api.CreateCacheDatabaseDriver(conf.Storage.StorageConfig, log)
So(driver, ShouldBeNil)
conf.Storage.RemoteCache = true
conf.Storage.RootDirectory = t.TempDir()
driver = api.CreateCacheDatabaseDriver(conf.Storage.StorageConfig, log)
So(driver, ShouldBeNil)
})
}
func TestRunAlreadyRunningServer(t *testing.T) {
Convey("Run server on unavailable port", t, func() {
port := test.GetFreePort()
@ -3180,6 +3208,7 @@ func TestCrossRepoMount(t *testing.T) {
panic(err)
}
ctlr.Config.Storage.RootDirectory = dir
ctlr.Config.Storage.RemoteCache = false
go startServer(ctlr)
defer stopServer(ctlr)
@ -5746,6 +5775,7 @@ func TestInjectTooManyOpenFiles(t *testing.T) {
ctlr := api.NewController(conf)
dir := t.TempDir()
ctlr.Config.Storage.RootDirectory = dir
conf.Storage.RemoteCache = false
go startServer(ctlr)
defer stopServer(ctlr)
@ -5981,6 +6011,7 @@ func TestPeriodicGC(t *testing.T) {
baseURL := test.GetBaseURL(port)
conf := config.New()
conf.HTTP.Port = port
conf.Storage.RemoteCache = false
logFile, err := os.CreateTemp("", "zot-log*.txt")
So(err, ShouldBeNil)
@ -6032,7 +6063,7 @@ func TestPeriodicGC(t *testing.T) {
subPaths := make(map[string]config.StorageConfig)
subPaths["/a"] = config.StorageConfig{RootDirectory: subDir, GC: true, GCDelay: 1 * time.Second, GCInterval: 24 * time.Hour} //nolint:lll // gofumpt conflicts with lll
subPaths["/a"] = config.StorageConfig{RootDirectory: subDir, GC: true, GCDelay: 1 * time.Second, GCInterval: 24 * time.Hour, RemoteCache: false} //nolint:lll // gofumpt conflicts with lll
ctlr.Config.Storage.SubPaths = subPaths
ctlr.Config.Storage.RootDirectory = dir
@ -6045,10 +6076,10 @@ func TestPeriodicGC(t *testing.T) {
So(err, ShouldBeNil)
// periodic GC is not enabled for default store
So(string(data), ShouldContainSubstring,
"\"GCDelay\":3600000000000,\"GCInterval\":0,\"RootDirectory\":\""+dir+"\"")
"\"GCDelay\":3600000000000,\"GCInterval\":0,\"")
// periodic GC is enabled for sub store
So(string(data), ShouldContainSubstring,
fmt.Sprintf("\"SubPaths\":{\"/a\":{\"RootDirectory\":\"%s\",\"GC\":true,\"Dedupe\":false,\"Commit\":false,\"GCDelay\":1000000000,\"GCInterval\":86400000000000", subDir)) //nolint:lll // gofumpt conflicts with lll
fmt.Sprintf("\"SubPaths\":{\"/a\":{\"RootDirectory\":\"%s\",\"Dedupe\":false,\"RemoteCache\":false,\"GC\":true,\"Commit\":false,\"GCDelay\":1000000000,\"GCInterval\":86400000000000", subDir)) //nolint:lll // gofumpt conflicts with lll
})
}

View file

@ -5,6 +5,8 @@ import (
"fmt"
"net"
"net/http"
"os"
"path"
"strconv"
"strings"
"time"
@ -23,6 +25,8 @@ import (
extconf "zotregistry.io/zot/pkg/extensions/config"
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/storage"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/s3"
)
// metadataConfig reports metadata after parsing, which we use to track
@ -324,6 +328,7 @@ func validateAuthzPolicies(config *config.Config) error {
return nil
}
//nolint:gocyclo
func applyDefaultValues(config *config.Config, viperInstance *viper.Viper) {
defaultVal := true
@ -401,6 +406,51 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper) {
if !config.Storage.GC && viperInstance.Get("storage::gcdelay") == nil {
config.Storage.GCDelay = 0
}
// cache
// global storage
// if dedupe is true but remoteCache bool not set in config file
// for cloud based storage, remoteCache defaults to true
if config.Storage.Dedupe && !viperInstance.IsSet("storage::remotecache") && config.Storage.StorageDriver != nil {
config.Storage.RemoteCache = true
}
// s3 dedup=false, check for previous dedup usage and set to true if cachedb found
if !config.Storage.Dedupe && config.Storage.StorageDriver != nil {
cacheDir, _ := config.Storage.StorageDriver["rootdirectory"].(string)
cachePath := path.Join(cacheDir, s3.CacheDBName+storageConstants.DBExtensionName)
if _, err := os.Stat(cachePath); err == nil {
log.Info().Msg("Config: dedupe set to false for s3 driver but used to be true.")
log.Info().Str("cache path", cachePath).Msg("found cache database")
config.Storage.RemoteCache = false
}
}
// subpaths
for name, storageConfig := range config.Storage.SubPaths {
// if dedupe is true but remoteCache bool not set in config file
// for cloud based storage, remoteCache defaults to true
if storageConfig.Dedupe && !viperInstance.IsSet("storage::subpaths::"+name+"::remotecache") && storageConfig.StorageDriver != nil { //nolint:lll
storageConfig.RemoteCache = true
}
// s3 dedup=false, check for previous dedup usage and set to true if cachedb found
if !storageConfig.Dedupe && storageConfig.StorageDriver != nil {
subpathCacheDir, _ := storageConfig.StorageDriver["rootdirectory"].(string)
subpathCachePath := path.Join(subpathCacheDir, s3.CacheDBName+storageConstants.DBExtensionName)
if _, err := os.Stat(subpathCachePath); err == nil {
log.Info().Msg("Config: dedupe set to false for s3 driver but used to be true. ")
log.Info().Str("cache path", subpathCachePath).Msg("found cache database")
storageConfig.RemoteCache = false
}
}
}
}
func updateDistSpecVersion(config *config.Config) {

View file

@ -16,6 +16,8 @@ import (
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/cli"
"zotregistry.io/zot/pkg/storage"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/s3"
. "zotregistry.io/zot/pkg/test"
)
@ -110,6 +112,112 @@ func TestVerify(t *testing.T) {
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
})
Convey("Test cached db config", t, func(c C) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
// dedup true, can't parse database type
content := []byte(`{"storage":{"rootDirectory":"/tmp/zot", "dedupe": true,
"cache": {"type": 123}},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}}}`)
err = os.WriteFile(tmpfile.Name(), content, 0o0600)
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
// dedup true, wrong database type
content = []byte(`{"storage":{"rootDirectory":"/tmp/zot", "dedupe": true,
"cache": {"type": "wrong"}},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}}}`)
err = os.WriteFile(tmpfile.Name(), content, 0o0600)
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
// SubPaths
// dedup true, wrong database type
content = []byte(`{"storage":{"rootDirectory":"/tmp/zot", "dedupe": false,
"subPaths": {"/a": {"rootDirectory": "/zot-a", "dedupe": true,
"cache": {"type": "wrong"}}}},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}}}`)
err = os.WriteFile(tmpfile.Name(), content, 0o0600)
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
// dedup true, can't parse database type
content = []byte(`{"storage":{"rootDirectory":"/tmp/zot", "dedupe": false,
"subPaths": {"/a": {"rootDirectory": "/zot-a", "dedupe": true,
"cache": {"type": 123}}}},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}}}`)
err = os.WriteFile(tmpfile.Name(), content, 0o0600)
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
})
Convey("Test apply defaults cache db", t, func(c C) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(tmpfile.Name()) // clean up
// s3 dedup=false, check for previous dedup usage and set to true if cachedb found
cacheDir := t.TempDir()
existingDBPath := path.Join(cacheDir, s3.CacheDBName+storageConstants.DBExtensionName)
_, err = os.Create(existingDBPath)
So(err, ShouldBeNil)
content := []byte(`{"storage":{"rootDirectory":"/tmp/zot", "dedupe": false,
"storageDriver": {"rootDirectory": "` + cacheDir + `"}},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}}}`)
err = os.WriteFile(tmpfile.Name(), content, 0o0600)
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
// subpath s3 dedup=false, check for previous dedup usage and set to true if cachedb found
cacheDir = t.TempDir()
existingDBPath = path.Join(cacheDir, s3.CacheDBName+storageConstants.DBExtensionName)
_, err = os.Create(existingDBPath)
So(err, ShouldBeNil)
content = []byte(`{"storage":{"rootDirectory":"/tmp/zot", "dedupe": true,
"subpaths": {"/a": {"rootDirectory":"/tmp/zot1", "dedupe": false,
"storageDriver": {"rootDirectory": "` + cacheDir + `"}}}},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}}}`)
err = os.WriteFile(tmpfile.Name(), content, 0o0600)
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
// subpath s3 dedup=false, check for previous dedup usage and set to true if cachedb found
cacheDir = t.TempDir()
content = []byte(`{"storage":{"rootDirectory":"/tmp/zot", "dedupe": true,
"subpaths": {"/a": {"rootDirectory":"/tmp/zot1", "dedupe": true,
"storageDriver": {"rootDirectory": "` + cacheDir + `"}}}},
"http":{"address":"127.0.0.1","port":"8080","realm":"zot",
"auth":{"htpasswd":{"path":"test/data/htpasswd"},"failDelay":1}}}`)
err = os.WriteFile(tmpfile.Name(), content, 0o0600)
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "verify", tmpfile.Name()}
So(func() { _ = cli.NewServerRootCmd().Execute() }, ShouldPanic)
})
Convey("Test verify storage driver different than s3", t, func(c C) {
tmpfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)

View file

@ -495,7 +495,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
indexContent, err := imgStore.GetIndexContent("zot-test")
So(err, ShouldBeNil)
@ -528,7 +528,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
indexContent, err := imgStore.GetIndexContent("zot-test")
So(err, ShouldBeNil)
@ -599,7 +599,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
So(err, ShouldBeNil)
@ -662,7 +662,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
So(err, ShouldBeNil)
@ -727,7 +727,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
pass, err := linter.CheckMandatoryAnnotations("zot-test", digest, imgStore)
So(err, ShouldBeNil)
@ -791,7 +791,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
err = os.Chmod(path.Join(dir, "zot-test", "blobs"), 0o000)
if err != nil {
@ -890,7 +890,7 @@ func TestVerifyMandatoryAnnotationsFunction(t *testing.T) {
linter := lint.NewLinter(lintConfig, log.NewLogger("debug", ""))
imgStore := local.NewImageStore(dir, false, 0, false, false,
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter)
log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), linter, nil)
err = os.Chmod(path.Join(dir, "zot-test", "blobs", "sha256", manifest.Config.Digest.Encoded()), 0o000)
if err != nil {

View file

@ -21,6 +21,8 @@ import (
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/extensions/scrub"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
"zotregistry.io/zot/pkg/storage/local"
"zotregistry.io/zot/pkg/test"
)
@ -239,8 +241,13 @@ func TestRunScrubRepo(t *testing.T) {
dir := t.TempDir()
log := log.NewLogger("debug", logFile.Name())
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second, true,
true, log, metrics, nil)
true, log, metrics, nil, cacheDriver)
err = test.CopyFiles("../../../test/data/zot-test", path.Join(dir, repoName))
if err != nil {
@ -269,8 +276,13 @@ func TestRunScrubRepo(t *testing.T) {
dir := t.TempDir()
log := log.NewLogger("debug", logFile.Name())
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second, true,
true, log, metrics, nil)
true, log, metrics, nil, cacheDriver)
err = test.CopyFiles("../../../test/data/zot-test", path.Join(dir, repoName))
if err != nil {
@ -305,8 +317,14 @@ func TestRunScrubRepo(t *testing.T) {
dir := t.TempDir()
log := log.NewLogger("debug", logFile.Name())
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver,
)
err = test.CopyFiles("../../../test/data/zot-test", path.Join(dir, repoName))
if err != nil {

View file

@ -1029,10 +1029,10 @@ func TestUtilsMethod(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)
defaultStore := local.NewImageStore(rootDir, false,
storage.DefaultGCDelay, false, false, log, metrics, nil)
storage.DefaultGCDelay, false, false, log, metrics, nil, nil)
subStore := local.NewImageStore(subRootDir, false,
storage.DefaultGCDelay, false, false, log, metrics, nil)
storage.DefaultGCDelay, false, false, log, metrics, nil, nil)
subStoreMap := make(map[string]storage.ImageStore)

View file

@ -88,7 +88,7 @@ func testSetup() error {
conf.Extensions = &extconf.ExtensionConfig{}
conf.Extensions.Lint = &extconf.LintConfig{}
storeController := storage.StoreController{DefaultStore: local.NewImageStore(dir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)}
storeController := storage.StoreController{DefaultStore: local.NewImageStore(dir, false, storage.DefaultGCDelay, false, false, log, metrics, nil, nil)}
layoutUtils := common.NewBaseOciLayoutUtils(storeController, log)
scanner := trivy.NewScanner(storeController, layoutUtils, log)
@ -332,7 +332,7 @@ func TestImageFormat(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)
defaultStore := local.NewImageStore(dbDir, false, storage.DefaultGCDelay,
false, false, log, metrics, nil)
false, false, log, metrics, nil, nil)
storeController := storage.StoreController{DefaultStore: defaultStore}
cveInfo := cveinfo.NewCVEInfo(storeController, log)

View file

@ -66,11 +66,11 @@ func TestMultipleStoragePath(t *testing.T) {
conf.Extensions.Lint = &extconf.LintConfig{}
// Create ImageStore
firstStore := local.NewImageStore(firstRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)
firstStore := local.NewImageStore(firstRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil, nil)
secondStore := local.NewImageStore(secondRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)
secondStore := local.NewImageStore(secondRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil, nil)
thirdStore := local.NewImageStore(thirdRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil)
thirdStore := local.NewImageStore(thirdRootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil, nil)
storeController := storage.StoreController{}

View file

@ -85,7 +85,7 @@ func testSetup(t *testing.T) (string, string, *digestinfo.DigestInfo) {
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
storeController := storage.StoreController{
DefaultStore: local.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil),
DefaultStore: local.NewImageStore(rootDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil, nil),
}
digestInfo := digestinfo.NewDigestInfo(storeController, log)

View file

@ -266,7 +266,7 @@ func TestUserAvailableRepos(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
dir := t.TempDir()
metrics := monitoring.NewMetricsServer(false, log)
defaultStore := local.NewImageStore(dir, false, 0, false, false, log, metrics, nil)
defaultStore := local.NewImageStore(dir, false, 0, false, false, log, metrics, nil, nil)
repoList, err := defaultStore.GetRepositories()
So(err, ShouldBeNil)

View file

@ -65,7 +65,7 @@ func TestInjectSyncUtils(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imageStore := local.NewImageStore(t.TempDir(), false, storage.DefaultGCDelay,
false, false, log, metrics, nil,
false, false, log, metrics, nil, nil,
)
injected = test.InjectFailure(0)
@ -164,7 +164,7 @@ func TestSyncInternal(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)
imageStore := local.NewImageStore(t.TempDir(), false, storage.DefaultGCDelay,
false, false, log, metrics, nil)
false, false, log, metrics, nil, nil)
err := os.Chmod(imageStore.RootDir(), 0o000)
So(err, ShouldBeNil)
@ -346,7 +346,7 @@ func TestSyncInternal(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)
imageStore := local.NewImageStore(storageDir, false, storage.DefaultGCDelay,
false, false, log, metrics, nil)
false, false, log, metrics, nil, nil)
refs := ReferenceList{[]artifactspec.Descriptor{
{
@ -439,7 +439,7 @@ func TestSyncInternal(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)
imageStore := local.NewImageStore(storageDir, false, storage.DefaultGCDelay,
false, false, log, metrics, nil)
false, false, log, metrics, nil, nil)
storeController := storage.StoreController{}
storeController.DefaultStore = imageStore
@ -461,7 +461,7 @@ func TestSyncInternal(t *testing.T) {
}
testImageStore := local.NewImageStore(testRootDir, false,
storage.DefaultGCDelay, false, false, log, metrics, nil)
storage.DefaultGCDelay, false, false, log, metrics, nil, nil)
manifestContent, _, _, err := testImageStore.GetImageManifest(testImage, testImageTag)
So(err, ShouldBeNil)
@ -538,7 +538,7 @@ func TestSyncInternal(t *testing.T) {
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storage.ImageStore) (bool, error) {
return false, nil
},
},
}, nil,
)
err = pushSyncedLocalImage(repo, "latest", testRootDir, imageStoreWithLinter, log)

View file

@ -336,7 +336,7 @@ func pushSyncedLocalImage(localRepo, reference, localCachePath string,
metrics := monitoring.NewMetricsServer(false, log)
cacheImageStore := local.NewImageStore(localCachePath, false,
storage.DefaultGCDelay, false, false, log, metrics, nil)
storage.DefaultGCDelay, false, false, log, metrics, nil, nil)
manifestContent, _, mediaType, err := cacheImageStore.GetImageManifest(localRepo, reference)
if err != nil {

View file

@ -3,3 +3,7 @@
1. **local** - a locally mounted filesystem
2. **remote** - a remote filesystem such as AWS S3
The cache database can be configured independently of storage. Right now, `zot` supports the following database implementations:
1. **BoltDB** - local storage. Set the "remoteCache" field in the config file to false. Example: examples/config-boltdb.json

View file

@ -1,301 +1,20 @@
package storage
import (
"path"
"path/filepath"
"strings"
"time"
godigest "github.com/opencontainers/go-digest"
"go.etcd.io/bbolt"
"zotregistry.io/zot/errors"
zlog "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage/cache"
)
const (
// global bucket.
BlobsCache = "blobs"
// bucket where we store all blobs from storage(deduped blobs + original blob).
DuplicatesBucket = "duplicates"
/* bucket where we store only the original/source blob (used by s3 to know which is the blob with content)
it should contain only one blob, this is the only place from which we'll get blobs. */
OriginalBucket = "original"
DBExtensionName = ".db"
dbCacheLockCheckTimeout = 10 * time.Second
)
type Cache struct {
rootDir string
db *bbolt.DB
log zlog.Logger
useRelPaths bool // weather or not to use relative paths, should be true for filesystem and false for s3
}
// Blob is a blob record.
type Blob struct {
Path string
}
func NewCache(rootDir string, name string, useRelPaths bool, log zlog.Logger) *Cache {
dbPath := path.Join(rootDir, name+DBExtensionName)
dbOpts := &bbolt.Options{
Timeout: dbCacheLockCheckTimeout,
FreelistType: bbolt.FreelistArrayType,
}
cacheDB, err := bbolt.Open(dbPath, 0o600, dbOpts) //nolint:gomnd
if err != nil {
log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create cache db")
return nil
}
if err := cacheDB.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucketIfNotExists([]byte(BlobsCache)); err != nil {
// this is a serious failure
log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create a root bucket")
return err
}
return nil
}); err != nil {
// something went wrong
log.Error().Err(err).Msg("unable to create a cache")
return nil
}
return &Cache{rootDir: rootDir, db: cacheDB, useRelPaths: useRelPaths, log: log}
}
func (c *Cache) PutBlob(digest godigest.Digest, path string) error {
if path == "" {
c.log.Error().Err(errors.ErrEmptyValue).Str("digest", digest.String()).Msg("empty path provided")
return errors.ErrEmptyValue
}
// use only relative (to rootDir) paths on blobs
var err error
if c.useRelPaths {
path, err = filepath.Rel(c.rootDir, path)
if err != nil {
c.log.Error().Err(err).Str("path", path).Msg("unable to get relative path")
}
}
if err := c.db.Update(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
bucket, err := root.CreateBucketIfNotExists([]byte(digest.String()))
if err != nil {
// this is a serious failure
c.log.Error().Err(err).Str("bucket", digest.String()).Msg("unable to create a bucket")
return err
}
// create nested deduped bucket where we store all the deduped blobs + original blob
deduped, err := bucket.CreateBucketIfNotExists([]byte(DuplicatesBucket))
if err != nil {
// this is a serious failure
c.log.Error().Err(err).Str("bucket", DuplicatesBucket).Msg("unable to create a bucket")
return err
}
if err := deduped.Put([]byte(path), nil); err != nil {
c.log.Error().Err(err).Str("bucket", DuplicatesBucket).Str("value", path).Msg("unable to put record")
return err
}
// create origin bucket and insert only the original blob
origin := bucket.Bucket([]byte(OriginalBucket))
if origin == nil {
// if the bucket doesn't exist yet then 'path' is the original blob
origin, err := bucket.CreateBucket([]byte(OriginalBucket))
if err != nil {
// this is a serious failure
c.log.Error().Err(err).Str("bucket", OriginalBucket).Msg("unable to create a bucket")
return err
}
if err := origin.Put([]byte(path), nil); err != nil {
c.log.Error().Err(err).Str("bucket", OriginalBucket).Str("value", path).Msg("unable to put record")
return err
}
}
return nil
}); err != nil {
return err
}
return nil
}
func (c *Cache) GetBlob(digest godigest.Digest) (string, error) {
var blobPath strings.Builder
if err := c.db.View(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
bucket := root.Bucket([]byte(digest.String()))
if bucket != nil {
origin := bucket.Bucket([]byte(OriginalBucket))
blobPath.WriteString(string(c.getOne(origin)))
return nil
}
return errors.ErrCacheMiss
}); err != nil {
return "", err
}
return blobPath.String(), nil
}
func (c *Cache) HasBlob(digest godigest.Digest, blob string) bool {
if err := c.db.View(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
bucket := root.Bucket([]byte(digest.String()))
if bucket == nil {
return errors.ErrCacheMiss
}
origin := bucket.Bucket([]byte(OriginalBucket))
if origin == nil {
return errors.ErrCacheMiss
}
if origin.Get([]byte(blob)) == nil {
return errors.ErrCacheMiss
}
return nil
}); err != nil {
return false
}
return true
}
func (c *Cache) getOne(bucket *bbolt.Bucket) []byte {
if bucket != nil {
cursor := bucket.Cursor()
k, _ := cursor.First()
return k
}
return nil
}
func (c *Cache) DeleteBlob(digest godigest.Digest, path string) error {
// use only relative (to rootDir) paths on blobs
var err error
if c.useRelPaths {
path, err = filepath.Rel(c.rootDir, path)
if err != nil {
c.log.Error().Err(err).Str("path", path).Msg("unable to get relative path")
}
}
if err := c.db.Update(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
bucket := root.Bucket([]byte(digest.String()))
if bucket == nil {
return errors.ErrCacheMiss
}
deduped := bucket.Bucket([]byte(DuplicatesBucket))
if deduped == nil {
return errors.ErrCacheMiss
}
if err := deduped.Delete([]byte(path)); err != nil {
c.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", DuplicatesBucket).
Str("path", path).Msg("unable to delete")
return err
}
origin := bucket.Bucket([]byte(OriginalBucket))
if origin != nil {
originBlob := c.getOne(origin)
if originBlob != nil {
if err := origin.Delete([]byte(path)); err != nil {
c.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", OriginalBucket).
Str("path", path).Msg("unable to delete")
return err
}
// move next candidate to origin bucket, next GetKey will return this one and storage will move the content here
dedupedBlob := c.getOne(deduped)
if dedupedBlob != nil {
if err := origin.Put(dedupedBlob, nil); err != nil {
c.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", OriginalBucket).Str("path", path).
Msg("unable to put")
return err
}
}
}
}
// if no key in origin bucket then digest bucket is empty, remove it
k := c.getOne(origin)
if k == nil {
c.log.Debug().Str("digest", digest.String()).Str("path", path).Msg("deleting empty bucket")
if err := root.DeleteBucket([]byte(digest.String())); err != nil {
c.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", digest.String()).Str("path", path).
Msg("unable to delete")
return err
}
}
return nil
}); err != nil {
return err
}
return nil
func Create(dbtype string, parameters interface{}, log zlog.Logger) (cache.Cache, error) {
switch dbtype {
case "boltdb":
{
return cache.NewBoltDBCache(parameters, log), nil
}
default:
{
return nil, errors.ErrBadConfig
}
}
}

pkg/storage/cache/boltdb.go (new file, 310 lines)
View file

@ -0,0 +1,310 @@
package cache
import (
"os"
"path"
"path/filepath"
"strings"
godigest "github.com/opencontainers/go-digest"
"go.etcd.io/bbolt"
"zotregistry.io/zot/errors"
zlog "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage/constants"
)
type BoltDBDriver struct {
rootDir string
db *bbolt.DB
log zlog.Logger
useRelPaths bool // whether or not to use relative paths, should be true for filesystem and false for s3
}
type BoltDBDriverParameters struct {
RootDir, Name string
UseRelPaths bool
}
func NewBoltDBCache(parameters interface{}, log zlog.Logger) Cache {
properParameters, ok := parameters.(BoltDBDriverParameters)
if !ok {
panic("Failed type assertion")
}
return NewCache(properParameters, log)
}
func NewCache(parameters BoltDBDriverParameters, log zlog.Logger) *BoltDBDriver {
err := os.MkdirAll(parameters.RootDir, constants.DefaultDirPerms)
if err != nil {
log.Error().Err(err).Msgf("unable to create directory for cache db: %v", parameters.RootDir)
return nil
}
dbPath := path.Join(parameters.RootDir, parameters.Name+constants.DBExtensionName)
dbOpts := &bbolt.Options{
Timeout: constants.DBCacheLockCheckTimeout,
FreelistType: bbolt.FreelistArrayType,
}
cacheDB, err := bbolt.Open(dbPath, 0o600, dbOpts) //nolint:gomnd
if err != nil {
log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create cache db")
return nil
}
if err := cacheDB.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucketIfNotExists([]byte(constants.BlobsCache)); err != nil {
// this is a serious failure
log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create a root bucket")
return err
}
return nil
}); err != nil {
// something went wrong
log.Error().Err(err).Msg("unable to create a cache")
return nil
}
return &BoltDBDriver{rootDir: parameters.RootDir, db: cacheDB, useRelPaths: parameters.UseRelPaths, log: log}
}
func (d *BoltDBDriver) Name() string {
return "boltdb"
}
func (d *BoltDBDriver) PutBlob(digest godigest.Digest, path string) error {
if path == "" {
d.log.Error().Err(errors.ErrEmptyValue).Str("digest", digest.String()).Msg("empty path provided")
return errors.ErrEmptyValue
}
// use only relative (to rootDir) paths on blobs
var err error
if d.useRelPaths {
path, err = filepath.Rel(d.rootDir, path)
if err != nil {
d.log.Error().Err(err).Str("path", path).Msg("unable to get relative path")
}
}
if err := d.db.Update(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(constants.BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
d.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
bucket, err := root.CreateBucketIfNotExists([]byte(digest.String()))
if err != nil {
// this is a serious failure
d.log.Error().Err(err).Str("bucket", digest.String()).Msg("unable to create a bucket")
return err
}
// create nested deduped bucket where we store all the deduped blobs + original blob
deduped, err := bucket.CreateBucketIfNotExists([]byte(constants.DuplicatesBucket))
if err != nil {
// this is a serious failure
d.log.Error().Err(err).Str("bucket", constants.DuplicatesBucket).Msg("unable to create a bucket")
return err
}
if err := deduped.Put([]byte(path), nil); err != nil {
d.log.Error().Err(err).Str("bucket", constants.DuplicatesBucket).Str("value", path).Msg("unable to put record")
return err
}
// create origin bucket and insert only the original blob
origin := bucket.Bucket([]byte(constants.OriginalBucket))
if origin == nil {
// if the bucket doesn't exist yet then 'path' is the original blob
origin, err := bucket.CreateBucket([]byte(constants.OriginalBucket))
if err != nil {
// this is a serious failure
d.log.Error().Err(err).Str("bucket", constants.OriginalBucket).Msg("unable to create a bucket")
return err
}
if err := origin.Put([]byte(path), nil); err != nil {
d.log.Error().Err(err).Str("bucket", constants.OriginalBucket).Str("value", path).Msg("unable to put record")
return err
}
}
return nil
}); err != nil {
return err
}
return nil
}
func (d *BoltDBDriver) GetBlob(digest godigest.Digest) (string, error) {
var blobPath strings.Builder
if err := d.db.View(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(constants.BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
d.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
bucket := root.Bucket([]byte(digest.String()))
if bucket != nil {
origin := bucket.Bucket([]byte(constants.OriginalBucket))
blobPath.WriteString(string(d.getOne(origin)))
return nil
}
return errors.ErrCacheMiss
}); err != nil {
return "", err
}
return blobPath.String(), nil
}
func (d *BoltDBDriver) HasBlob(digest godigest.Digest, blob string) bool {
if err := d.db.View(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(constants.BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
d.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
bucket := root.Bucket([]byte(digest.String()))
if bucket == nil {
return errors.ErrCacheMiss
}
origin := bucket.Bucket([]byte(constants.OriginalBucket))
if origin == nil {
return errors.ErrCacheMiss
}
if origin.Get([]byte(blob)) == nil {
return errors.ErrCacheMiss
}
return nil
}); err != nil {
return false
}
return true
}
func (d *BoltDBDriver) getOne(bucket *bbolt.Bucket) []byte {
if bucket != nil {
cursor := bucket.Cursor()
k, _ := cursor.First()
return k
}
return nil
}
func (d *BoltDBDriver) DeleteBlob(digest godigest.Digest, path string) error {
// use only relative (to rootDir) paths on blobs
var err error
if d.useRelPaths {
path, err = filepath.Rel(d.rootDir, path)
if err != nil {
d.log.Error().Err(err).Str("path", path).Msg("unable to get relative path")
}
}
if err := d.db.Update(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(constants.BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
d.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
bucket := root.Bucket([]byte(digest.String()))
if bucket == nil {
return errors.ErrCacheMiss
}
deduped := bucket.Bucket([]byte(constants.DuplicatesBucket))
if deduped == nil {
return errors.ErrCacheMiss
}
if err := deduped.Delete([]byte(path)); err != nil {
d.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", constants.DuplicatesBucket).
Str("path", path).Msg("unable to delete")
return err
}
origin := bucket.Bucket([]byte(constants.OriginalBucket))
if origin != nil {
originBlob := d.getOne(origin)
if originBlob != nil {
if err := origin.Delete([]byte(path)); err != nil {
d.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", constants.OriginalBucket).
Str("path", path).Msg("unable to delete")
return err
}
// move next candidate to origin bucket, next GetKey will return this one and storage will move the content here
dedupedBlob := d.getOne(deduped)
if dedupedBlob != nil {
if err := origin.Put(dedupedBlob, nil); err != nil {
d.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", constants.OriginalBucket).Str("path", path).
Msg("unable to put")
return err
}
}
}
}
// if no key in origin bucket then digest bucket is empty, remove it
k := d.getOne(origin)
if k == nil {
d.log.Debug().Str("digest", digest.String()).Str("path", path).Msg("deleting empty bucket")
if err := root.DeleteBucket([]byte(digest)); err != nil {
d.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", digest.String()).Str("path", path).
Msg("unable to delete")
return err
}
}
return nil
}); err != nil {
return err
}
return nil
}

pkg/storage/cache/boltdb_test.go (new file, 64 lines)
View file

@ -0,0 +1,64 @@
package cache_test
import (
"path"
"testing"
. "github.com/smartystreets/goconvey/convey"
"zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
)
func TestBoltDBCache(t *testing.T) {
Convey("Make a new cache", t, func() {
dir := t.TempDir()
log := log.NewLogger("debug", "")
So(log, ShouldNotBeNil)
So(func() { _, _ = storage.Create("boltdb", "failTypeAssertion", log) }, ShouldPanic)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{"/deadBEEF", "cache_test", true}, log)
So(cacheDriver, ShouldBeNil)
cacheDriver, _ = storage.Create("boltdb", cache.BoltDBDriverParameters{dir, "cache_test", true}, log)
So(cacheDriver, ShouldNotBeNil)
name := cacheDriver.Name()
So(name, ShouldEqual, "boltdb")
val, err := cacheDriver.GetBlob("key")
So(err, ShouldEqual, errors.ErrCacheMiss)
So(val, ShouldBeEmpty)
exists := cacheDriver.HasBlob("key", "value")
So(exists, ShouldBeFalse)
err = cacheDriver.PutBlob("key", path.Join(dir, "value"))
So(err, ShouldBeNil)
err = cacheDriver.PutBlob("key", "value")
So(err, ShouldNotBeNil)
exists = cacheDriver.HasBlob("key", "value")
So(exists, ShouldBeTrue)
val, err = cacheDriver.GetBlob("key")
So(err, ShouldBeNil)
So(val, ShouldNotBeEmpty)
err = cacheDriver.DeleteBlob("bogusKey", "bogusValue")
So(err, ShouldEqual, errors.ErrCacheMiss)
err = cacheDriver.DeleteBlob("key", "bogusValue")
So(err, ShouldBeNil)
// try to insert empty path
err = cacheDriver.PutBlob("key", "")
So(err, ShouldNotBeNil)
So(err, ShouldEqual, errors.ErrEmptyValue)
})
}

pkg/storage/cache/cacheinterface.go (new file, 22 lines)
View file

@ -0,0 +1,22 @@
package cache
import (
godigest "github.com/opencontainers/go-digest"
)
type Cache interface {
// Returns the human-readable "name" of the driver.
Name() string
// Retrieves the blob matching provided digest.
GetBlob(digest godigest.Digest) (string, error)
// Uploads blob to cachedb.
PutBlob(digest godigest.Digest, path string) error
// Check if blob exists in cachedb.
HasBlob(digest godigest.Digest, path string) bool
// Delete a blob from the cachedb.
DeleteBlob(digest godigest.Digest, path string) error
}
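
For reference, a short usage sketch of this interface with the boltdb backend, mirroring the boltdb driver test above; the root directory, digest, and blob path are illustrative.

package main

import (
	"fmt"

	godigest "github.com/opencontainers/go-digest"

	zlog "zotregistry.io/zot/pkg/log"
	"zotregistry.io/zot/pkg/storage"
	"zotregistry.io/zot/pkg/storage/cache"
)

func main() {
	logger := zlog.NewLogger("debug", "")

	// Any backend returned by the storage factory satisfies the Cache interface.
	driver, err := storage.Create("boltdb", cache.BoltDBDriverParameters{
		RootDir:     "/tmp/zot",
		Name:        "cache",
		UseRelPaths: true,
	}, logger)
	if err != nil || driver == nil {
		panic("unable to create cache driver")
	}

	digest := godigest.FromString("example blob")
	blobPath := "/tmp/zot/example/blobs/sha256/deadbeef" // illustrative path under RootDir

	// Record the blob location, read it back, then drop it again.
	if err := driver.PutBlob(digest, blobPath); err != nil {
		panic(err)
	}

	storedPath, err := driver.GetBlob(digest) // relative to RootDir when UseRelPaths is true
	fmt.Println(storedPath, err)

	fmt.Println(driver.HasBlob(digest, storedPath)) // lookups use the stored (relative) path
	fmt.Println(driver.DeleteBlob(digest, blobPath))
}

With UseRelPaths enabled the driver stores and returns blob paths relative to RootDir, which is the behaviour exercised by the boltdb_test.go cases above.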

View file

@ -9,6 +9,7 @@ import (
"zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
)
func TestCache(t *testing.T) {
@ -18,39 +19,53 @@ func TestCache(t *testing.T) {
log := log.NewLogger("debug", "")
So(log, ShouldNotBeNil)
So(storage.NewCache("/deadBEEF", "cache_test", true, log), ShouldBeNil)
So(func() { _, _ = storage.Create("boltdb", "failTypeAssertion", log) }, ShouldPanic)
cache := storage.NewCache(dir, "cache_test", true, log)
So(cache, ShouldNotBeNil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: "/deadBEEF",
Name: "cache_test",
UseRelPaths: true,
}, log)
So(cacheDriver, ShouldBeNil)
val, err := cache.GetBlob("key")
cacheDriver, _ = storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache_test",
UseRelPaths: true,
}, log)
So(cacheDriver, ShouldNotBeNil)
name := cacheDriver.Name()
So(name, ShouldEqual, "boltdb")
val, err := cacheDriver.GetBlob("key")
So(err, ShouldEqual, errors.ErrCacheMiss)
So(val, ShouldBeEmpty)
exists := cache.HasBlob("key", "value")
exists := cacheDriver.HasBlob("key", "value")
So(exists, ShouldBeFalse)
err = cache.PutBlob("key", path.Join(dir, "value"))
err = cacheDriver.PutBlob("key", path.Join(dir, "value"))
So(err, ShouldBeNil)
err = cache.PutBlob("key", "value")
err = cacheDriver.PutBlob("key", "value")
So(err, ShouldNotBeNil)
exists = cache.HasBlob("key", "value")
exists = cacheDriver.HasBlob("key", "value")
So(exists, ShouldBeTrue)
val, err = cache.GetBlob("key")
val, err = cacheDriver.GetBlob("key")
So(err, ShouldBeNil)
So(val, ShouldNotBeEmpty)
err = cache.DeleteBlob("bogusKey", "bogusValue")
err = cacheDriver.DeleteBlob("bogusKey", "bogusValue")
So(err, ShouldEqual, errors.ErrCacheMiss)
err = cache.DeleteBlob("key", "bogusValue")
err = cacheDriver.DeleteBlob("key", "bogusValue")
So(err, ShouldBeNil)
// try to insert empty path
err = cache.PutBlob("key", "")
err = cacheDriver.PutBlob("key", "")
So(err, ShouldNotBeNil)
So(err, ShouldEqual, errors.ErrEmptyValue)
})

View file

@ -13,14 +13,7 @@ import (
"github.com/sigstore/cosign/pkg/oci/remote"
zerr "zotregistry.io/zot/errors"
)
const (
// BlobUploadDir defines the upload directory for blob uploads.
BlobUploadDir = ".uploads"
SchemaVersion = 2
RLOCK = "RLock"
RWLOCK = "RWLock"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
)
func GetTagsByIndex(index ispec.Index) []string {
@ -101,7 +94,7 @@ func ValidateManifest(imgStore ImageStore, repo, reference, mediaType string, bo
func validateOCIManifest(imgStore ImageStore, repo, reference string, manifest *ispec.Manifest, //nolint:unparam
log zerolog.Logger,
) (godigest.Digest, error) {
if manifest.SchemaVersion != SchemaVersion {
if manifest.SchemaVersion != storageConstants.SchemaVersion {
log.Error().Int("SchemaVersion", manifest.SchemaVersion).Msg("invalid manifest")
return "", zerr.ErrBadManifest

View file

@ -14,6 +14,7 @@ import (
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
"zotregistry.io/zot/pkg/storage/local"
"zotregistry.io/zot/pkg/test"
)
@ -24,8 +25,13 @@ func TestValidateManifest(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
true, log, metrics, nil)
true, log, metrics, nil, cacheDriver)
content := []byte("this is a blob")
digest := godigest.FromBytes(content)

View file

@ -0,0 +1,21 @@
package constants
import (
"time"
)
const (
// BlobUploadDir defines the upload directory for blob uploads.
BlobUploadDir = ".uploads"
SchemaVersion = 2
DefaultFilePerms = 0o600
DefaultDirPerms = 0o700
RLOCK = "RLock"
RWLOCK = "RWLock"
BlobsCache = "blobs"
DuplicatesBucket = "duplicates"
OriginalBucket = "original"
DBExtensionName = ".db"
DBCacheLockCheckTimeout = 10 * time.Second
BoltdbName = "cache"
)

View file

@ -31,6 +31,8 @@ import (
zlog "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/scheduler"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/test"
)
@ -44,7 +46,7 @@ type ImageStoreLocal struct {
rootDir string
lock *sync.RWMutex
blobUploads map[string]storage.BlobUpload
cache *storage.Cache
cache cache.Cache
gc bool
dedupe bool
commit bool
@ -63,8 +65,9 @@ func (is *ImageStoreLocal) DirExists(d string) bool {
}
// NewImageStore returns a new image store backed by a file storage.
// Use the last argument to properly set a cache database, or it will default to boltDB local storage.
func NewImageStore(rootDir string, gc bool, gcDelay time.Duration, dedupe, commit bool,
log zlog.Logger, metrics monitoring.MetricServer, linter storage.Lint,
log zlog.Logger, metrics monitoring.MetricServer, linter storage.Lint, cacheDriver cache.Cache,
) storage.ImageStore {
if _, err := os.Stat(rootDir); os.IsNotExist(err) {
if err := os.MkdirAll(rootDir, DefaultDirPerms); err != nil {
@ -87,9 +90,7 @@ func NewImageStore(rootDir string, gc bool, gcDelay time.Duration, dedupe, commi
linter: linter,
}
if dedupe {
imgStore.cache = storage.NewCache(rootDir, "cache", true, log)
}
imgStore.cache = cacheDriver
if gc {
// we use umoci GC to perform garbage-collection, but it uses its own logger
@ -122,7 +123,7 @@ func (is *ImageStoreLocal) RUnlock(lockStart *time.Time) {
lockEnd := time.Now()
latency := lockEnd.Sub(*lockStart)
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storage.RLOCK) // histogram
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storageConstants.RLOCK) // histogram
}
// Lock write-lock.
@ -138,7 +139,7 @@ func (is *ImageStoreLocal) Unlock(lockStart *time.Time) {
lockEnd := time.Now()
latency := lockEnd.Sub(*lockStart)
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storage.RWLOCK) // histogram
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storageConstants.RWLOCK) // histogram
}
func (is *ImageStoreLocal) initRepo(name string) error {
@ -158,7 +159,7 @@ func (is *ImageStoreLocal) initRepo(name string) error {
return err
}
// create BlobUploadDir subdir
err = ensureDir(path.Join(repoDir, storage.BlobUploadDir), is.log)
err = ensureDir(path.Join(repoDir, storageConstants.BlobUploadDir), is.log)
if err != nil {
is.log.Error().Err(err).Msg("error creating blob upload subdir")
@ -249,7 +250,7 @@ func (is *ImageStoreLocal) ValidateRepo(name string) (bool, error) {
}
for k, v := range found {
if !v && k != storage.BlobUploadDir {
if !v && k != storageConstants.BlobUploadDir {
return false, nil
}
}
@ -618,7 +619,7 @@ func (is *ImageStoreLocal) DeleteImageManifest(repo, reference string) error {
// BlobUploadPath returns the upload path for a blob in this store.
func (is *ImageStoreLocal) BlobUploadPath(repo, uuid string) string {
dir := path.Join(is.rootDir, repo)
blobUploadPath := path.Join(dir, storage.BlobUploadDir, uuid)
blobUploadPath := path.Join(dir, storageConstants.BlobUploadDir, uuid)
return blobUploadPath
}
@ -836,7 +837,7 @@ func (is *ImageStoreLocal) FinishBlobUpload(repo, uuid string, body io.Reader, d
dst := is.BlobPath(repo, dstDigest)
if is.dedupe && is.cache != nil {
if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
err = is.DedupeBlob(src, dstDigest, dst)
if err := test.Error(err); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
@ -917,7 +918,7 @@ func (is *ImageStoreLocal) FullBlobUpload(repo string, body io.Reader, dstDigest
_ = ensureDir(dir, is.log)
dst := is.BlobPath(repo, dstDigest)
if is.dedupe && is.cache != nil {
if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
if err := is.DedupeBlob(src, dstDigest, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to dedupe blob")
@ -1046,7 +1047,7 @@ func (is *ImageStoreLocal) CheckBlob(repo string, digest godigest.Digest) (bool,
blobPath := is.BlobPath(repo, digest)
if is.dedupe && is.cache != nil {
if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
is.Lock(&lockLatency)
defer is.Unlock(&lockLatency)
} else {
@ -1091,7 +1092,7 @@ func (is *ImageStoreLocal) checkCacheBlob(digest godigest.Digest) (string, error
return "", err
}
if !is.dedupe || is.cache == nil {
if !is.dedupe || fmt.Sprintf("%v", is.cache) == fmt.Sprintf("%v", nil) {
return "", zerr.ErrBlobNotFound
}
@ -1319,7 +1320,7 @@ func (is *ImageStoreLocal) DeleteBlob(repo string, digest godigest.Digest) error
return zerr.ErrBlobNotFound
}
if is.cache != nil {
if fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
if err := is.cache.DeleteBlob(digest, blobPath); err != nil {
is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", blobPath).
Msg("unable to remove blob path from cache")

View file

@ -19,6 +19,7 @@ import (
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
"zotregistry.io/zot/pkg/storage/local"
)
@ -28,7 +29,13 @@ func TestElevatedPrivilegesInvalidDedupe(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil, cacheDriver)
upload, err := imgStore.NewBlobUpload("dedupe1")
So(err, ShouldBeNil)

View file

@ -28,6 +28,7 @@ import (
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
"zotregistry.io/zot/pkg/storage/local"
"zotregistry.io/zot/pkg/test"
)
@ -42,8 +43,13 @@ func TestStorageFSAPIs(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
true, log, metrics, nil)
true, log, metrics, nil, cacheDriver)
Convey("Repo layout", t, func(c C) {
Convey("Bad image manifest", func() {
@ -174,7 +180,12 @@ func TestGetReferrers(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil, cacheDriver)
Convey("Get referrers", t, func(c C) {
err := test.CopyFiles("../../../test/data/zot-test", path.Join(dir, "zot-test"))
@ -224,7 +235,12 @@ func FuzzNewBlobUpload(f *testing.F) {
t.Logf("Input argument is %s", data)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil, cacheDriver)
_, err := imgStore.NewBlobUpload(data)
if err != nil {
@ -244,7 +260,12 @@ func FuzzPutBlobChunk(f *testing.F) {
t.Logf("Input argument is %s", data)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil, cacheDriver)
repoName := data
uuid, err := imgStore.NewBlobUpload(repoName)
@ -272,7 +293,12 @@ func FuzzPutBlobChunkStreamed(f *testing.F) {
t.Logf("Input argument is %s", data)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil, cacheDriver)
repoName := data
@ -299,7 +325,12 @@ func FuzzGetBlobUpload(f *testing.F) {
defer os.RemoveAll(dir)
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil, cacheDriver)
_, err := imgStore.GetBlobUpload(data1, data2)
if err != nil {
@ -319,7 +350,12 @@ func FuzzTestPutGetImageManifest(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
cblob, cdigest := test.GetRandomImageConfig()
@ -365,7 +401,12 @@ func FuzzTestPutDeleteImageManifest(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
cblob, cdigest := test.GetRandomImageConfig()
@ -418,7 +459,12 @@ func FuzzTestDeleteImageManifest(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
digest, _, err := newRandomBlobForFuzz(data)
if err != nil {
@ -448,7 +494,12 @@ func FuzzInitRepo(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
err := imgStore.InitRepo(data)
if err != nil {
if isKnownErr(err) {
@ -467,7 +518,12 @@ func FuzzInitValidateRepo(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
err := imgStore.InitRepo(data)
if err != nil {
if isKnownErr(err) {
@ -493,7 +549,12 @@ func FuzzGetImageTags(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
_, err := imgStore.GetImageTags(data)
if err != nil {
if errors.Is(err, zerr.ErrRepoNotFound) || isKnownErr(err) {
@ -512,7 +573,12 @@ func FuzzBlobUploadPath(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
_ = imgStore.BlobUploadPath(repo, uuid)
})
@ -526,7 +592,12 @@ func FuzzBlobUploadInfo(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
repo := data
_, err := imgStore.BlobUploadInfo(repo, uuid)
@ -546,7 +617,12 @@ func FuzzTestGetImageManifest(f *testing.F) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil, cacheDriver)
repoName := data
@ -569,7 +645,12 @@ func FuzzFinishBlobUpload(f *testing.F) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil, cacheDriver)
repoName := data
@ -613,7 +694,12 @@ func FuzzFullBlobUpload(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
ldigest, lblob, err := newRandomBlobForFuzz(data)
if err != nil {
@ -638,7 +724,12 @@ func FuzzDedupeBlob(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
blobDigest := godigest.FromString(data)
@ -674,7 +765,12 @@ func FuzzDeleteBlobUpload(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
uuid, err := imgStore.NewBlobUpload(repoName)
if err != nil {
@ -700,7 +796,12 @@ func FuzzBlobPath(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
digest := godigest.FromString(data)
_ = imgStore.BlobPath(repoName, digest)
@ -716,7 +817,12 @@ func FuzzCheckBlob(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
digest := godigest.FromString(data)
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
@ -742,7 +848,12 @@ func FuzzGetBlob(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
digest := godigest.FromString(data)
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
@ -775,7 +886,12 @@ func FuzzDeleteBlob(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
digest := godigest.FromString(data)
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
@ -805,7 +921,12 @@ func FuzzGetIndexContent(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
digest := godigest.FromString(data)
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
@ -835,7 +956,12 @@ func FuzzGetBlobContent(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
digest := godigest.FromString(data)
_, _, err := imgStore.FullBlobUpload(repoName, bytes.NewReader([]byte(data)), digest)
@ -864,7 +990,12 @@ func FuzzGetReferrers(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
err := test.CopyFiles("../../../test/data/zot-test", path.Join(dir, "zot-test"))
if err != nil {
@ -919,7 +1050,12 @@ func FuzzRunGCRepo(f *testing.F) {
dir := t.TempDir()
defer os.RemoveAll(dir)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, *log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, *log, metrics, nil, cacheDriver)
if err := imgStore.RunGCRepo(data); err != nil {
t.Error(err)
@ -932,8 +1068,13 @@ func TestDedupeLinks(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
Convey("Dedupe", t, func(c C) {
// manifest1
@ -1104,7 +1245,12 @@ func TestDedupe(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
il := local.NewImageStore(dir, true, storage.DefaultGCDelay, true, true, log, metrics, nil, cacheDriver)
So(il.DedupeBlob("", "", ""), ShouldNotBeNil)
})
@ -1118,12 +1264,21 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
So(local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
true, log, metrics, nil), ShouldNotBeNil)
true, log, metrics, nil, cacheDriver), ShouldNotBeNil)
if os.Geteuid() != 0 {
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: "/deadBEEF",
Name: "cache",
UseRelPaths: true,
}, log)
So(local.NewImageStore("/deadBEEF", true, storage.DefaultGCDelay,
true, true, log, metrics, nil), ShouldBeNil)
true, true, log, metrics, nil, cacheDriver), ShouldBeNil)
}
})
@ -1132,8 +1287,13 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
err := os.Chmod(dir, 0o000) // remove all perms
if err != nil {
@ -1172,8 +1332,13 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
true, log, metrics, nil)
true, log, metrics, nil, cacheDriver)
So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil)
@ -1287,8 +1452,13 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil)
@ -1311,8 +1481,13 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
true, log, metrics, nil)
true, log, metrics, nil, cacheDriver)
So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil)
@ -1353,8 +1528,13 @@ func TestNegativeCases(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
So(imgStore, ShouldNotBeNil)
So(imgStore.InitRepo("test"), ShouldBeNil)
@ -1519,8 +1699,13 @@ func TestInjectWriteFile(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
Convey("Failure path1", func() {
injected := test.InjectFailure(0)
@ -1550,8 +1735,13 @@ func TestInjectWriteFile(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, false, log, metrics, nil)
true, false, log, metrics, nil, cacheDriver)
Convey("Failure path not reached", func() {
err := imgStore.InitRepo("repo1")
@ -1568,8 +1758,13 @@ func TestGarbageCollect(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)
Convey("Garbage collect with default/long delay", func() {
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
repoName := "gc-long"
upload, err := imgStore.NewBlobUpload(repoName)
@ -1636,7 +1831,12 @@ func TestGarbageCollect(t *testing.T) {
})
Convey("Garbage collect with short delay", func() {
imgStore := local.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil, cacheDriver)
repoName := "gc-short"
// upload orphan blob
@ -1732,7 +1932,12 @@ func TestGarbageCollect(t *testing.T) {
Convey("Garbage collect with dedupe", func() {
// garbage-collect is repo-local and dedupe is global and they can interact in strange ways
imgStore := local.NewImageStore(dir, true, 5*time.Second, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, 5*time.Second, true, true, log, metrics, nil, cacheDriver)
// first upload an image to the first repo and wait for GC timeout
@ -1933,7 +2138,12 @@ func TestGarbageCollectForImageStore(t *testing.T) {
log := log.NewLogger("debug", logFile.Name())
metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil, cacheDriver)
repoName := "gc-all-repos-short"
err := test.CopyFiles("../../../test/data/zot-test", path.Join(dir, repoName))
@ -1966,7 +2176,12 @@ func TestGarbageCollectForImageStore(t *testing.T) {
log := log.NewLogger("debug", logFile.Name())
metrics := monitoring.NewMetricsServer(false, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, 1*time.Second, true, true, log, metrics, nil, cacheDriver)
repoName := "gc-all-repos-short"
err := test.CopyFiles("../../../test/data/zot-test", path.Join(dir, repoName))
@ -2012,8 +2227,13 @@ func TestInitRepo(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
err := os.Mkdir(path.Join(dir, "test-dir"), 0o000)
So(err, ShouldBeNil)
@ -2029,8 +2249,13 @@ func TestValidateRepo(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
err := os.Mkdir(path.Join(dir, "test-dir"), 0o000)
So(err, ShouldBeNil)
@ -2046,8 +2271,13 @@ func TestGetRepositoriesError(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil,
true, true, log, metrics, nil, cacheDriver,
)
// create valid directory with permissions
@ -2066,8 +2296,13 @@ func TestGetNextRepository(t *testing.T) {
dir := t.TempDir()
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil,
true, true, log, metrics, nil, cacheDriver,
)
firstRepoName := "repo1"
secondRepoName := "repo2"
@ -2103,8 +2338,13 @@ func TestPutBlobChunkStreamed(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
uuid, err := imgStore.NewBlobUpload("test")
So(err, ShouldBeNil)
@ -2127,8 +2367,13 @@ func TestPullRange(t *testing.T) {
metrics := monitoring.NewMetricsServer(false, log)
Convey("Negative cases", func() {
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
repoName := "pull-range"
upload, err := imgStore.NewBlobUpload(repoName)

@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"sync"
@ -29,6 +28,8 @@ import (
zlog "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/scheduler"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/test"
)
@ -44,7 +45,7 @@ type ObjectStorage struct {
blobUploads map[string]storage.BlobUpload
log zerolog.Logger
metrics monitoring.MetricServer
cache *storage.Cache
cache cache.Cache
dedupe bool
linter storage.Lint
}
@ -63,9 +64,10 @@ func (is *ObjectStorage) DirExists(d string) bool {
// NewObjectStorage returns a new image store backed by cloud storages.
// see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers
// Use the last argument to properly set a cache database, or it will default to boltDB local storage.
func NewImageStore(rootDir string, cacheDir string, gc bool, gcDelay time.Duration, dedupe, commit bool,
log zlog.Logger, metrics monitoring.MetricServer, linter storage.Lint,
store driver.StorageDriver,
store driver.StorageDriver, cacheDriver cache.Cache,
) storage.ImageStore {
imgStore := &ObjectStorage{
rootDir: rootDir,
@ -78,17 +80,7 @@ func NewImageStore(rootDir string, cacheDir string, gc bool, gcDelay time.Durati
linter: linter,
}
cachePath := path.Join(cacheDir, CacheDBName+storage.DBExtensionName)
if dedupe {
imgStore.cache = storage.NewCache(cacheDir, CacheDBName, false, log)
} else {
// if dedupe was used in previous runs use it to serve blobs correctly
if _, err := os.Stat(cachePath); err == nil {
log.Info().Str("cache path", cachePath).Msg("found cache database")
imgStore.cache = storage.NewCache(cacheDir, CacheDBName, false, log)
}
}
imgStore.cache = cacheDriver
return imgStore
}
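
With the defaulting logic removed from the constructor, the caller decides whether the object store gets a cache at all: a boltdb-backed cache.Cache enables dedupe bookkeeping, while nil (as some tests in this change pass) leaves the store without one. A sketch of a call site under the new signature, assuming rootDir, cacheDir, store, log and metrics are already set up as in the S3 tests, with illustrative boltdb parameters:

// cacheDriver may also be left nil, in which case dedupe bookkeeping is skipped.
cacheDriver, err := storage.Create("boltdb", cache.BoltDBDriverParameters{
	RootDir:     cacheDir,
	Name:        "s3_cache",
	UseRelPaths: false, // the S3 tests in this change store absolute paths in the cache
}, log)
if err != nil {
	// run without a cache rather than failing store setup (illustrative choice)
	cacheDriver = nil
}

imgStore := s3.NewImageStore(rootDir, cacheDir, false, storage.DefaultGCDelay,
	true, false, log, metrics, nil, store, cacheDriver)
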
@ -107,7 +99,7 @@ func (is *ObjectStorage) RUnlock(lockStart *time.Time) {
lockEnd := time.Now()
// includes time spent in acquiring and holding a lock
latency := lockEnd.Sub(*lockStart)
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storage.RLOCK) // histogram
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storageConstants.RLOCK) // histogram
}
// Lock write-lock.
@ -124,7 +116,7 @@ func (is *ObjectStorage) Unlock(lockStart *time.Time) {
lockEnd := time.Now()
// includes time spent in acquiring and holding a lock
latency := lockEnd.Sub(*lockStart)
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storage.RWLOCK) // histogram
monitoring.ObserveStorageLockLatency(is.metrics, latency, is.RootDir(), storageConstants.RWLOCK) // histogram
}
func (is *ObjectStorage) initRepo(name string) error {
@ -225,7 +217,7 @@ func (is *ObjectStorage) ValidateRepo(name string) (bool, error) {
}
for k, v := range found {
if !v && k != storage.BlobUploadDir {
if !v && k != storageConstants.BlobUploadDir {
return false, nil
}
}
@ -520,7 +512,7 @@ func (is *ObjectStorage) DeleteImageManifest(repo, reference string) error {
// BlobUploadPath returns the upload path for a blob in this store.
func (is *ObjectStorage) BlobUploadPath(repo, uuid string) string {
dir := path.Join(is.rootDir, repo)
blobUploadPath := path.Join(dir, storage.BlobUploadDir, uuid)
blobUploadPath := path.Join(dir, storageConstants.BlobUploadDir, uuid)
return blobUploadPath
}
@ -732,7 +724,7 @@ func (is *ObjectStorage) FinishBlobUpload(repo, uuid string, body io.Reader, dst
is.Lock(&lockLatency)
defer is.Unlock(&lockLatency)
if is.dedupe && is.cache != nil {
if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
if err := is.DedupeBlob(src, dstDigest, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to dedupe blob")
@ -807,7 +799,7 @@ func (is *ObjectStorage) FullBlobUpload(repo string, body io.Reader, dstDigest g
dst := is.BlobPath(repo, dstDigest)
if is.dedupe && is.cache != nil {
if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
if err := is.DedupeBlob(src, dstDigest, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to dedupe blob")
@ -954,7 +946,7 @@ func (is *ObjectStorage) CheckBlob(repo string, digest godigest.Digest) (bool, i
blobPath := is.BlobPath(repo, digest)
if is.dedupe && is.cache != nil {
if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
is.Lock(&lockLatency)
defer is.Unlock(&lockLatency)
} else {
@ -998,7 +990,7 @@ func (is *ObjectStorage) checkCacheBlob(digest godigest.Digest) (string, error)
return "", err
}
if is.cache == nil {
if fmt.Sprintf("%v", is.cache) == fmt.Sprintf("%v", nil) {
return "", zerr.ErrBlobNotFound
}
@ -1183,7 +1175,7 @@ func (is *ObjectStorage) GetBlob(repo string, digest godigest.Digest, mediaType
}
// is a 'deduped' blob?
if binfo.Size() == 0 {
if binfo.Size() == 0 && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
// Check blobs in cache
dstRecord, err := is.checkCacheBlob(digest)
if err != nil {
@ -1275,7 +1267,7 @@ func (is *ObjectStorage) DeleteBlob(repo string, digest godigest.Digest) error {
return zerr.ErrBlobNotFound
}
if is.cache != nil {
if fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
dstRecord, err := is.cache.GetBlob(digest)
if err != nil && !errors.Is(err, zerr.ErrCacheMiss) {
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to lookup blob record")

@ -25,9 +25,13 @@ import (
"gopkg.in/resty.v1"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/api"
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/s3"
"zotregistry.io/zot/pkg/test"
)
@ -56,8 +60,19 @@ func skipIt(t *testing.T) {
func createMockStorage(rootDir string, cacheDir string, dedupe bool, store driver.StorageDriver) storage.ImageStore {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
var cacheDriver cache.Cache
// from pkg/cli/root.go/applyDefaultValues, s3 magic
if _, err := os.Stat(cacheDir); dedupe || (!dedupe && err == nil) {
cacheDriver, _ = storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: cacheDir,
Name: "s3_cache",
UseRelPaths: false,
}, log)
}
il := s3.NewImageStore(rootDir, cacheDir, false, storage.DefaultGCDelay,
dedupe, false, log, metrics, nil, store,
dedupe, false, log, metrics, nil, store, cacheDriver,
)
return il
@ -97,8 +112,19 @@ func createObjectsStore(rootDir string, cacheDir string, dedupe bool) (
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
var cacheDriver cache.Cache
// from pkg/cli/root.go/applyDefaultValues, s3 magic
if _, err := os.Stat(cacheDir); dedupe || (!dedupe && err == nil) {
cacheDriver, _ = storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: cacheDir,
Name: "s3_cache",
UseRelPaths: false,
}, log)
}
il := s3.NewImageStore(rootDir, cacheDir, false, storage.DefaultGCDelay,
dedupe, false, log, metrics, nil, store)
dedupe, false, log, metrics, nil, store, cacheDriver)
return store, il, err
}
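
The stat-plus-dedupe condition above encodes the defaulting rule borrowed from pkg/cli/root.go: always wire in a cache when dedupe is enabled, and keep honouring an existing cache location after dedupe has been switched off, so blobs deduped by earlier runs still resolve. A hedged restatement of that rule as a standalone helper (the helper name is illustrative and not part of this change):

func useBoltCache(cacheDir string, dedupe bool) bool {
	if dedupe {
		// dedupe needs the cache for blob bookkeeping
		return true
	}
	// dedupe disabled: only reuse a cache location that already exists,
	// e.g. one left behind by a previous run that had dedupe enabled
	_, err := os.Stat(cacheDir)

	return err == nil
}
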
@ -382,6 +408,44 @@ func TestNegativeCasesObjectsStorage(t *testing.T) {
So(err, ShouldBeNil)
})
Convey("Unable to create subpath cache db", func(c C) {
bucket := "zot-storage-test"
endpoint := os.Getenv("S3MOCK_ENDPOINT")
storageDriverParams := config.GlobalStorageConfig{
StorageConfig: config.StorageConfig{
Dedupe: true,
RootDirectory: t.TempDir(),
RemoteCache: false,
},
SubPaths: map[string]config.StorageConfig{
"/a": {
Dedupe: true,
RootDirectory: t.TempDir(),
StorageDriver: map[string]interface{}{
"rootDir": "/a",
"name": "s3",
"region": "us-east-2",
"bucket": bucket,
"regionendpoint": endpoint,
"accesskey": "minioadmin",
"secretkey": "minioadmin",
"secure": false,
"skipverify": false,
},
RemoteCache: false,
},
},
}
conf := config.New()
conf.Storage = storageDriverParams
controller := api.NewController(conf)
So(controller, ShouldNotBeNil)
err = controller.InitImageStore(context.TODO())
So(err, ShouldBeNil)
})
Convey("Invalid get image tags", func(c C) {
So(imgStore.InitRepo(testImage), ShouldBeNil)
@ -1043,12 +1107,12 @@ func TestS3Dedupe(t *testing.T) {
Convey("Check backward compatibility - switch dedupe to false", func() {
/* copy cache to the new storage with dedupe false (doing this because we
already have a cache object holding the lock on cache db file) */
input, err := os.ReadFile(path.Join(tdir, s3.CacheDBName+storage.DBExtensionName))
input, err := os.ReadFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName))
So(err, ShouldBeNil)
tdir = t.TempDir()
err = os.WriteFile(path.Join(tdir, s3.CacheDBName+storage.DBExtensionName), input, 0o600)
err = os.WriteFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName), input, 0o600)
So(err, ShouldBeNil)
storeDriver, imgStore, _ := createObjectsStore(testDir, tdir, false)
@ -1798,7 +1862,7 @@ func TestS3DedupeErr(t *testing.T) {
imgStore = createMockStorage(testDir, tdir, true, &StorageDriverMock{})
err = os.Remove(path.Join(tdir, s3.CacheDBName+storage.DBExtensionName))
err = os.Remove(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName))
digest := godigest.NewDigestFromEncoded(godigest.SHA256, "digest")
// trigger unable to insert blob record
@ -1975,12 +2039,12 @@ func TestS3DedupeErr(t *testing.T) {
So(err, ShouldBeNil)
// copy cache db to the new imagestore
input, err := os.ReadFile(path.Join(tdir, s3.CacheDBName+storage.DBExtensionName))
input, err := os.ReadFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName))
So(err, ShouldBeNil)
tdir = t.TempDir()
err = os.WriteFile(path.Join(tdir, s3.CacheDBName+storage.DBExtensionName), input, 0o600)
err = os.WriteFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName), input, 0o600)
So(err, ShouldBeNil)
imgStore = createMockStorage(testDir, tdir, true, &StorageDriverMock{
@ -2015,12 +2079,12 @@ func TestS3DedupeErr(t *testing.T) {
So(err, ShouldBeNil)
// copy cache db to the new imagestore
input, err := os.ReadFile(path.Join(tdir, s3.CacheDBName+storage.DBExtensionName))
input, err := os.ReadFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName))
So(err, ShouldBeNil)
tdir = t.TempDir()
err = os.WriteFile(path.Join(tdir, s3.CacheDBName+storage.DBExtensionName), input, 0o600)
err = os.WriteFile(path.Join(tdir, s3.CacheDBName+storageConstants.DBExtensionName), input, 0o600)
So(err, ShouldBeNil)
imgStore = createMockStorage(testDir, tdir, true, &StorageDriverMock{

@ -18,6 +18,7 @@ import (
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
"zotregistry.io/zot/pkg/storage/local"
)
@ -31,9 +32,13 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
log := log.NewLogger("debug", "")
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore := local.NewImageStore(dir, true, storage.DefaultGCDelay,
true, true, log, metrics, nil)
true, true, log, metrics, nil, cacheDriver)
Convey("Scrub only one repo", t, func(c C) {
// initialize repo

@ -28,6 +28,7 @@ import (
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/storage/cache"
"zotregistry.io/zot/pkg/storage/local"
"zotregistry.io/zot/pkg/storage/s3"
"zotregistry.io/zot/pkg/test"
@ -77,8 +78,13 @@ func createObjectsStore(rootDir string, cacheDir string) (driver.StorageDriver,
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: cacheDir,
Name: "s3_cache",
UseRelPaths: false,
}, log)
il := s3.NewImageStore(rootDir, cacheDir, false, storage.DefaultGCDelay,
true, false, log, metrics, nil, store,
true, false, log, metrics, nil, store, cacheDriver,
)
return store, il, err
@ -123,8 +129,13 @@ func TestStorageAPIs(t *testing.T) {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: dir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore = local.NewImageStore(dir, true, storage.DefaultGCDelay, true,
true, log, metrics, nil)
true, log, metrics, nil, cacheDriver)
}
Convey("Repo layout", t, func(c C) {
@ -705,18 +716,22 @@ func TestMandatoryAnnotations(t *testing.T) {
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storage.ImageStore) (bool, error) {
return false, nil
},
}, store)
}, store, nil)
defer cleanupStorage(store, testDir)
} else {
tdir = t.TempDir()
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: tdir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore = local.NewImageStore(tdir, true, storage.DefaultGCDelay, true,
true, log, metrics, &mocks.MockedLint{
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storage.ImageStore) (bool, error) {
return false, nil
},
})
}, cacheDriver)
}
Convey("Setup manifest", t, func() {
@ -769,15 +784,20 @@ func TestMandatoryAnnotations(t *testing.T) {
//nolint: goerr113
return false, errors.New("linter error")
},
}, store)
}, store, nil)
} else {
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
RootDir: tdir,
Name: "cache",
UseRelPaths: true,
}, log)
imgStore = local.NewImageStore(tdir, true, storage.DefaultGCDelay, true,
true, log, metrics, &mocks.MockedLint{
LintFn: func(repo string, manifestDigest godigest.Digest, imageStore storage.ImageStore) (bool, error) {
//nolint: goerr113
return false, errors.New("linter error")
},
})
}, cacheDriver)
}
_, err = imgStore.PutImageManifest("test", "1.0.0", ispec.MediaTypeImageManifest, manifestBuf)
@ -828,13 +848,13 @@ func TestStorageHandler(t *testing.T) {
// Create ImageStore
firstStore = local.NewImageStore(firstRootDir, false, storage.DefaultGCDelay,
false, false, log, metrics, nil)
false, false, log, metrics, nil, nil)
secondStore = local.NewImageStore(secondRootDir, false,
storage.DefaultGCDelay, false, false, log, metrics, nil)
storage.DefaultGCDelay, false, false, log, metrics, nil, nil)
thirdStore = local.NewImageStore(thirdRootDir, false, storage.DefaultGCDelay,
false, false, log, metrics, nil)
false, false, log, metrics, nil, nil)
}
Convey("Test storage handler", t, func() {