diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index 01ad65b6..10306379 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -81,6 +81,8 @@ jobs: echo "Startup complete" aws dynamodb --endpoint-url http://localhost:4566 --region "us-east-2" create-table --table-name BlobTable --attribute-definitions AttributeName=Digest,AttributeType=S --key-schema AttributeName=Digest,KeyType=HASH --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=5 + aws dynamodb --endpoint-url http://localhost:4566 --region "us-east-2" create-table --table-name RepoMetadataTable --attribute-definitions AttributeName=RepoName,AttributeType=S --key-schema AttributeName=RepoName,KeyType=HASH --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=5 + aws dynamodb --endpoint-url http://localhost:4566 --region "us-east-2" create-table --table-name ManifestDataTable --attribute-definitions AttributeName=Digest,AttributeType=S --key-schema AttributeName=Digest,KeyType=HASH --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=5 env: AWS_ACCESS_KEY_ID: fake AWS_SECRET_ACCESS_KEY: fake diff --git a/errors/errors.go b/errors/errors.go index 8c7825f8..c12698de 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -3,61 +3,76 @@ package errors import "errors" var ( - ErrBadConfig = errors.New("config: invalid config") - ErrCliBadConfig = errors.New("cli: bad config") - ErrRepoNotFound = errors.New("repository: not found") - ErrRepoIsNotDir = errors.New("repository: not a directory") - ErrRepoBadVersion = errors.New("repository: unsupported layout version") - ErrManifestNotFound = errors.New("manifest: not found") - ErrBadManifest = errors.New("manifest: invalid contents") - ErrBadIndex = errors.New("index: invalid contents") - ErrUploadNotFound = errors.New("uploads: not found") - ErrBadUploadRange = errors.New("uploads: bad range") - ErrBlobNotFound = errors.New("blob: not found") - ErrBadBlob = errors.New("blob: bad blob") - ErrBadBlobDigest = errors.New("blob: bad blob digest") - ErrUnknownCode = errors.New("error: unknown error code") - ErrBadCACert = errors.New("tls: invalid ca cert") - ErrBadUser = errors.New("auth: non-existent user") - ErrEntriesExceeded = errors.New("ldap: too many entries returned") - ErrLDAPEmptyPassphrase = errors.New("ldap: empty passphrase") - ErrLDAPBadConn = errors.New("ldap: bad connection") - ErrLDAPConfig = errors.New("config: invalid LDAP configuration") - ErrCacheRootBucket = errors.New("cache: unable to create/update root bucket") - ErrCacheNoBucket = errors.New("cache: unable to find bucket") - ErrCacheMiss = errors.New("cache: miss") - ErrRequireCred = errors.New("ldap: bind credentials required") - ErrInvalidCred = errors.New("ldap: invalid credentials") - ErrEmptyJSON = errors.New("cli: config json is empty") - ErrInvalidArgs = errors.New("cli: Invalid Arguments") - ErrInvalidFlagsCombination = errors.New("cli: Invalid combination of flags") - ErrInvalidURL = errors.New("cli: invalid URL format") - ErrUnauthorizedAccess = errors.New("auth: unauthorized access. 
check credentials") - ErrCannotResetConfigKey = errors.New("cli: cannot reset given config key") - ErrConfigNotFound = errors.New("cli: config with the given name does not exist") - ErrNoURLProvided = errors.New("cli: no URL provided in argument or via config") - ErrIllegalConfigKey = errors.New("cli: given config key is not allowed") - ErrScanNotSupported = errors.New("search: scanning of image media type not supported") - ErrCLITimeout = errors.New("cli: Query timed out while waiting for results") - ErrDuplicateConfigName = errors.New("cli: cli config name already added") - ErrInvalidRoute = errors.New("routes: invalid route prefix") - ErrImgStoreNotFound = errors.New("routes: image store not found corresponding to given route") - ErrEmptyValue = errors.New("cache: empty value") - ErrEmptyRepoList = errors.New("search: no repository found") - ErrInvalidRepositoryName = errors.New("routes: not a repository name") - ErrSyncMissingCatalog = errors.New("sync: couldn't fetch upstream registry's catalog") - ErrMethodNotSupported = errors.New("storage: method not supported") - ErrInvalidMetric = errors.New("metrics: invalid metric func") - ErrInjected = errors.New("test: injected failure") - ErrSyncInvalidUpstreamURL = errors.New("sync: upstream url not found in sync config") - ErrRegistryNoContent = errors.New("sync: could not find a Content that matches localRepo") - ErrSyncReferrerNotFound = errors.New("sync: couldn't find upstream referrer") - ErrSyncReferrer = errors.New("sync: failed to get upstream referrer") - ErrImageLintAnnotations = errors.New("routes: lint checks failed") - ErrParsingAuthHeader = errors.New("auth: failed parsing authorization header") - ErrBadType = errors.New("core: invalid type") - ErrParsingHTTPHeader = errors.New("routes: invalid HTTP header") - ErrBadRange = errors.New("storage: bad range") - ErrBadLayerCount = errors.New("manifest: layers count doesn't correspond to config history") - ErrManifestConflict = errors.New("manifest: multiple manifests found") + ErrBadConfig = errors.New("config: invalid config") + ErrCliBadConfig = errors.New("cli: bad config") + ErrRepoNotFound = errors.New("repository: not found") + ErrRepoIsNotDir = errors.New("repository: not a directory") + ErrRepoBadVersion = errors.New("repository: unsupported layout version") + ErrManifestNotFound = errors.New("manifest: not found") + ErrBadManifest = errors.New("manifest: invalid contents") + ErrBadIndex = errors.New("index: invalid contents") + ErrUploadNotFound = errors.New("uploads: not found") + ErrBadUploadRange = errors.New("uploads: bad range") + ErrBlobNotFound = errors.New("blob: not found") + ErrBadBlob = errors.New("blob: bad blob") + ErrBadBlobDigest = errors.New("blob: bad blob digest") + ErrUnknownCode = errors.New("error: unknown error code") + ErrBadCACert = errors.New("tls: invalid ca cert") + ErrBadUser = errors.New("auth: non-existent user") + ErrEntriesExceeded = errors.New("ldap: too many entries returned") + ErrLDAPEmptyPassphrase = errors.New("ldap: empty passphrase") + ErrLDAPBadConn = errors.New("ldap: bad connection") + ErrLDAPConfig = errors.New("config: invalid LDAP configuration") + ErrCacheRootBucket = errors.New("cache: unable to create/update root bucket") + ErrCacheNoBucket = errors.New("cache: unable to find bucket") + ErrCacheMiss = errors.New("cache: miss") + ErrRequireCred = errors.New("ldap: bind credentials required") + ErrInvalidCred = errors.New("ldap: invalid credentials") + ErrEmptyJSON = errors.New("cli: config json is empty") + ErrInvalidArgs = 
errors.New("cli: Invalid Arguments") + ErrInvalidFlagsCombination = errors.New("cli: Invalid combination of flags") + ErrInvalidURL = errors.New("cli: invalid URL format") + ErrUnauthorizedAccess = errors.New("auth: unauthorized access. check credentials") + ErrCannotResetConfigKey = errors.New("cli: cannot reset given config key") + ErrConfigNotFound = errors.New("cli: config with the given name does not exist") + ErrNoURLProvided = errors.New("cli: no URL provided in argument or via config") + ErrIllegalConfigKey = errors.New("cli: given config key is not allowed") + ErrScanNotSupported = errors.New("search: scanning of image media type not supported") + ErrCLITimeout = errors.New("cli: Query timed out while waiting for results") + ErrDuplicateConfigName = errors.New("cli: cli config name already added") + ErrInvalidRoute = errors.New("routes: invalid route prefix") + ErrImgStoreNotFound = errors.New("routes: image store not found corresponding to given route") + ErrEmptyValue = errors.New("cache: empty value") + ErrEmptyRepoList = errors.New("search: no repository found") + ErrInvalidRepositoryName = errors.New("routes: not a repository name") + ErrSyncMissingCatalog = errors.New("sync: couldn't fetch upstream registry's catalog") + ErrMethodNotSupported = errors.New("storage: method not supported") + ErrInvalidMetric = errors.New("metrics: invalid metric func") + ErrInjected = errors.New("test: injected failure") + ErrSyncInvalidUpstreamURL = errors.New("sync: upstream url not found in sync config") + ErrRegistryNoContent = errors.New("sync: could not find a Content that matches localRepo") + ErrSyncReferrerNotFound = errors.New("sync: couldn't find upstream referrer") + ErrSyncReferrer = errors.New("sync: failed to get upstream referrer") + ErrImageLintAnnotations = errors.New("routes: lint checks failed") + ErrParsingAuthHeader = errors.New("auth: failed parsing authorization header") + ErrBadType = errors.New("core: invalid type") + ErrParsingHTTPHeader = errors.New("routes: invalid HTTP header") + ErrBadRange = errors.New("storage: bad range") + ErrBadLayerCount = errors.New("manifest: layers count doesn't correspond to config history") + ErrManifestConflict = errors.New("manifest: multiple manifests found") + ErrManifestMetaNotFound = errors.New("repodb: image metadata not found for given manifest digest") + ErrManifestDataNotFound = errors.New("repodb: image data not found for given manifest digest") + ErrRepoMetaNotFound = errors.New("repodb: repo metadata not found for given repo name") + ErrTagMetaNotFound = errors.New("repodb: tag metadata not found for given repo and tag names") + ErrTypeAssertionFailed = errors.New("storage: failed DatabaseDriver type assertion") + ErrInvalidRequestParams = errors.New("resolver: parameter sent has invalid value") + ErrOrphanSignature = errors.New("repodb: signature detected but signed image doesn't exit") + ErrBadCtxFormat = errors.New("type assertion failed") + ErrEmptyRepoName = errors.New("repodb: repo name can't be empty string") + ErrEmptyTag = errors.New("repodb: tag can't be empty string") + ErrEmptyDigest = errors.New("repodb: digest can't be empty string") + ErrInvalidRepoTagFormat = errors.New("invalid format for tag search, not following repo:tag") + ErrLimitIsNegative = errors.New("pageturner: limit has negative value") + ErrOffsetIsNegative = errors.New("pageturner: offset has negative value") + ErrSortCriteriaNotSupported = errors.New("pageturner: the sort criteria is not supported") ) diff --git 
a/examples/config-all-remote.json b/examples/config-all-remote.json index c3d5c0c4..373f3032 100644 --- a/examples/config-all-remote.json +++ b/examples/config-all-remote.json @@ -16,7 +16,10 @@ "name": "dynamodb", "endpoint": "http://localhost:4566", "region": "us-east-2", - "tableName": "BlobTable" + "cacheTablename": "ZotBlobTable", + "repoMetaTablename": "ZotRepoMetadataTable", + "manifestDataTablename": "ZotManifestDataTable", + "versionTablename": "ZotVersion" } }, "http": { diff --git a/examples/config-dynamodb.json b/examples/config-dynamodb.json index 7123630b..ac884381 100644 --- a/examples/config-dynamodb.json +++ b/examples/config-dynamodb.json @@ -17,7 +17,10 @@ "name": "dynamodb", "endpoint": "http://localhost:4566", "region": "us-east-2", - "tableName": "BlobTable" + "cacheTablename": "ZotBlobTable", + "repoMetaTablename": "ZotRepoMetadataTable", + "manifestDataTablename": "ZotManifestDataTable", + "versionTablename": "ZotVersion" } }, "http": { diff --git a/examples/config-s3.json b/examples/config-s3.json index afe1b061..8929ef23 100644 --- a/examples/config-s3.json +++ b/examples/config-s3.json @@ -15,7 +15,7 @@ "name": "dynamodb", "endpoint": "http://localhost:4566", "region": "us-east-2", - "tableName": "MainTable" + "cacheTablename": "MainTable" }, "subPaths": { "/a": { @@ -59,7 +59,7 @@ "name": "dynamodb", "endpoint": "http://localhost:4566", "region": "us-east-2", - "tableName": "cTable" + "cacheTablename": "cTable" } } } diff --git a/go.mod b/go.mod index b1b286b6..40977655 100644 --- a/go.mod +++ b/go.mod @@ -54,6 +54,7 @@ require ( github.com/aquasecurity/trivy v0.0.0-00010101000000-000000000000 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.17.9 github.com/containers/image/v5 v5.23.0 + github.com/gobwas/glob v0.2.3 github.com/notaryproject/notation-go v0.12.0-beta.1 github.com/opencontainers/distribution-spec/specs-go v0.0.0-20220620172159-4ab4752c3b86 github.com/sigstore/cosign v1.13.1 @@ -207,7 +208,6 @@ require ( github.com/go-playground/validator/v10 v10.11.0 // indirect github.com/go-redis/redis/v8 v8.11.5 // indirect github.com/go-restruct/restruct v0.0.0-20191227155143-5734170a48a1 // indirect - github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt/v4 v4.4.2 // indirect diff --git a/pkg/api/controller.go b/pkg/api/controller.go index 4db05b67..2a975ddc 100644 --- a/pkg/api/controller.go +++ b/pkg/api/controller.go @@ -24,6 +24,10 @@ import ( ext "zotregistry.io/zot/pkg/extensions" "zotregistry.io/zot/pkg/extensions/monitoring" "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" + bolt "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" + dynamoParams "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper/params" + "zotregistry.io/zot/pkg/meta/repodb/repodbfactory" "zotregistry.io/zot/pkg/scheduler" "zotregistry.io/zot/pkg/storage" "zotregistry.io/zot/pkg/storage/cache" @@ -40,6 +44,7 @@ const ( type Controller struct { Config *config.Config Router *mux.Router + RepoDB repodb.RepoDB StoreController storage.StoreController Log log.Logger Audit *log.Logger @@ -162,6 +167,12 @@ func (c *Controller) Run(reloadCtx context.Context) error { return err } + if err := c.InitRepoDB(reloadCtx); err != nil { + return err + } + + c.StartBackgroundTasks(reloadCtx) + monitoring.SetServerInfo(c.Metrics, c.Config.Commit, c.Config.BinaryType, c.Config.GoVersion, c.Config.DistSpecVersion) @@ -248,7 +259,7 @@ func (c *Controller) Run(reloadCtx 
context.Context) error { return server.Serve(listener) } -func (c *Controller) InitImageStore(reloadCtx context.Context) error { +func (c *Controller) InitImageStore(ctx context.Context) error { c.StoreController = storage.StoreController{} linter := ext.GetLinter(c.Config, c.Log) @@ -327,8 +338,6 @@ func (c *Controller) InitImageStore(reloadCtx context.Context) error { } } - c.StartBackgroundTasks(reloadCtx) - return nil } @@ -464,7 +473,7 @@ func CreateCacheDatabaseDriver(storageConfig config.StorageConfig, log log.Logge dynamoParams := cache.DynamoDBDriverParameters{} dynamoParams.Endpoint, _ = storageConfig.CacheDriver["endpoint"].(string) dynamoParams.Region, _ = storageConfig.CacheDriver["region"].(string) - dynamoParams.TableName, _ = storageConfig.CacheDriver["tablename"].(string) + dynamoParams.TableName, _ = storageConfig.CacheDriver["cachetablename"].(string) driver, _ := storage.Create("dynamodb", dynamoParams, log) @@ -477,6 +486,99 @@ func CreateCacheDatabaseDriver(storageConfig config.StorageConfig, log log.Logge return nil } +func (c *Controller) InitRepoDB(reloadCtx context.Context) error { + if c.Config.Extensions != nil && c.Config.Extensions.Search != nil && *c.Config.Extensions.Search.Enable { + driver, err := CreateRepoDBDriver(c.Config.Storage.StorageConfig, c.Log) //nolint:contextcheck + if err != nil { + return err + } + + err = driver.PatchDB() + if err != nil { + return err + } + + err = repodb.SyncRepoDB(driver, c.StoreController, c.Log) + if err != nil { + return err + } + + c.RepoDB = driver + } + + return nil +} + +func CreateRepoDBDriver(storageConfig config.StorageConfig, log log.Logger) (repodb.RepoDB, error) { + if storageConfig.RemoteCache { + dynamoParams := getDynamoParams(storageConfig.CacheDriver, log) + + return repodbfactory.Create("dynamodb", dynamoParams) //nolint:contextcheck + } + + params := bolt.DBParameters{} + params.RootDir = storageConfig.RootDirectory + + return repodbfactory.Create("boltdb", params) //nolint:contextcheck +} + +func getDynamoParams(cacheDriverConfig map[string]interface{}, log log.Logger) dynamoParams.DBDriverParameters { + allParametersOk := true + + endpoint, ok := toStringIfOk(cacheDriverConfig, "endpoint", log) + allParametersOk = allParametersOk && ok + + region, ok := toStringIfOk(cacheDriverConfig, "region", log) + allParametersOk = allParametersOk && ok + + repoMetaTablename, ok := toStringIfOk(cacheDriverConfig, "repometatablename", log) + allParametersOk = allParametersOk && ok + + manifestDataTablename, ok := toStringIfOk(cacheDriverConfig, "manifestdatatablename", log) + allParametersOk = allParametersOk && ok + + versionTablename, ok := toStringIfOk(cacheDriverConfig, "versiontablename", log) + allParametersOk = allParametersOk && ok + + if !allParametersOk { + panic("dynamo parameters are not specified correctly, can't proceede") + } + + return dynamoParams.DBDriverParameters{ + Endpoint: endpoint, + Region: region, + RepoMetaTablename: repoMetaTablename, + ManifestDataTablename: manifestDataTablename, + VersionTablename: versionTablename, + } +} + +func toStringIfOk(cacheDriverConfig map[string]interface{}, param string, log log.Logger) (string, bool) { + val, ok := cacheDriverConfig[param] + + if !ok { + log.Error().Msgf("parsing CacheDriver config failed, field '%s' is not present", param) + + return "", false + } + + str, ok := val.(string) + + if !ok { + log.Error().Msgf("parsing CacheDriver config failed, parameter '%s' isn't a string", param) + + return "", false + } + + if str == "" { + 
log.Error().Msgf("parsing CacheDriver config failed, field '%s' is is empty", param) + + return "", false + } + + return str, ok +} + func (c *Controller) LoadNewConfig(reloadCtx context.Context, config *config.Config) { // reload access control config c.Config.AccessControl = config.AccessControl @@ -514,7 +616,7 @@ func (c *Controller) StartBackgroundTasks(reloadCtx context.Context) { // Enable extensions if extension config is provided for DefaultStore if c.Config != nil && c.Config.Extensions != nil { ext.EnableMetricsExtension(c.Config, c.Log, c.Config.Storage.RootDirectory) - ext.EnableSearchExtension(c.Config, c.Log, c.StoreController) + ext.EnableSearchExtension(c.Config, c.StoreController, c.RepoDB, c.Log) } if c.Config.Storage.SubPaths != nil { diff --git a/pkg/api/controller_test.go b/pkg/api/controller_test.go index f73ce07b..cf022777 100644 --- a/pkg/api/controller_test.go +++ b/pkg/api/controller_test.go @@ -162,10 +162,13 @@ func TestCreateCacheDatabaseDriver(t *testing.T) { } conf.Storage.CacheDriver = map[string]interface{}{ - "name": "dynamodb", - "endpoint": "http://localhost:4566", - "region": "us-east-2", - "tableName": "BlobTable", + "name": "dynamodb", + "endpoint": "http://localhost:4566", + "region": "us-east-2", + "cacheTablename": "BlobTable", + "repoMetaTablename": "RepoMetadataTable", + "manifestDataTablename": "ManifestDataTable", + "versionTablename": "Version", } driver := api.CreateCacheDatabaseDriver(conf.Storage.StorageConfig, log) @@ -174,19 +177,25 @@ func TestCreateCacheDatabaseDriver(t *testing.T) { // negative test cases conf.Storage.CacheDriver = map[string]interface{}{ - "endpoint": "http://localhost:4566", - "region": "us-east-2", - "tableName": "BlobTable", + "endpoint": "http://localhost:4566", + "region": "us-east-2", + "cacheTablename": "BlobTable", + "repoMetaTablename": "RepoMetadataTable", + "manifestDataTablename": "ManifestDataTable", + "versionTablename": "Version", } driver = api.CreateCacheDatabaseDriver(conf.Storage.StorageConfig, log) So(driver, ShouldBeNil) conf.Storage.CacheDriver = map[string]interface{}{ - "name": "dummy", - "endpoint": "http://localhost:4566", - "region": "us-east-2", - "tableName": "BlobTable", + "name": "dummy", + "endpoint": "http://localhost:4566", + "region": "us-east-2", + "cacheTablename": "BlobTable", + "repoMetaTablename": "RepoMetadataTable", + "manifestDataTablename": "ManifestDataTable", + "versionTablename": "Version", } driver = api.CreateCacheDatabaseDriver(conf.Storage.StorageConfig, log) @@ -194,6 +203,50 @@ func TestCreateCacheDatabaseDriver(t *testing.T) { }) } +func TestCreateRepoDBDriver(t *testing.T) { + Convey("Test CreateCacheDatabaseDriver dynamo", t, func() { + log := log.NewLogger("debug", "") + dir := t.TempDir() + conf := config.New() + conf.Storage.RootDirectory = dir + conf.Storage.Dedupe = true + conf.Storage.RemoteCache = true + conf.Storage.StorageDriver = map[string]interface{}{ + "name": "s3", + "rootdirectory": "/zot", + "region": "us-east-2", + "bucket": "zot-storage", + "secure": true, + "skipverify": false, + } + + conf.Storage.CacheDriver = map[string]interface{}{ + "name": "dummy", + "endpoint": "http://localhost:4566", + "region": "us-east-2", + "cachetablename": "BlobTable", + "repometatablename": "RepoMetadataTable", + "manifestdatatablename": "ManifestDataTable", + } + + testFunc := func() { _, _ = api.CreateRepoDBDriver(conf.Storage.StorageConfig, log) } + So(testFunc, ShouldPanic) + + conf.Storage.CacheDriver = map[string]interface{}{ + "name": "dummy", + 
"endpoint": "http://localhost:4566", + "region": "us-east-2", + "cachetablename": "", + "repometatablename": "RepoMetadataTable", + "manifestdatatablename": "ManifestDataTable", + "versiontablename": 1, + } + + testFunc = func() { _, _ = api.CreateRepoDBDriver(conf.Storage.StorageConfig, log) } + So(testFunc, ShouldPanic) + }) +} + func TestRunAlreadyRunningServer(t *testing.T) { Convey("Run server on unavailable port", t, func() { port := test.GetFreePort() @@ -6454,6 +6507,7 @@ func TestSearchRoutes(t *testing.T) { repoName := "testrepo" inaccessibleRepo := "inaccessible" + cfg, layers, manifest, err := test.GetImageComponents(10000) So(err, ShouldBeNil) @@ -6515,7 +6569,7 @@ func TestSearchRoutes(t *testing.T) { Policies: []config.Policy{ { Users: []string{user1}, - Actions: []string{"read"}, + Actions: []string{"read", "create"}, }, }, DefaultPolicy: []string{}, @@ -6523,8 +6577,8 @@ func TestSearchRoutes(t *testing.T) { inaccessibleRepo: config.PolicyGroup{ Policies: []config.Policy{ { - Users: []string{}, - Actions: []string{}, + Users: []string{user1}, + Actions: []string{"create"}, }, }, DefaultPolicy: []string{}, @@ -6542,9 +6596,38 @@ func TestSearchRoutes(t *testing.T) { cm.StartAndWait(port) defer cm.StopServer() + cfg, layers, manifest, err := test.GetImageComponents(10000) + So(err, ShouldBeNil) + + err = test.UploadImageWithBasicAuth( + test.Image{ + Config: cfg, + Layers: layers, + Manifest: manifest, + Tag: "latest", + }, baseURL, repoName, + user1, password1) + + So(err, ShouldBeNil) + + // data for the inaccessible repo + cfg, layers, manifest, err = test.GetImageComponents(10000) + So(err, ShouldBeNil) + + err = test.UploadImageWithBasicAuth( + test.Image{ + Config: cfg, + Layers: layers, + Manifest: manifest, + Tag: "latest", + }, baseURL, inaccessibleRepo, + user1, password1) + + So(err, ShouldBeNil) + query := ` { - GlobalSearch(query:""){ + GlobalSearch(query:"testrepo"){ Repos { Name Score @@ -6569,24 +6652,41 @@ func TestSearchRoutes(t *testing.T) { So(resp, ShouldNotBeNil) So(resp.StatusCode(), ShouldEqual, http.StatusUnauthorized) - // credentials for user unauthorized to access repo - user2 := "notWorking" - password2 := "notWorking" - testString2 := getCredString(user2, password2) - htpasswdPath2 := test.MakeHtpasswdFileFromString(testString2) - defer os.Remove(htpasswdPath2) - - ctlr.Config.HTTP.Auth = &config.AuthConfig{ - HTPasswd: config.AuthHTPasswd{ - Path: htpasswdPath2, + conf.AccessControl = &config.AccessControlConfig{ + Repositories: config.Repositories{ + repoName: config.PolicyGroup{ + Policies: []config.Policy{ + { + Users: []string{user1}, + Actions: []string{}, + }, + }, + DefaultPolicy: []string{}, + }, + inaccessibleRepo: config.PolicyGroup{ + Policies: []config.Policy{ + { + Users: []string{}, + Actions: []string{}, + }, + }, + DefaultPolicy: []string{}, + }, + }, + AdminPolicy: config.Policy{ + Users: []string{}, + Actions: []string{}, }, } + // authenticated, but no access to resource - resp, err = resty.R().SetBasicAuth(user2, password2).Get(baseURL + constants.FullSearchPrefix + + resp, err = resty.R().SetBasicAuth(user1, password1).Get(baseURL + constants.FullSearchPrefix + "?query=" + url.QueryEscape(query)) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) - So(resp.StatusCode(), ShouldEqual, http.StatusUnauthorized) + So(resp.StatusCode(), ShouldEqual, http.StatusOK) + So(string(resp.Body()), ShouldNotContainSubstring, repoName) + So(string(resp.Body()), ShouldNotContainSubstring, inaccessibleRepo) }) }) } diff --git a/pkg/api/routes.go 
b/pkg/api/routes.go index f7b253cb..2f61d2a1 100644 --- a/pkg/api/routes.go +++ b/pkg/api/routes.go @@ -35,6 +35,7 @@ import ( ext "zotregistry.io/zot/pkg/extensions" "zotregistry.io/zot/pkg/extensions/sync" "zotregistry.io/zot/pkg/log" + repoDBUpdate "zotregistry.io/zot/pkg/meta/repodb/update" localCtx "zotregistry.io/zot/pkg/requestcontext" "zotregistry.io/zot/pkg/storage" "zotregistry.io/zot/pkg/test" //nolint:goimports @@ -124,7 +125,7 @@ func (rh *RouteHandler) SetupRoutes() { } else { // extended build ext.SetupMetricsRoutes(rh.c.Config, rh.c.Router, rh.c.StoreController, rh.c.Log) - ext.SetupSearchRoutes(rh.c.Config, rh.c.Router, rh.c.StoreController, rh.c.Log) + ext.SetupSearchRoutes(rh.c.Config, rh.c.Router, rh.c.StoreController, rh.c.RepoDB, rh.c.Log) gqlPlayground.SetupGQLPlaygroundRoutes(rh.c.Config, rh.c.Router, rh.c.StoreController, rh.c.Log) } } @@ -401,6 +402,18 @@ func (rh *RouteHandler) GetManifest(response http.ResponseWriter, request *http. return } + if rh.c.RepoDB != nil { + err := repoDBUpdate.OnGetManifest(name, reference, digest, content, rh.c.StoreController, rh.c.RepoDB, rh.c.Log) + + if errors.Is(err, zerr.ErrOrphanSignature) { + rh.c.Log.Error().Err(err).Msgf("image is an orphan signature") + } else if err != nil { + response.WriteHeader(http.StatusInternalServerError) + + return + } + } + response.Header().Set(constants.DistContentDigestKey, digest.String()) response.Header().Set("Content-Length", fmt.Sprintf("%d", len(content))) response.Header().Set("Content-Type", mediaType) @@ -601,6 +614,18 @@ func (rh *RouteHandler) UpdateManifest(response http.ResponseWriter, request *ht return } + if rh.c.RepoDB != nil { + err := repoDBUpdate.OnUpdateManifest(name, reference, mediaType, digest, body, rh.c.StoreController, rh.c.RepoDB, + rh.c.Log) + if errors.Is(err, zerr.ErrOrphanSignature) { + rh.c.Log.Error().Err(err).Msgf("pushed image is an orphan signature") + } else if err != nil { + response.WriteHeader(http.StatusInternalServerError) + + return + } + } + response.Header().Set("Location", fmt.Sprintf("/v2/%s/manifests/%s", name, digest)) response.Header().Set(constants.DistContentDigestKey, digest.String()) response.WriteHeader(http.StatusCreated) @@ -647,6 +672,25 @@ func (rh *RouteHandler) DeleteManifest(response http.ResponseWriter, request *ht detectCollision = acCtx.CanDetectManifestCollision(name) } + manifestBlob, manifestDigest, mediaType, err := imgStore.GetImageManifest(name, reference) + if err != nil { + if errors.Is(err, zerr.ErrRepoNotFound) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain + WriteJSON(response, http.StatusBadRequest, + NewErrorList(NewError(NAME_UNKNOWN, map[string]string{"name": name}))) + } else if errors.Is(err, zerr.ErrManifestNotFound) { + WriteJSON(response, http.StatusNotFound, + NewErrorList(NewError(MANIFEST_UNKNOWN, map[string]string{"reference": reference}))) + } else if errors.Is(err, zerr.ErrBadManifest) { + WriteJSON(response, http.StatusBadRequest, + NewErrorList(NewError(UNSUPPORTED, map[string]string{"reference": reference}))) + } else { + rh.c.Log.Error().Err(err).Msg("unexpected error") + response.WriteHeader(http.StatusInternalServerError) + } + + return + } + err = imgStore.DeleteImageManifest(name, reference, detectCollision) if err != nil { if errors.Is(err, zerr.ErrRepoNotFound) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain @@ -669,6 +713,18 @@ func (rh *RouteHandler) DeleteManifest(response http.ResponseWriter, request *ht return } + if rh.c.RepoDB != nil { + err 
:= repoDBUpdate.OnDeleteManifest(name, reference, mediaType, manifestDigest, manifestBlob, + rh.c.StoreController, rh.c.RepoDB, rh.c.Log) + if errors.Is(err, zerr.ErrOrphanSignature) { + rh.c.Log.Error().Err(err).Msgf("pushed image is an orphan signature") + } else if err != nil { + response.WriteHeader(http.StatusInternalServerError) + + return + } + } + response.WriteHeader(http.StatusAccepted) } diff --git a/pkg/cli/image_cmd_test.go b/pkg/cli/image_cmd_test.go index 193912e5..9793cb52 100644 --- a/pkg/cli/image_cmd_test.go +++ b/pkg/cli/image_cmd_test.go @@ -8,7 +8,6 @@ import ( "context" "encoding/json" "fmt" - "io" "log" "os" "os/exec" @@ -31,7 +30,6 @@ import ( zotErrors "zotregistry.io/zot/errors" "zotregistry.io/zot/pkg/api" "zotregistry.io/zot/pkg/api/config" - "zotregistry.io/zot/pkg/api/constants" extconf "zotregistry.io/zot/pkg/extensions/config" "zotregistry.io/zot/pkg/test" ) @@ -1438,129 +1436,43 @@ func TestServerResponse(t *testing.T) { } func TestServerResponseGQLWithoutPermissions(t *testing.T) { - port := test.GetFreePort() - url := test.GetBaseURL(port) - conf := config.New() - conf.HTTP.Port = port + Convey("Test accessing a blobs folder without having permissions fails fast", t, func() { + port := test.GetFreePort() + conf := config.New() + conf.HTTP.Port = port - dir := t.TempDir() + dir := t.TempDir() - err := test.CopyFiles("../../test/data/zot-test", path.Join(dir, "zot-test")) - if err != nil { - panic(err) - } - - err = os.Chmod(path.Join(dir, "zot-test", "blobs"), 0o000) - if err != nil { - panic(err) - } - - conf.Storage.RootDirectory = dir - cveConfig := &extconf.CVEConfig{ - UpdateInterval: 2, - } - defaultVal := true - searchConfig := &extconf.SearchConfig{ - BaseConfig: extconf.BaseConfig{Enable: &defaultVal}, - CVE: cveConfig, - } - conf.Extensions = &extconf.ExtensionConfig{ - Search: searchConfig, - } - - logFile, err := os.CreateTemp(t.TempDir(), "zot-log*.txt") - if err != nil { - panic(err) - } - - logPath := logFile.Name() - defer os.Remove(logPath) - - writers := io.MultiWriter(os.Stdout, logFile) - - ctlr := api.NewController(conf) - ctlr.Log.Logger = ctlr.Log.Output(writers) - - go func(controller *api.Controller) { - // this blocks - if err := controller.Run(context.Background()); err != nil { - return - } - }(ctlr) - // wait till ready - for { - res, err := resty.R().Get(url + constants.FullSearchPrefix) - if err == nil && res.StatusCode() == 422 { - break - } - - time.Sleep(100 * time.Millisecond) - } - - _, err = test.ReadLogFileAndSearchString(logPath, "DB update completed, next update scheduled", 90*time.Second) - if err != nil { - panic(err) - } - - defer func(controller *api.Controller) { - err = os.Chmod(path.Join(dir, "zot-test", "blobs"), 0o777) + err := test.CopyFiles("../../test/data/zot-test", path.Join(dir, "zot-test")) if err != nil { panic(err) } - ctx := context.Background() - _ = controller.Server.Shutdown(ctx) - }(ctlr) - Convey("Test all images", t, func() { - args := []string{"imagetest"} - configPath := makeConfigFile(fmt.Sprintf(`{"configs":[{"_name":"imagetest","url":"%s","showspinner":false}]}`, url)) - defer os.Remove(configPath) - cveCmd := NewImageCommand(new(searchService)) - buff := bytes.NewBufferString("") - cveCmd.SetOut(buff) - cveCmd.SetErr(buff) - cveCmd.SetArgs(args) - err = cveCmd.Execute() - So(err, ShouldNotBeNil) - }) + err = os.Chmod(path.Join(dir, "zot-test", "blobs"), 0o000) + if err != nil { + panic(err) + } - Convey("Test all images verbose", t, func() { - args := []string{"imagetest", "--verbose"} 
- configPath := makeConfigFile(fmt.Sprintf(`{"configs":[{"_name":"imagetest","url":"%s","showspinner":false}]}`, url)) - defer os.Remove(configPath) - cmd := NewImageCommand(new(searchService)) - buff := bytes.NewBufferString("") - cmd.SetOut(buff) - cmd.SetErr(buff) - cmd.SetArgs(args) - err := cmd.Execute() - So(err, ShouldNotBeNil) - }) + defer func() { + err = os.Chmod(path.Join(dir, "zot-test", "blobs"), 0o777) + if err != nil { + panic(err) + } + }() - Convey("Test image by name", t, func() { - args := []string{"imagetest", "--name", "zot-test"} - configPath := makeConfigFile(fmt.Sprintf(`{"configs":[{"_name":"imagetest","url":"%s","showspinner":false}]}`, url)) - defer os.Remove(configPath) - cmd := NewImageCommand(new(searchService)) - buff := bytes.NewBufferString("") - cmd.SetOut(buff) - cmd.SetErr(buff) - cmd.SetArgs(args) - err := cmd.Execute() - So(err, ShouldNotBeNil) - }) + conf.Storage.RootDirectory = dir + defaultVal := true + searchConfig := &extconf.SearchConfig{ + BaseConfig: extconf.BaseConfig{Enable: &defaultVal}, + } + conf.Extensions = &extconf.ExtensionConfig{ + Search: searchConfig, + } - Convey("Test image by digest", t, func() { - args := []string{"imagetest", "--digest", test.GetTestBlobDigest("zot-test", "manifest").Encoded()} - configPath := makeConfigFile(fmt.Sprintf(`{"configs":[{"_name":"imagetest","url":"%s","showspinner":false}]}`, url)) - defer os.Remove(configPath) - cmd := NewImageCommand(new(searchService)) - buff := bytes.NewBufferString("") - cmd.SetOut(buff) - cmd.SetErr(buff) - cmd.SetArgs(args) - err := cmd.Execute() - So(err, ShouldNotBeNil) + ctlr := api.NewController(conf) + if err := ctlr.Run(context.Background()); err != nil { + So(err, ShouldNotBeNil) + } }) } diff --git a/pkg/cli/root_test.go b/pkg/cli/root_test.go index ad6ecb19..6e16b4f9 100644 --- a/pkg/cli/root_test.go +++ b/pkg/cli/root_test.go @@ -238,7 +238,7 @@ func TestVerify(t *testing.T) { "name":"dynamodb", "endpoint":"http://localhost:4566", "region":"us-east-2", - "tableName":"BlobTable" + "cacheTablename":"BlobTable" } }, "http":{ @@ -305,7 +305,7 @@ func TestVerify(t *testing.T) { "name":"dynamodb", "endpoint":"http://localhost:4566", "region":"us-east-2", - "tableName":"BlobTable" + "cacheTablename":"BlobTable" }, "storageDriver":{ "name":"s3", @@ -389,7 +389,7 @@ func TestVerify(t *testing.T) { "name":"dynamodb", "endpoint":"http://localhost:4566", "region":"us-east-2", - "tableName":"BlobTable" + "cacheTablename":"BlobTable" } } } @@ -468,7 +468,7 @@ func TestVerify(t *testing.T) { "name":"dynamodb", "endpoint":"http://localhost:4566", "region":"us-east-2", - "tableName":"BlobTable" + "cacheTablename":"BlobTable" } } } diff --git a/pkg/extensions/extension_search.go b/pkg/extensions/extension_search.go index da8b9b9c..a42571cf 100644 --- a/pkg/extensions/extension_search.go +++ b/pkg/extensions/extension_search.go @@ -16,6 +16,7 @@ import ( cveinfo "zotregistry.io/zot/pkg/extensions/search/cve" "zotregistry.io/zot/pkg/extensions/search/gql_generated" "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" "zotregistry.io/zot/pkg/storage" ) @@ -24,7 +25,9 @@ import ( // The library doesn't seem to handle concurrency very well internally. 
var cveInfo cveinfo.CveInfo //nolint:gochecknoglobals -func EnableSearchExtension(config *config.Config, log log.Logger, storeController storage.StoreController) { +func EnableSearchExtension(config *config.Config, storeController storage.StoreController, + repoDB repodb.RepoDB, log log.Logger, +) { if config.Extensions.Search != nil && *config.Extensions.Search.Enable && config.Extensions.Search.CVE != nil { defaultUpdateInterval, _ := time.ParseDuration("2h") @@ -34,7 +37,7 @@ func EnableSearchExtension(config *config.Config, log log.Logger, storeControlle log.Warn().Msg("CVE update interval set to too-short interval < 2h, changing update duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll } - cveInfo = cveinfo.NewCVEInfo(storeController, log) + cveInfo = cveinfo.NewCVEInfo(storeController, repoDB, log) go func() { err := downloadTrivyDB(log, config.Extensions.Search.CVE.UpdateInterval) @@ -63,7 +66,7 @@ func downloadTrivyDB(log log.Logger, updateInterval time.Duration) error { } func SetupSearchRoutes(config *config.Config, router *mux.Router, storeController storage.StoreController, - log log.Logger, + repoDB repodb.RepoDB, log log.Logger, ) { log.Info().Msg("setting up search routes") @@ -74,12 +77,12 @@ func SetupSearchRoutes(config *config.Config, router *mux.Router, storeControlle // cveinfo should already be initialized by this time // as EnableSearchExtension is supposed to be called earlier, but let's be sure if cveInfo == nil { - cveInfo = cveinfo.NewCVEInfo(storeController, log) + cveInfo = cveinfo.NewCVEInfo(storeController, repoDB, log) } - resConfig = search.GetResolverConfig(log, storeController, cveInfo) + resConfig = search.GetResolverConfig(log, storeController, repoDB, cveInfo) } else { - resConfig = search.GetResolverConfig(log, storeController, nil) + resConfig = search.GetResolverConfig(log, storeController, repoDB, nil) } graphqlPrefix := router.PathPrefix(constants.FullSearchPrefix).Methods("OPTIONS", "GET", "POST") diff --git a/pkg/extensions/extension_search_disabled.go b/pkg/extensions/extension_search_disabled.go index fd522799..5edf43cb 100644 --- a/pkg/extensions/extension_search_disabled.go +++ b/pkg/extensions/extension_search_disabled.go @@ -9,18 +9,21 @@ import ( "zotregistry.io/zot/pkg/api/config" "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" "zotregistry.io/zot/pkg/storage" ) // EnableSearchExtension ... -func EnableSearchExtension(config *config.Config, log log.Logger, storeController storage.StoreController) { +func EnableSearchExtension(config *config.Config, storeController storage.StoreController, + repoDB repodb.RepoDB, log log.Logger, +) { log.Warn().Msg("skipping enabling search extension because given zot binary doesn't include this feature," + "please build a binary that does so") } // SetupSearchRoutes ... 
-func SetupSearchRoutes(conf *config.Config, router *mux.Router, - storeController storage.StoreController, log log.Logger, +func SetupSearchRoutes(config *config.Config, router *mux.Router, storeController storage.StoreController, + repoDB repodb.RepoDB, log log.Logger, ) { log.Warn().Msg("skipping setting up search routes because given zot binary doesn't include this feature," + "please build a binary that does so") diff --git a/pkg/extensions/search/common/common.go b/pkg/extensions/search/common/common.go index 077202d6..dceae749 100644 --- a/pkg/extensions/search/common/common.go +++ b/pkg/extensions/search/common/common.go @@ -18,6 +18,7 @@ const ( LabelAnnotationCreated = "org.label-schema.build-date" LabelAnnotationVendor = "org.label-schema.vendor" LabelAnnotationDescription = "org.label-schema.description" + LabelAnnotationLicenses = "org.label-schema.license" LabelAnnotationTitle = "org.label-schema.name" LabelAnnotationDocumentation = "org.label-schema.usage" LabelAnnotationSource = "org.label-schema.vcs-url" @@ -192,6 +193,10 @@ func GetDescription(annotations map[string]string) string { return GetAnnotationValue(annotations, ispec.AnnotationDescription, LabelAnnotationDescription) } +func GetLicenses(annotations map[string]string) string { + return GetAnnotationValue(annotations, ispec.AnnotationLicenses, LabelAnnotationLicenses) +} + func GetVendor(annotations map[string]string) string { return GetAnnotationValue(annotations, ispec.AnnotationVendor, LabelAnnotationVendor) } @@ -220,12 +225,6 @@ func GetCategories(labels map[string]string) string { return categories } -func GetLicenses(annotations map[string]string) string { - licenses := annotations[ispec.AnnotationLicenses] - - return licenses -} - func GetAnnotations(annotations, labels map[string]string) ImageAnnotations { description := GetDescription(annotations) if description == "" { diff --git a/pkg/extensions/search/common/common_test.go b/pkg/extensions/search/common/common_test.go index aede184e..ac4712c6 100644 --- a/pkg/extensions/search/common/common_test.go +++ b/pkg/extensions/search/common/common_test.go @@ -9,9 +9,9 @@ import ( "errors" "fmt" "io" + "net/http" "net/url" "os" - "os/exec" "path" "strconv" "strings" @@ -19,23 +19,24 @@ import ( "time" dbTypes "github.com/aquasecurity/trivy-db/pkg/types" + "github.com/gobwas/glob" godigest "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go" ispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sigstore/cosign/cmd/cosign/cli/generate" - "github.com/sigstore/cosign/cmd/cosign/cli/options" - "github.com/sigstore/cosign/cmd/cosign/cli/sign" + artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1" . "github.com/smartystreets/goconvey/convey" "gopkg.in/resty.v1" + zerr "zotregistry.io/zot/errors" "zotregistry.io/zot/pkg/api" "zotregistry.io/zot/pkg/api/config" "zotregistry.io/zot/pkg/api/constants" extconf "zotregistry.io/zot/pkg/extensions/config" "zotregistry.io/zot/pkg/extensions/monitoring" - "zotregistry.io/zot/pkg/extensions/search" "zotregistry.io/zot/pkg/extensions/search/common" + "zotregistry.io/zot/pkg/extensions/search/convert" "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" "zotregistry.io/zot/pkg/storage" "zotregistry.io/zot/pkg/storage/local" . 
"zotregistry.io/zot/pkg/test" @@ -44,6 +45,7 @@ import ( const ( graphqlQueryPrefix = constants.FullSearchPrefix + DBFileName = "repo.db" ) var ( @@ -109,6 +111,7 @@ type GlobalSearchResultResp struct { type GlobalSearchResult struct { GlobalSearch GlobalSearch `json:"globalSearch"` } + type GlobalSearch struct { Images []common.ImageSummary `json:"images"` Repos []common.RepoSummary `json:"repos"` @@ -145,90 +148,14 @@ func testSetup(t *testing.T, subpath string) error { //nolint:unparam rootDir = dir - subRootDir = path.Join(subDir, subpath) + subRootDir = subDir err := CopyFiles("../../../../test/data", rootDir) if err != nil { return err } - return CopyFiles("../../../../test/data", subRootDir) -} - -func signUsingCosign(port string) error { - cwd, err := os.Getwd() - So(err, ShouldBeNil) - - defer func() { _ = os.Chdir(cwd) }() - - tdir, err := os.MkdirTemp("", "cosign") - if err != nil { - return err - } - - defer os.RemoveAll(tdir) - - digest := GetTestBlobDigest("zot-cve-test", "manifest").String() - - _ = os.Chdir(tdir) - - // generate a keypair - os.Setenv("COSIGN_PASSWORD", "") - - err = generate.GenerateKeyPairCmd(context.TODO(), "", nil) - if err != nil { - return err - } - - imageURL := fmt.Sprintf("localhost:%s/%s@%s", port, "zot-cve-test", digest) - - // sign the image - return sign.SignCmd(&options.RootOptions{Verbose: true, Timeout: 1 * time.Minute}, - options.KeyOpts{KeyRef: path.Join(tdir, "cosign.key"), PassFunc: generate.GetPass}, - options.RegistryOptions{AllowInsecure: true}, - map[string]interface{}{"tag": "1.0"}, - []string{imageURL}, - "", "", true, "", "", "", false, false, "", true) -} - -func signUsingNotary(port string) error { - cwd, err := os.Getwd() - if err != nil { - return err - } - - defer func() { _ = os.Chdir(cwd) }() - - tdir, err := os.MkdirTemp("", "notation") - if err != nil { - return err - } - - defer os.RemoveAll(tdir) - - _ = os.Chdir(tdir) - - _, err = exec.LookPath("notation") - if err != nil { - return err - } - - os.Setenv("XDG_CONFIG_HOME", tdir) - - // generate a keypair - cmd := exec.Command("notation", "cert", "generate-test", "--trust", "notation-sign-test") - - err = cmd.Run() - if err != nil { - return err - } - - // sign the image - image := fmt.Sprintf("localhost:%s/%s:%s", port, "zot-test", "0.0.1") - - cmd = exec.Command("notation", "sign", "--key", "notation-sign-test", "--plain-http", image) - - return cmd.Run() + return CopyFiles("../../../../test/data", path.Join(subDir, subpath)) } func getTags() ([]common.TagInfo, []common.TagInfo) { @@ -284,129 +211,91 @@ func readFileAndSearchString(filePath string, stringToMatch string, timeout time } } +func verifyRepoSummaryFields(t *testing.T, + actualRepoSummary, expectedRepoSummary *common.RepoSummary, +) { + t.Helper() + + t.Logf("Verify RepoSummary \n%v \nmatches fields of \n%v", + actualRepoSummary, expectedRepoSummary, + ) + + So(actualRepoSummary.Name, ShouldEqual, expectedRepoSummary.Name) + So(actualRepoSummary.LastUpdated, ShouldEqual, expectedRepoSummary.LastUpdated) + So(actualRepoSummary.Size, ShouldEqual, expectedRepoSummary.Size) + So(len(actualRepoSummary.Vendors), ShouldEqual, len(expectedRepoSummary.Vendors)) + + for index, vendor := range actualRepoSummary.Vendors { + So(vendor, ShouldEqual, expectedRepoSummary.Vendors[index]) + } + + So(len(actualRepoSummary.Platforms), ShouldEqual, len(expectedRepoSummary.Platforms)) + + for index, platform := range actualRepoSummary.Platforms { + So(platform.Os, ShouldEqual, expectedRepoSummary.Platforms[index].Os) + 
So(platform.Arch, ShouldEqual, expectedRepoSummary.Platforms[index].Arch) + } + + So(actualRepoSummary.NewestImage.Tag, ShouldEqual, expectedRepoSummary.NewestImage.Tag) + verifyImageSummaryFields(t, &actualRepoSummary.NewestImage, &expectedRepoSummary.NewestImage) +} + +func verifyImageSummaryFields(t *testing.T, + actualImageSummary, expectedImageSummary *common.ImageSummary, +) { + t.Helper() + + t.Logf("Verify ImageSummary \n%v \nmatches fields of \n%v", + actualImageSummary, expectedImageSummary, + ) + + So(actualImageSummary.Tag, ShouldEqual, expectedImageSummary.Tag) + So(actualImageSummary.LastUpdated, ShouldEqual, expectedImageSummary.LastUpdated) + So(actualImageSummary.Size, ShouldEqual, expectedImageSummary.Size) + So(actualImageSummary.IsSigned, ShouldEqual, expectedImageSummary.IsSigned) + So(actualImageSummary.Vendor, ShouldEqual, expectedImageSummary.Vendor) + So(actualImageSummary.Platform.Os, ShouldEqual, expectedImageSummary.Platform.Os) + So(actualImageSummary.Platform.Arch, ShouldEqual, expectedImageSummary.Platform.Arch) + So(actualImageSummary.Title, ShouldEqual, expectedImageSummary.Title) + So(actualImageSummary.Description, ShouldEqual, expectedImageSummary.Description) + So(actualImageSummary.Source, ShouldEqual, expectedImageSummary.Source) + So(actualImageSummary.Documentation, ShouldEqual, expectedImageSummary.Documentation) + So(actualImageSummary.Licenses, ShouldEqual, expectedImageSummary.Licenses) + So(len(actualImageSummary.History), ShouldEqual, len(expectedImageSummary.History)) + + for index, history := range actualImageSummary.History { + // Digest could be empty string if the history entry is not associated with a layer + So(history.Layer.Digest, ShouldEqual, expectedImageSummary.History[index].Layer.Digest) + So(history.Layer.Size, ShouldEqual, expectedImageSummary.History[index].Layer.Size) + So( + history.HistoryDescription.Author, + ShouldEqual, + expectedImageSummary.History[index].HistoryDescription.Author, + ) + So( + history.HistoryDescription.Created, + ShouldEqual, + expectedImageSummary.History[index].HistoryDescription.Created, + ) + So( + history.HistoryDescription.CreatedBy, + ShouldEqual, + expectedImageSummary.History[index].HistoryDescription.CreatedBy, + ) + So( + history.HistoryDescription.EmptyLayer, + ShouldEqual, + expectedImageSummary.History[index].HistoryDescription.EmptyLayer, + ) + So( + history.HistoryDescription.Comment, + ShouldEqual, + expectedImageSummary.History[index].HistoryDescription.Comment, + ) + } +} + func TestRepoListWithNewestImage(t *testing.T) { - Convey("Test repoListWithNewestImage AddError", t, func() { - subpath := "/a" - err := testSetup(t, subpath) - if err != nil { - panic(err) - } - - err = os.RemoveAll(path.Join(rootDir, "zot-cve-test")) - if err != nil { - panic(err) - } - - err = os.RemoveAll(path.Join(rootDir, subpath)) - if err != nil { - panic(err) - } - - port := GetFreePort() - baseURL := GetBaseURL(port) - conf := config.New() - conf.HTTP.Port = port - conf.Storage.RootDirectory = rootDir - defaultVal := true - conf.Extensions = &extconf.ExtensionConfig{ - Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}}, - } - - conf.Extensions.Search.CVE = nil - - ctlr := api.NewController(conf) - - go func() { - // this blocks - if err := ctlr.Run(context.Background()); err != nil { - return - } - }() - - // wait till ready - for { - _, err := resty.R().Get(baseURL) - if err == nil { - break - } - time.Sleep(100 * time.Millisecond) - } - - // shut down server - defer 
func() { - ctx := context.Background() - _ = ctlr.Server.Shutdown(ctx) - }() - - resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + - "?query={RepoListWithNewestImage{Name%20NewestImage{Tag}}}") - So(resp, ShouldNotBeNil) - So(err, ShouldBeNil) - So(resp.StatusCode(), ShouldEqual, 200) - - err = os.Remove(path.Join(rootDir, - "zot-test/blobs/sha256", GetTestBlobDigest("zot-test", "manifest").Encoded())) - if err != nil { - panic(err) - } - - resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + - "?query={RepoListWithNewestImage{Name%20NewestImage{Tag}}}") - So(resp, ShouldNotBeNil) - So(err, ShouldBeNil) - body := string(resp.Body()) - So(body, ShouldContainSubstring, "can't get last updated manifest for repo:") - So(resp.StatusCode(), ShouldEqual, 200) - - err = CopyFiles("../../../../test/data/zot-test", path.Join(rootDir, "zot-test")) - if err != nil { - panic(err) - } - - err = os.Remove(path.Join(rootDir, - "zot-test/blobs/sha256/", GetTestBlobDigest("zot-test", "config").Encoded())) - if err != nil { - panic(err) - } - - err = os.Remove(path.Join(rootDir, - "zot-test/blobs/sha256", GetTestBlobDigest("zot-test", "layer").Encoded())) - if err != nil { - panic(err) - } - - resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + - "?query={RepoListWithNewestImage{Name%20NewestImage{Tag}}}") - So(resp, ShouldNotBeNil) - So(err, ShouldBeNil) - body = string(resp.Body()) - So(body, ShouldContainSubstring, "can't get last updated manifest for repo") - So(resp.StatusCode(), ShouldEqual, 200) - - err = CopyFiles("../../../../test/data/zot-test", path.Join(rootDir, "zot-test")) - if err != nil { - panic(err) - } - - err = os.Remove(path.Join(rootDir, "zot-test/index.json")) - if err != nil { - panic(err) - } - //nolint: lll - manifestNoAnnotations := "{\"schemaVersion\":2,\"manifests\":[{\"mediaType\":\"application/vnd.oci.image.manifest.v1+json\",\"digest\":\"" + GetTestBlobDigest("zot-test", "manifest").String() + "\",\"size\":350}]}" - err = os.WriteFile(path.Join(rootDir, "zot-test/index.json"), []byte(manifestNoAnnotations), 0o600) - if err != nil { - panic(err) - } - resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + - "?query={RepoListWithNewestImage{Name%20NewestImage{Tag}}}") - So(resp, ShouldNotBeNil) - So(err, ShouldBeNil) - body = string(resp.Body()) - So(body, ShouldContainSubstring, "reference not found for manifest") - So(resp.StatusCode(), ShouldEqual, 200) - }) - Convey("Test repoListWithNewestImage by tag with HTTP", t, func() { subpath := "/a" err := testSetup(t, subpath) @@ -429,27 +318,9 @@ func TestRepoListWithNewestImage(t *testing.T) { ctlr := api.NewController(conf) - go func() { - // this blocks - if err := ctlr.Run(context.Background()); err != nil { - return - } - }() - - // wait till ready - for { - _, err := resty.R().Get(baseURL) - if err == nil { - break - } - time.Sleep(100 * time.Millisecond) - } - - // shut down server - defer func() { - ctx := context.Background() - _ = ctlr.Server.Shutdown(ctx) - }() + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) resp, err := resty.R().Get(baseURL + "/v2/") So(resp, ShouldNotBeNil) @@ -475,6 +346,31 @@ func TestRepoListWithNewestImage(t *testing.T) { images := responseStruct.RepoListWithNewestImage.Repos So(images[0].NewestImage.Tag, ShouldEqual, "0.0.1") + query := `{ + RepoListWithNewestImage(requestedPage: { + limit:1 + offset:0 + sortBy: UPDATE_TIME + }){ + Name + NewestImage{ + Tag + } + } + }` + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + + "?query=" + 
url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + err = json.Unmarshal(resp.Body(), &responseStruct) + So(err, ShouldBeNil) + So(len(responseStruct.RepoListWithNewestImage.Repos), ShouldEqual, 1) + + repos := responseStruct.RepoListWithNewestImage.Repos + So(repos[0].NewestImage.Tag, ShouldEqual, "0.0.1") + // Verify we don't return any vulnerabilities if CVE scanning is disabled resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query={RepoListWithNewestImage{Name%20NewestImage{Tag%20Vulnerabilities{MaxSeverity%20Count}}}}") @@ -509,7 +405,7 @@ func TestRepoListWithNewestImage(t *testing.T) { err = json.Unmarshal(resp.Body(), &responseStruct) So(err, ShouldBeNil) - So(responseStruct.Errors, ShouldNotBeNil) + So(responseStruct.Errors, ShouldBeNil) // Even if permissions fail data is coming from the DB err = os.Chmod(rootDir, 0o755) if err != nil { @@ -521,7 +417,7 @@ func TestRepoListWithNewestImage(t *testing.T) { manifestDigest, configDigest, _ = GetOciLayoutDigests("../../../../test/data/zot-test") // Delete config blob and try. - err = os.Remove(path.Join(subRootDir, "zot-test/blobs/sha256", configDigest.Encoded())) + err = os.Remove(path.Join(subRootDir, "a/zot-test/blobs/sha256", configDigest.Encoded())) if err != nil { panic(err) } @@ -532,7 +428,7 @@ func TestRepoListWithNewestImage(t *testing.T) { So(err, ShouldBeNil) So(resp.StatusCode(), ShouldEqual, 200) - err = os.Remove(path.Join(subRootDir, "zot-test/blobs/sha256", + err = os.Remove(path.Join(subRootDir, "a/zot-test/blobs/sha256", manifestDigest.Encoded())) if err != nil { panic(err) @@ -607,29 +503,11 @@ func TestRepoListWithNewestImage(t *testing.T) { ctlr := api.NewController(conf) ctlr.Log.Logger = ctlr.Log.Output(writers) - go func() { - // this blocks - if err := ctlr.Run(context.Background()); err != nil { - return - } - }() + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) - // wait till ready - for { - _, err := resty.R().Get(baseURL) - if err == nil { - break - } - time.Sleep(100 * time.Millisecond) - } - - // shut down server - defer func() { - ctx := context.Background() - _ = ctlr.Server.Shutdown(ctx) - }() - - substring := "\"Extensions\":{\"Search\":{\"Enable\":true,\"CVE\":{\"UpdateInterval\":3600000000000}},\"Sync\":null,\"Metrics\":null,\"Scrub\":null,\"Lint\":null}" //nolint:lll // gofumpt conflicts with lll + substring := "{\"Search\":{\"Enable\":true,\"CVE\":{\"UpdateInterval\":3600000000000}}" found, err := readFileAndSearchString(logPath, substring, 2*time.Minute) So(found, ShouldBeTrue) So(err, ShouldBeNil) @@ -835,46 +713,31 @@ func TestExpandedRepoInfo(t *testing.T) { ctlr := api.NewController(conf) - go startServer(ctlr) - defer stopServer(ctlr) - WaitTillServerReady(baseURL) + imageStore := local.NewImageStore(tempDir, false, 0, false, false, + log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil) - config, layers, manifest, err := GetImageComponents(1000) - So(err, ShouldBeNil) + storeController := storage.StoreController{ + DefaultStore: imageStore, + } - err = UploadImage( - Image{ - Manifest: manifest, - Config: config, - Layers: layers, - Tag: "1.0", - }, - baseURL, - repo1) - So(err, ShouldBeNil) + // init storage layout with 3 images + for i := 1; i <= 3; i++ { + config, layers, manifest, err := GetImageComponents(100) + So(err, ShouldBeNil) - err = UploadImage( - Image{ - Manifest: manifest, - Config: config, - Layers: layers, - Tag: 
"2.0", - }, - baseURL, - repo1) - So(err, ShouldBeNil) - - err = UploadImage( - Image{ - Manifest: manifest, - Config: config, - Layers: layers, - Tag: tagToBeRemoved, - }, - baseURL, - repo1) - So(err, ShouldBeNil) + err = WriteImageToFileSystem( + Image{ + Manifest: manifest, + Config: config, + Layers: layers, + Tag: fmt.Sprintf("%d.0", i), + }, + repo1, + storeController) + So(err, ShouldBeNil) + } + // remote a tag from index.json indexPath := path.Join(tempDir, repo1, "index.json") indexFile, err := os.Open(indexPath) So(err, ShouldBeNil) @@ -897,6 +760,10 @@ func TestExpandedRepoInfo(t *testing.T) { err = os.WriteFile(indexPath, buf, 0o600) So(err, ShouldBeNil) + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) + query := "{ExpandedRepoInfo(repo:\"test1\"){Summary%20{Name%20LastUpdated%20Size%20Platforms%20{Os%20Arch}%20Vendors%20Score}%20Images%20{Digest%20IsSigned%20Tag%20Layers%20{Size%20Digest}}}}" //nolint: lll resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + query) @@ -935,27 +802,14 @@ func TestExpandedRepoInfo(t *testing.T) { ctlr := api.NewController(conf) - go func() { - // this blocks - if err := ctlr.Run(context.Background()); err != nil { - return - } - }() + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) - // wait till ready - for { - _, err := resty.R().Get(baseURL) - if err == nil { - break - } - time.Sleep(100 * time.Millisecond) - } - - // shut down server - defer func() { - ctx := context.Background() - _ = ctlr.Server.Shutdown(ctx) - }() + log := log.NewLogger("debug", "") + metrics := monitoring.NewMetricsServer(false, log) + testStorage := local.NewImageStore("../../../../test/data", false, storage.DefaultGCDelay, + false, false, log, metrics, nil, nil) resp, err := resty.R().Get(baseURL + "/v2/") So(resp, ShouldNotBeNil) @@ -980,7 +834,6 @@ func TestExpandedRepoInfo(t *testing.T) { So(err, ShouldBeNil) So(responseStruct.ExpandedRepoInfo.RepoInfo.Summary, ShouldNotBeEmpty) So(responseStruct.ExpandedRepoInfo.RepoInfo.Summary.Name, ShouldEqual, "zot-cve-test") - So(responseStruct.ExpandedRepoInfo.RepoInfo.Summary.Score, ShouldEqual, -1) query = "{ExpandedRepoInfo(repo:\"zot-cve-test\"){Images%20{Digest%20IsSigned%20Tag%20Layers%20{Size%20Digest}}}}" @@ -995,16 +848,20 @@ func TestExpandedRepoInfo(t *testing.T) { So(err, ShouldBeNil) So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries), ShouldNotEqual, 0) So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries[0].Layers), ShouldNotEqual, 0) + + _, testManifestDigest, _, err := testStorage.GetImageManifest("zot-cve-test", "0.0.1") + So(err, ShouldBeNil) + found := false for _, m := range responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries { - if m.Digest == GetTestBlobDigest("zot-cve-test", "manifest").String() { + if m.Digest == testManifestDigest.String() { found = true So(m.IsSigned, ShouldEqual, false) } } So(found, ShouldEqual, true) - err = signUsingCosign(port) + err = SignImageUsingCosign("zot-cve-test:0.0.1", port) So(err, ShouldBeNil) resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + query) @@ -1016,9 +873,13 @@ func TestExpandedRepoInfo(t *testing.T) { So(err, ShouldBeNil) So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries), ShouldNotEqual, 0) So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries[0].Layers), ShouldNotEqual, 0) + + _, testManifestDigest, _, err = testStorage.GetImageManifest("zot-cve-test", "0.0.1") + So(err, ShouldBeNil) + found = false for _, m := 
range responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries { - if m.Digest == GetTestBlobDigest("zot-cve-test", "manifest").String() { + if m.Digest == testManifestDigest.String() { found = true So(m.IsSigned, ShouldEqual, true) } @@ -1042,16 +903,20 @@ func TestExpandedRepoInfo(t *testing.T) { So(err, ShouldBeNil) So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries), ShouldNotEqual, 0) So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries[0].Layers), ShouldNotEqual, 0) + + _, testManifestDigest, _, err = testStorage.GetImageManifest("zot-test", "0.0.1") + So(err, ShouldBeNil) + found = false for _, m := range responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries { - if m.Digest == GetTestBlobDigest("zot-test", "manifest").String() { + if m.Digest == testManifestDigest.String() { found = true So(m.IsSigned, ShouldEqual, false) } } So(found, ShouldEqual, true) - err = signUsingNotary(port) + err = SignImageUsingCosign("zot-test:0.0.1", port) So(err, ShouldBeNil) resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "/query?query=" + query) @@ -1063,9 +928,13 @@ func TestExpandedRepoInfo(t *testing.T) { So(err, ShouldBeNil) So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries), ShouldNotEqual, 0) So(len(responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries[0].Layers), ShouldNotEqual, 0) + + _, testManifestDigest, _, err = testStorage.GetImageManifest("zot-test", "0.0.1") + So(err, ShouldBeNil) + found = false for _, m := range responseStruct.ExpandedRepoInfo.RepoInfo.ImageSummaries { - if m.Digest == GetTestBlobDigest("zot-test", "manifest").String() { + if m.Digest == testManifestDigest.String() { found = true So(m.IsSigned, ShouldEqual, true) } @@ -1581,29 +1450,9 @@ func TestDerivedImageListNoRepos(t *testing.T) { ctlr := api.NewController(conf) - go func() { - // this blocks - if err := ctlr.Run(context.Background()); err != nil { - return - } - }() - - // wait till ready - for { - _, err := resty.R().Get(baseURL) - if err == nil { - break - } - - time.Sleep(100 * time.Millisecond) - } - - // shut down server - - defer func() { - ctx := context.Background() - _ = ctlr.Server.Shutdown(ctx) - }() + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) query := ` { @@ -2128,29 +1977,9 @@ func TestBaseImageListNoRepos(t *testing.T) { ctlr := api.NewController(conf) - go func() { - // this blocks - if err := ctlr.Run(context.Background()); err != nil { - return - } - }() - - // wait till ready - for { - _, err := resty.R().Get(baseURL) - if err == nil { - break - } - - time.Sleep(100 * time.Millisecond) - } - - // shut down server - - defer func() { - ctx := context.Background() - _ = ctlr.Server.Shutdown(ctx) - }() + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) query := ` { @@ -2218,29 +2047,9 @@ func TestGlobalSearchImageAuthor(t *testing.T) { ctlr := api.NewController(conf) - go func() { - // this blocks - if err := ctlr.Run(context.Background()); err != nil { - return - } - }() - - // wait till ready - for { - _, err := resty.R().Get(baseURL) - if err == nil { - break - } - - time.Sleep(100 * time.Millisecond) - } - - // shut down server - - defer func() { - ctx := context.Background() - _ = ctlr.Server.Shutdown(ctx) - }() + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) Convey("Test global search with author in manifest's annotations", t, func() { cfg, layers, manifest, err := GetImageComponents(10000) @@ -2260,23 +2069,10 @@ func TestGlobalSearchImageAuthor(t 
*testing.T) { query := ` { - GlobalSearch(query:""){ + GlobalSearch(query:"repowithauthor:latest"){ Images { - RepoName - Tag - LastUpdated - Size - IsSigned - Vendor - Score - Platform { - Os - Arch - } - Vulnerabilities { - Count - MaxSeverity - } + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } Authors } } @@ -2286,12 +2082,40 @@ func TestGlobalSearchImageAuthor(t *testing.T) { So(err, ShouldBeNil) So(resp.StatusCode(), ShouldEqual, 200) - responseStruct := &GlobalSearchResultResp{} + responseStructImages := &GlobalSearchResultResp{} - err = json.Unmarshal(resp.Body(), responseStruct) + err = json.Unmarshal(resp.Body(), responseStructImages) So(err, ShouldBeNil) - So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].Authors, ShouldEqual, "author name") + So(responseStructImages.GlobalSearchResult.GlobalSearch.Images[0].Authors, ShouldEqual, "author name") + + query = ` + { + GlobalSearch(query:"repowithauthor"){ + Repos { + Name LastUpdated Size + Platforms { Os Arch } + Vendors Score + NewestImage { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + Authors + } + } + } + }` + + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStructRepos := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStructRepos) + So(err, ShouldBeNil) + + So(responseStructRepos.GlobalSearchResult.GlobalSearch.Repos[0].NewestImage.Authors, ShouldEqual, "author name") }) Convey("Test global search with author in manifest's config", t, func() { @@ -2310,23 +2134,10 @@ func TestGlobalSearchImageAuthor(t *testing.T) { query := ` { - GlobalSearch(query:""){ + GlobalSearch(query:"repowithauthorconfig:latest"){ Images { - RepoName - Tag - LastUpdated - Size - IsSigned - Vendor - Score - Platform { - Os - Arch - } - Vulnerabilities { - Count - MaxSeverity - } + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } Authors } } @@ -2336,29 +2147,57 @@ func TestGlobalSearchImageAuthor(t *testing.T) { So(err, ShouldBeNil) So(resp.StatusCode(), ShouldEqual, 200) - responseStruct := &GlobalSearchResultResp{} + responseStructImages := &GlobalSearchResultResp{} - err = json.Unmarshal(resp.Body(), responseStruct) + err = json.Unmarshal(resp.Body(), responseStructImages) So(err, ShouldBeNil) - So(responseStruct.GlobalSearchResult.GlobalSearch.Images[1].Authors, ShouldEqual, "ZotUser") + So(responseStructImages.GlobalSearchResult.GlobalSearch.Images[0].Authors, ShouldEqual, "ZotUser") + + query = ` + { + GlobalSearch(query:"repowithauthorconfig"){ + Repos { + Name LastUpdated Size + Platforms { Os Arch } + Vendors Score + NewestImage { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + Authors + } + } + } + }` + + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStructRepos := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStructRepos) + So(err, ShouldBeNil) + + So(responseStructRepos.GlobalSearchResult.GlobalSearch.Repos[0].NewestImage.Authors, ShouldEqual, "ZotUser") }) } func TestGlobalSearch(t *testing.T) { - Convey("Test global search", t, func() { + Convey("Test searching for repos with vulnerabitity scanning disabled", t, func() { subpath := "/a" - err := testSetup(t, subpath) - 
if err != nil { - panic(err) - } + dir := t.TempDir() + subDir := t.TempDir() + + subRootDir = path.Join(subDir, subpath) port := GetFreePort() baseURL := GetBaseURL(port) conf := config.New() conf.HTTP.Port = port - conf.Storage.RootDirectory = rootDir + conf.Storage.RootDirectory = dir conf.Storage.SubPaths = make(map[string]config.StorageConfig) conf.Storage.SubPaths[subpath] = config.StorageConfig{RootDirectory: subRootDir} defaultVal := true @@ -2370,82 +2209,164 @@ func TestGlobalSearch(t *testing.T) { ctlr := api.NewController(conf) - go func() { - // this blocks - if err := ctlr.Run(context.Background()); err != nil { - return - } - }() + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) - // wait till ready - for { - _, err := resty.R().Get(baseURL) - if err == nil { - break - } + // push test images to repo 1 image 1 + config1, layers1, manifest1, err := GetImageComponents(100) + So(err, ShouldBeNil) + createdTime := time.Date(2010, 1, 1, 12, 0, 0, 0, time.UTC) + createdTimeL2 := time.Date(2010, 2, 1, 12, 0, 0, 0, time.UTC) + config1.History = append( + config1.History, + ispec.History{ + Created: &createdTime, + CreatedBy: "go test data", + Author: "ZotUser", + Comment: "Test history comment", + EmptyLayer: true, + }, + ispec.History{ + Created: &createdTimeL2, + CreatedBy: "go test data 2", + Author: "ZotUser", + Comment: "Test history comment2", + EmptyLayer: false, + }, + ) + manifest1, err = updateManifestConfig(manifest1, config1) + So(err, ShouldBeNil) - time.Sleep(100 * time.Millisecond) + layersSize1 := 0 + for _, l := range layers1 { + layersSize1 += len(l) } - // shut down server + err = UploadImage( + Image{ + Manifest: manifest1, + Config: config1, + Layers: layers1, + Tag: "1.0.1", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) - defer func() { - ctx := context.Background() - _ = ctlr.Server.Shutdown(ctx) - }() + // push test images to repo 1 image 2 + config2, layers2, manifest2, err := GetImageComponents(200) + So(err, ShouldBeNil) + createdTime2 := time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC) + createdTimeL2 = time.Date(2009, 2, 1, 12, 0, 0, 0, time.UTC) + config2.History = append( + config2.History, + ispec.History{ + Created: &createdTime2, + CreatedBy: "go test data", + Author: "ZotUser", + Comment: "Test history comment", + EmptyLayer: true, + }, + ispec.History{ + Created: &createdTimeL2, + CreatedBy: "go test data 2", + Author: "ZotUser", + Comment: "Test history comment2", + EmptyLayer: false, + }, + ) + manifest2, err = updateManifestConfig(manifest2, config2) + So(err, ShouldBeNil) + + layersSize2 := 0 + for _, l := range layers2 { + layersSize2 += len(l) + } + + err = UploadImage( + Image{ + Manifest: manifest2, + Config: config2, + Layers: layers2, + Tag: "1.0.2", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + + // push test images to repo 2 image 1 + config3, layers3, manifest3, err := GetImageComponents(300) + So(err, ShouldBeNil) + createdTime3 := time.Date(2009, 2, 1, 12, 0, 0, 0, time.UTC) + config3.History = append(config3.History, ispec.History{Created: &createdTime3}) + manifest3, err = updateManifestConfig(manifest3, config3) + So(err, ShouldBeNil) + + layersSize3 := 0 + for _, l := range layers3 { + layersSize3 += len(l) + } + + err = UploadImage( + Image{ + Manifest: manifest3, + Config: config3, + Layers: layers3, + Tag: "1.0.0", + }, + baseURL, + "repo2", + ) + So(err, ShouldBeNil) + + olu := common.NewBaseOciLayoutUtils(ctlr.StoreController, log.NewLogger("debug", "")) + + // Initialize the 
objects containing the expected data + repos, err := olu.GetRepositories() + So(err, ShouldBeNil) + + allExpectedRepoInfoMap := make(map[string]common.RepoInfo) + allExpectedImageSummaryMap := make(map[string]common.ImageSummary) + for _, repo := range repos { + repoInfo, err := olu.GetExpandedRepoInfo(repo) + So(err, ShouldBeNil) + allExpectedRepoInfoMap[repo] = repoInfo + for _, image := range repoInfo.ImageSummaries { + imageName := fmt.Sprintf("%s:%s", repo, image.Tag) + allExpectedImageSummaryMap[imageName] = image + } + } query := ` { - GlobalSearch(query:""){ + GlobalSearch(query:"repo"){ Images { - RepoName - Tag - LastUpdated - Size - IsSigned - Vendor - Score - Platform { - Os - Arch - } - Vulnerabilities { - Count - MaxSeverity + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + Vulnerabilities { Count MaxSeverity } + History { + Layer { Size Digest } + HistoryDescription { Author Comment Created CreatedBy EmptyLayer } } } Repos { - Name - LastUpdated - Size - Platforms { - Os - Arch - } - Vendors - Score + Name LastUpdated Size + Platforms { Os Arch } + Vendors Score NewestImage { - RepoName - Tag - LastUpdated - Size - IsSigned - Vendor - Score - Platform { - Os - Arch - } - Vulnerabilities { - Count - MaxSeverity + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + Vulnerabilities { Count MaxSeverity } + History { + Layer { Size Digest } + HistoryDescription { Author Comment Created CreatedBy EmptyLayer } } } } - Layers { - Digest - Size - } + Layers { Digest Size } } }` resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) @@ -2458,71 +2379,79 @@ func TestGlobalSearch(t *testing.T) { err = json.Unmarshal(resp.Body(), responseStruct) So(err, ShouldBeNil) - // There are 2 repos: zot-cve-test and zot-test, each having an image with tag 0.0.1 - imageStore := ctlr.StoreController.DefaultStore - - repos, err := imageStore.GetRepositories() - So(err, ShouldBeNil) - expectedRepoCount := len(repos) - - allExpectedTagMap := make(map[string][]string, expectedRepoCount) - expectedImageCount := 0 - for _, repo := range repos { - tags, err := imageStore.GetImageTags(repo) - So(err, ShouldBeNil) - - allExpectedTagMap[repo] = tags - expectedImageCount += len(tags) - } - // Make sure the repo/image counts match before comparing actual content So(responseStruct.GlobalSearchResult.GlobalSearch.Images, ShouldNotBeNil) t.Logf("returned images: %v", responseStruct.GlobalSearchResult.GlobalSearch.Images) - So(len(responseStruct.GlobalSearchResult.GlobalSearch.Images), ShouldEqual, expectedImageCount) + So(responseStruct.GlobalSearchResult.GlobalSearch.Images, ShouldBeEmpty) t.Logf("returned repos: %v", responseStruct.GlobalSearchResult.GlobalSearch.Repos) - So(len(responseStruct.GlobalSearchResult.GlobalSearch.Repos), ShouldEqual, expectedRepoCount) + So(len(responseStruct.GlobalSearchResult.GlobalSearch.Repos), ShouldEqual, 2) t.Logf("returned layers: %v", responseStruct.GlobalSearchResult.GlobalSearch.Layers) - So(len(responseStruct.GlobalSearchResult.GlobalSearch.Layers), ShouldNotBeEmpty) + So(responseStruct.GlobalSearchResult.GlobalSearch.Layers, ShouldBeEmpty) newestImageMap := make(map[string]common.ImageSummary) - for _, image := range responseStruct.GlobalSearchResult.GlobalSearch.Images { - // Make sure all returned results are supposed to be in the repo - So(allExpectedTagMap[image.RepoName], ShouldContain, image.Tag) - // Identify the newest image in each repo - if newestImage, ok := 
newestImageMap[image.RepoName]; ok { - if newestImage.LastUpdated.Before(image.LastUpdated) { - newestImageMap[image.RepoName] = image - } - } else { - newestImageMap[image.RepoName] = image - } - } - t.Logf("expected results for newest images in repos: %v", newestImageMap) - + actualRepoMap := make(map[string]common.RepoSummary) for _, repo := range responseStruct.GlobalSearchResult.GlobalSearch.Repos { - image := newestImageMap[repo.Name] - So(repo.Name, ShouldEqual, image.RepoName) - So(repo.LastUpdated, ShouldEqual, image.LastUpdated) - So(repo.Size, ShouldEqual, image.Size) - So(repo.Vendors[0], ShouldEqual, image.Vendor) - So(repo.Platforms[0].Os, ShouldEqual, image.Platform.Os) - So(repo.Platforms[0].Arch, ShouldEqual, image.Platform.Arch) - So(repo.NewestImage.RepoName, ShouldEqual, image.RepoName) - So(repo.NewestImage.Tag, ShouldEqual, image.Tag) - So(repo.NewestImage.LastUpdated, ShouldEqual, image.LastUpdated) - So(repo.NewestImage.Size, ShouldEqual, image.Size) - So(repo.NewestImage.IsSigned, ShouldEqual, image.IsSigned) - So(repo.NewestImage.Vendor, ShouldEqual, image.Vendor) - So(repo.NewestImage.Platform.Os, ShouldEqual, image.Platform.Os) - So(repo.NewestImage.Platform.Arch, ShouldEqual, image.Platform.Arch) - So(repo.NewestImage.Vulnerabilities.Count, ShouldEqual, 0) - So(repo.NewestImage.Vulnerabilities.MaxSeverity, ShouldEqual, "") + newestImageMap[repo.Name] = repo.NewestImage + actualRepoMap[repo.Name] = repo } - // GetRepositories fail + // Tag 1.0.2 has a history entry which is older compare to 1.0.1 + So(newestImageMap["repo1"].Tag, ShouldEqual, "1.0.1") + So(newestImageMap["repo1"].LastUpdated, ShouldEqual, time.Date(2010, 2, 1, 12, 0, 0, 0, time.UTC)) - err = os.Chmod(rootDir, 0o333) - So(err, ShouldBeNil) + So(newestImageMap["repo2"].Tag, ShouldEqual, "1.0.0") + So(newestImageMap["repo2"].LastUpdated, ShouldEqual, time.Date(2009, 2, 1, 12, 0, 0, 0, time.UTC)) + + for repoName, repoSummary := range actualRepoMap { + repoSummary := repoSummary + + // Check if data in NewestImage is consistent with the data in RepoSummary + So(repoName, ShouldEqual, repoSummary.NewestImage.RepoName) + So(repoSummary.Name, ShouldEqual, repoSummary.NewestImage.RepoName) + So(repoSummary.LastUpdated, ShouldEqual, repoSummary.NewestImage.LastUpdated) + + // The data in the RepoSummary returned from the request matches the data returned from the disk + repoInfo := allExpectedRepoInfoMap[repoName] + + t.Logf("Validate repo summary returned by global search with vulnerability scanning disabled") + verifyRepoSummaryFields(t, &repoSummary, &repoInfo.Summary) + + // RepoInfo object does not provide vulnerability information so we need to check differently + // No vulnerabilities should be detected since trivy is disabled + t.Logf("Found vulnerability summary %v", repoSummary.NewestImage.Vulnerabilities) + So(repoSummary.NewestImage.Vulnerabilities.Count, ShouldEqual, 0) + So(repoSummary.NewestImage.Vulnerabilities.MaxSeverity, ShouldEqual, "") + } + + query = ` + { + GlobalSearch(query:"repo1:1.0.1"){ + Images { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + Vulnerabilities { Count MaxSeverity } + History { + Layer { Size Digest } + HistoryDescription { Author Comment Created CreatedBy EmptyLayer } + } + } + Repos { + Name LastUpdated Size + Platforms { Os Arch } + Vendors Score + NewestImage { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + Vulnerabilities { Count MaxSeverity } + History { + Layer { Size Digest } + 
HistoryDescription { Author Comment Created CreatedBy EmptyLayer } + } + } + } + Layers { Digest Size } + } + }` resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) So(resp, ShouldNotBeNil) @@ -2530,27 +2459,44 @@ func TestGlobalSearch(t *testing.T) { So(resp.StatusCode(), ShouldEqual, 200) responseStruct = &GlobalSearchResultResp{} + err = json.Unmarshal(resp.Body(), responseStruct) So(err, ShouldBeNil) - So(responseStruct.Errors, ShouldNotBeEmpty) - err = os.Chmod(rootDir, 0o777) - So(err, ShouldBeNil) + So(responseStruct.GlobalSearchResult.GlobalSearch.Images, ShouldNotBeEmpty) + So(responseStruct.GlobalSearchResult.GlobalSearch.Repos, ShouldBeEmpty) + So(responseStruct.GlobalSearchResult.GlobalSearch.Layers, ShouldBeEmpty) + + So(len(responseStruct.GlobalSearchResult.GlobalSearch.Images), ShouldEqual, 1) + actualImageSummary := responseStruct.GlobalSearchResult.GlobalSearch.Images[0] + So(actualImageSummary.Tag, ShouldEqual, "1.0.1") + + expectedImageSummary, ok := allExpectedImageSummaryMap["repo1:1.0.1"] + So(ok, ShouldEqual, true) + + t.Logf("Validate image summary returned by global search with vulnerability scanning disabled") + verifyImageSummaryFields(t, &actualImageSummary, &expectedImageSummary) + + // RepoInfo object does not provide vulnerability information so we need to check differently + // 0 vulnerabilities should be detected since trivy is disabled + t.Logf("Found vulnerability summary %v", actualImageSummary.Vulnerabilities) + So(actualImageSummary.Vulnerabilities.Count, ShouldEqual, 0) + So(actualImageSummary.Vulnerabilities.MaxSeverity, ShouldEqual, "") }) - Convey("Test global search with vulnerabitity scanning enabled", t, func() { + Convey("Test global search with real images and vulnerabitity scanning enabled", t, func() { subpath := "/a" - err := testSetup(t, subpath) - if err != nil { - panic(err) - } + dir := t.TempDir() + subDir := t.TempDir() + + subRootDir = path.Join(subDir, subpath) port := GetFreePort() baseURL := GetBaseURL(port) conf := config.New() conf.HTTP.Port = port - conf.Storage.RootDirectory = rootDir + conf.Storage.RootDirectory = dir conf.Storage.SubPaths = make(map[string]config.StorageConfig) conf.Storage.SubPaths[subpath] = config.StorageConfig{RootDirectory: subRootDir} defaultVal := true @@ -2579,32 +2525,12 @@ func TestGlobalSearch(t *testing.T) { ctlr := api.NewController(conf) ctlr.Log.Logger = ctlr.Log.Output(writers) - go func() { - // this blocks - if err := ctlr.Run(context.Background()); err != nil { - return - } - }() - - // wait till ready - for { - _, err := resty.R().Get(baseURL) - if err == nil { - break - } - - time.Sleep(100 * time.Millisecond) - } - - // shut down server - - defer func() { - ctx := context.Background() - _ = ctlr.Server.Shutdown(ctx) - }() + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) // Wait for trivy db to download - substring := "\"Extensions\":{\"Search\":{\"Enable\":true,\"CVE\":{\"UpdateInterval\":3600000000000}},\"Sync\":null,\"Metrics\":null,\"Scrub\":null,\"Lint\":null}" //nolint:lll // gofumpt conflicts with lll + substring := "{\"Search\":{\"Enable\":true,\"CVE\":{\"UpdateInterval\":3600000000000}}" found, err := readFileAndSearchString(logPath, substring, 2*time.Minute) So(found, ShouldBeTrue) So(err, ShouldBeNil) @@ -2617,58 +2543,126 @@ func TestGlobalSearch(t *testing.T) { So(found, ShouldBeTrue) So(err, ShouldBeNil) + // push test images to repo 1 image 1 + config1, layers1, manifest1, err := GetImageComponents(100) 
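+ // annotate the config history with a fixed created time, sync the manifest config descriptor via updateManifestConfig, then push the result as repo1:1.0.1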
+ So(err, ShouldBeNil) + createdTime := time.Date(2010, 1, 1, 12, 0, 0, 0, time.UTC) + config1.History = append(config1.History, ispec.History{Created: &createdTime}) + manifest1, err = updateManifestConfig(manifest1, config1) + So(err, ShouldBeNil) + + layersSize1 := 0 + for _, l := range layers1 { + layersSize1 += len(l) + } + + err = UploadImage( + Image{ + Manifest: manifest1, + Config: config1, + Layers: layers1, + Tag: "1.0.1", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + + // push test images to repo 1 image 2 + config2, layers2, manifest2, err := GetImageComponents(200) + So(err, ShouldBeNil) + createdTime2 := time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC) + config2.History = append(config2.History, ispec.History{Created: &createdTime2}) + manifest2, err = updateManifestConfig(manifest2, config2) + So(err, ShouldBeNil) + + layersSize2 := 0 + for _, l := range layers2 { + layersSize2 += len(l) + } + + err = UploadImage( + Image{ + Manifest: manifest2, + Config: config2, + Layers: layers2, + Tag: "1.0.2", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + + // push test images to repo 2 image 1 + config3, layers3, manifest3, err := GetImageComponents(300) + So(err, ShouldBeNil) + createdTime3 := time.Date(2009, 2, 1, 12, 0, 0, 0, time.UTC) + config3.History = append(config3.History, ispec.History{Created: &createdTime3}) + manifest3, err = updateManifestConfig(manifest3, config3) + So(err, ShouldBeNil) + + layersSize3 := 0 + for _, l := range layers3 { + layersSize3 += len(l) + } + + err = UploadImage( + Image{ + Manifest: manifest3, + Config: config3, + Layers: layers3, + Tag: "1.0.0", + }, + baseURL, + "repo2", + ) + So(err, ShouldBeNil) + + olu := common.NewBaseOciLayoutUtils(ctlr.StoreController, log.NewLogger("debug", "")) + + // Initialize the objects containing the expected data + repos, err := olu.GetRepositories() + So(err, ShouldBeNil) + + allExpectedRepoInfoMap := make(map[string]common.RepoInfo) + allExpectedImageSummaryMap := make(map[string]common.ImageSummary) + for _, repo := range repos { + repoInfo, err := olu.GetExpandedRepoInfo(repo) + So(err, ShouldBeNil) + allExpectedRepoInfoMap[repo] = repoInfo + for _, image := range repoInfo.ImageSummaries { + imageName := fmt.Sprintf("%s:%s", repo, image.Tag) + allExpectedImageSummaryMap[imageName] = image + } + } + query := ` { - GlobalSearch(query:""){ + GlobalSearch(query:"repo"){ Images { - RepoName - Tag - LastUpdated - Size - IsSigned - Vendor - Score - Platform { - Os - Arch - } - Vulnerabilities { - Count - MaxSeverity + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + Vulnerabilities { Count MaxSeverity } + History { + Layer { Size Digest } + HistoryDescription { Author Comment Created CreatedBy EmptyLayer } } } Repos { - Name - LastUpdated - Size - Platforms { - Os - Arch - } - Vendors - Score + Name LastUpdated Size + Platforms { Os Arch } + Vendors Score NewestImage { - RepoName - Tag - LastUpdated - Size - IsSigned - Vendor - Score - Platform { - Os - Arch - } - Vulnerabilities { - Count - MaxSeverity + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + Vulnerabilities { Count MaxSeverity } + History { + Layer { Size Digest } + HistoryDescription { Author Comment Created CreatedBy EmptyLayer } } } } - Layers { - Digest - Size - } + Layers { Digest Size } } }` resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) @@ -2681,75 +2675,79 @@ func TestGlobalSearch(t *testing.T) { err = json.Unmarshal(resp.Body(), 
responseStruct) So(err, ShouldBeNil) - // There are 2 repos: zot-cve-test and zot-test, each having an image with tag 0.0.1 - imageStore := ctlr.StoreController.DefaultStore - - repos, err := imageStore.GetRepositories() - So(err, ShouldBeNil) - expectedRepoCount := len(repos) - - allExpectedTagMap := make(map[string][]string, expectedRepoCount) - expectedImageCount := 0 - for _, repo := range repos { - tags, err := imageStore.GetImageTags(repo) - So(err, ShouldBeNil) - - allExpectedTagMap[repo] = tags - expectedImageCount += len(tags) - } - // Make sure the repo/image counts match before comparing actual content So(responseStruct.GlobalSearchResult.GlobalSearch.Images, ShouldNotBeNil) t.Logf("returned images: %v", responseStruct.GlobalSearchResult.GlobalSearch.Images) - So(len(responseStruct.GlobalSearchResult.GlobalSearch.Images), ShouldEqual, expectedImageCount) + So(responseStruct.GlobalSearchResult.GlobalSearch.Images, ShouldBeEmpty) t.Logf("returned repos: %v", responseStruct.GlobalSearchResult.GlobalSearch.Repos) - So(len(responseStruct.GlobalSearchResult.GlobalSearch.Repos), ShouldEqual, expectedRepoCount) + So(len(responseStruct.GlobalSearchResult.GlobalSearch.Repos), ShouldEqual, 2) t.Logf("returned layers: %v", responseStruct.GlobalSearchResult.GlobalSearch.Layers) - So(len(responseStruct.GlobalSearchResult.GlobalSearch.Layers), ShouldNotBeEmpty) + So(responseStruct.GlobalSearchResult.GlobalSearch.Layers, ShouldBeEmpty) newestImageMap := make(map[string]common.ImageSummary) - for _, image := range responseStruct.GlobalSearchResult.GlobalSearch.Images { - // Make sure all returned results are supposed to be in the repo - So(allExpectedTagMap[image.RepoName], ShouldContain, image.Tag) - // Identify the newest image in each repo - if newestImage, ok := newestImageMap[image.RepoName]; ok { - if newestImage.LastUpdated.Before(image.LastUpdated) { - newestImageMap[image.RepoName] = image - } - } else { - newestImageMap[image.RepoName] = image - } - } - t.Logf("expected results for newest images in repos: %v", newestImageMap) - + actualRepoMap := make(map[string]common.RepoSummary) for _, repo := range responseStruct.GlobalSearchResult.GlobalSearch.Repos { - image := newestImageMap[repo.Name] - So(repo.Name, ShouldEqual, image.RepoName) - So(repo.LastUpdated, ShouldEqual, image.LastUpdated) - So(repo.Size, ShouldEqual, image.Size) - So(repo.Vendors[0], ShouldEqual, image.Vendor) - So(repo.Platforms[0].Os, ShouldEqual, image.Platform.Os) - So(repo.Platforms[0].Arch, ShouldEqual, image.Platform.Arch) - So(repo.NewestImage.RepoName, ShouldEqual, image.RepoName) - So(repo.NewestImage.Tag, ShouldEqual, image.Tag) - So(repo.NewestImage.LastUpdated, ShouldEqual, image.LastUpdated) - So(repo.NewestImage.Size, ShouldEqual, image.Size) - So(repo.NewestImage.IsSigned, ShouldEqual, image.IsSigned) - So(repo.NewestImage.Vendor, ShouldEqual, image.Vendor) - So(repo.NewestImage.Platform.Os, ShouldEqual, image.Platform.Os) - So(repo.NewestImage.Platform.Arch, ShouldEqual, image.Platform.Arch) - t.Logf("Found vulnerability summary %v", repo.NewestImage.Vulnerabilities) - So(repo.NewestImage.Vulnerabilities.Count, ShouldEqual, image.Vulnerabilities.Count) - So(repo.NewestImage.Vulnerabilities.Count, ShouldBeGreaterThan, 1) - So(repo.NewestImage.Vulnerabilities.MaxSeverity, ShouldEqual, image.Vulnerabilities.MaxSeverity) - // This really depends on the test data, but with the current test images it's CRITICAL - So(repo.NewestImage.Vulnerabilities.MaxSeverity, ShouldEqual, "CRITICAL") + 
newestImageMap[repo.Name] = repo.NewestImage + actualRepoMap[repo.Name] = repo } - // GetRepositories fail + // Tag 1.0.2 has a history entry which is older compare to 1.0.1 + So(newestImageMap["repo1"].Tag, ShouldEqual, "1.0.1") + So(newestImageMap["repo1"].LastUpdated, ShouldEqual, time.Date(2010, 1, 1, 12, 0, 0, 0, time.UTC)) - err = os.Chmod(rootDir, 0o333) - So(err, ShouldBeNil) + So(newestImageMap["repo2"].Tag, ShouldEqual, "1.0.0") + So(newestImageMap["repo2"].LastUpdated, ShouldEqual, time.Date(2009, 2, 1, 12, 0, 0, 0, time.UTC)) + + for repoName, repoSummary := range actualRepoMap { + repoSummary := repoSummary + + // Check if data in NewestImage is consistent with the data in RepoSummary + So(repoName, ShouldEqual, repoSummary.NewestImage.RepoName) + So(repoSummary.Name, ShouldEqual, repoSummary.NewestImage.RepoName) + So(repoSummary.LastUpdated, ShouldEqual, repoSummary.NewestImage.LastUpdated) + + // The data in the RepoSummary returned from the request matches the data returned from the disk + repoInfo := allExpectedRepoInfoMap[repoName] + + t.Logf("Validate repo summary returned by global search with vulnerability scanning enabled") + verifyRepoSummaryFields(t, &repoSummary, &repoInfo.Summary) + + // RepoInfo object does not provide vulnerability information so we need to check differently + t.Logf("Found vulnerability summary %v", repoSummary.NewestImage.Vulnerabilities) + So(repoSummary.NewestImage.Vulnerabilities.Count, ShouldEqual, 0) + // There are 0 vulnerabilities this data used in tests + So(repoSummary.NewestImage.Vulnerabilities.MaxSeverity, ShouldEqual, "NONE") + } + + query = ` + { + GlobalSearch(query:"repo1:1.0.1"){ + Images { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + Vulnerabilities { Count MaxSeverity } + History { + Layer { Size Digest } + HistoryDescription { Author Comment Created CreatedBy EmptyLayer } + } + } + Repos { + Name LastUpdated Size + Platforms { Os Arch } + Vendors Score + NewestImage { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + Vulnerabilities { Count MaxSeverity } + History { + Layer { Size Digest } + HistoryDescription { Author Comment Created CreatedBy EmptyLayer } + } + } + } + Layers { Digest Size } + } + }` resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) So(resp, ShouldNotBeNil) @@ -2757,12 +2755,278 @@ func TestGlobalSearch(t *testing.T) { So(resp.StatusCode(), ShouldEqual, 200) responseStruct = &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Images, ShouldNotBeEmpty) + So(responseStruct.GlobalSearchResult.GlobalSearch.Repos, ShouldBeEmpty) + So(responseStruct.GlobalSearchResult.GlobalSearch.Layers, ShouldBeEmpty) + + So(len(responseStruct.GlobalSearchResult.GlobalSearch.Images), ShouldEqual, 1) + actualImageSummary := responseStruct.GlobalSearchResult.GlobalSearch.Images[0] + So(actualImageSummary.Tag, ShouldEqual, "1.0.1") + + expectedImageSummary, ok := allExpectedImageSummaryMap["repo1:1.0.1"] + So(ok, ShouldEqual, true) + + t.Logf("Validate image summary returned by global search with vulnerability scanning enable") + verifyImageSummaryFields(t, &actualImageSummary, &expectedImageSummary) + + // RepoInfo object does not provide vulnerability information so we need to check differently + t.Logf("Found vulnerability summary %v", actualImageSummary.Vulnerabilities) + // There are 0 vulnerabilities this data used 
in tests + So(actualImageSummary.Vulnerabilities.Count, ShouldEqual, 0) + So(actualImageSummary.Vulnerabilities.MaxSeverity, ShouldEqual, "NONE") + }) +} + +func TestCleaningFilteringParamsGlobalSearch(t *testing.T) { + Convey("Test cleaning filtering parameters for global search", t, func() { + dir := t.TempDir() + + port := GetFreePort() + baseURL := GetBaseURL(port) + conf := config.New() + conf.HTTP.Port = port + conf.Storage.RootDirectory = dir + defaultVal := true + conf.Extensions = &extconf.ExtensionConfig{ + Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}}, + } + + ctlr := api.NewController(conf) + + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) + + config, layers, manifest, err := GetImageWithConfig(ispec.Image{ + Platform: ispec.Platform{ + OS: "windows", + Architecture: "amd64", + }, + }) + So(err, ShouldBeNil) + + err = UploadImage( + Image{ + Manifest: manifest, + Config: config, + Layers: layers, + Tag: "0.0.1", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + + config, layers, manifest, err = GetImageWithConfig(ispec.Image{ + Platform: ispec.Platform{ + OS: "linux", + Architecture: "amd64", + }, + }) + So(err, ShouldBeNil) + + err = UploadImage( + Image{ + Manifest: manifest, + Config: config, + Layers: layers, + Tag: "0.0.1", + }, + baseURL, + "repo2", + ) + So(err, ShouldBeNil) + + query := ` + { + GlobalSearch(query:"repo", requestedPage:{limit: 3, offset: 0, sortBy:RELEVANCE}, + filter:{Os:[" linux", "Windows ", " "], Arch:["","aMd64 "]}) { + Repos { + Name + } + } + }` + + resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + }) +} + +func TestGlobalSearchFiltering(t *testing.T) { + Convey("Global search HasToBeSigned filtering", t, func() { + dir := t.TempDir() + port := GetFreePort() + baseURL := GetBaseURL(port) + conf := config.New() + conf.HTTP.Port = port + conf.Storage.RootDirectory = dir + + defaultVal := true + conf.Extensions = &extconf.ExtensionConfig{ + Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}}, + } + + ctlr := api.NewController(conf) + + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) + + config, layers, manifest, err := GetRandomImageComponents(100) + So(err, ShouldBeNil) + + err = UploadImage( + Image{ + Config: config, + Layers: layers, + Manifest: manifest, + Tag: "test", + }, + baseURL, + "unsigned-repo", + ) + So(err, ShouldBeNil) + + config, layers, manifest, err = GetRandomImageComponents(100) + So(err, ShouldBeNil) + + err = UploadImage( + Image{ + Config: config, + Layers: layers, + Manifest: manifest, + Tag: "test", + }, + baseURL, + "signed-repo", + ) + So(err, ShouldBeNil) + + err = SignImageUsingCosign("signed-repo:test", port) + So(err, ShouldBeNil) + + query := `{ + GlobalSearch(query:"repo", + filter:{HasToBeSigned:true}) { + Repos { + Name + } + } + }` + + resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Repos, ShouldNotBeEmpty) + 
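// only the cosign-signed repository is expected to match when the HasToBeSigned filter is set to true +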
So(responseStruct.GlobalSearchResult.GlobalSearch.Repos[0].Name, ShouldResemble, "signed-repo") + }) +} + +func TestGlobalSearchWithInvalidInput(t *testing.T) { + Convey("Global search with invalid input", t, func() { + dir := t.TempDir() + + port := GetFreePort() + baseURL := GetBaseURL(port) + conf := config.New() + conf.HTTP.Port = port + conf.Storage.RootDirectory = dir + defaultVal := true + conf.Extensions = &extconf.ExtensionConfig{ + Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}}, + } + + ctlr := api.NewController(conf) + + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) + + longString := RandomString(1000) + + query := fmt.Sprintf(` + { + GlobalSearch(query:"%s", requestedPage:{limit: 3, offset: 4, sortBy:RELEVANCE}, + filter:{Os:["linux", "Windows", ""], Arch:["","amd64"]}) { + Repos { + Name + } + } + }`, longString) + + resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct := &GlobalSearchResultResp{} + err = json.Unmarshal(resp.Body(), responseStruct) So(err, ShouldBeNil) So(responseStruct.Errors, ShouldNotBeEmpty) - err = os.Chmod(rootDir, 0o777) + + query = fmt.Sprintf(` + { + GlobalSearch(query:"repo", requestedPage:{limit: 3, offset: 4, sortBy:RELEVANCE}, + filter:{Os:["%s", "Windows", ""], Arch:["","amd64"]}) { + Repos { + Name + } + } + }`, longString) + + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct = &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.Errors, ShouldNotBeEmpty) + + query = fmt.Sprintf(` + { + GlobalSearch(query:"repo", requestedPage:{limit: 3, offset: 4, sortBy:RELEVANCE}, + filter:{Os:["", "Windows", ""], Arch:["","%s"]}) { + Repos { + Name + } + } + }`, longString) + + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct = &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.Errors, ShouldNotBeEmpty) }) } @@ -2815,22 +3079,22 @@ func TestImageList(t *testing.T) { So(err, ShouldBeNil) query := fmt.Sprintf(`{ - ImageList(repo:"%s"){ - History{ - HistoryDescription{ - Author - Comment - Created - CreatedBy - EmptyLayer - }, - Layer{ - Digest - Size + ImageList(repo:"%s"){ + History{ + HistoryDescription{ + Author + Comment + Created + CreatedBy + EmptyLayer + }, + Layer{ + Digest + Size + } } } - } - }`, repos[0]) + }`, repos[0]) resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) So(err, ShouldBeNil) @@ -2928,22 +3192,22 @@ func TestImageList(t *testing.T) { So(err, ShouldBeNil) query := fmt.Sprintf(`{ - ImageList(repo:"%s"){ - History{ - HistoryDescription{ - Author - Comment - Created - CreatedBy - EmptyLayer - }, - Layer{ - Digest - Size + ImageList(repo:"%s"){ + History{ + HistoryDescription{ + Author + Comment + Created + CreatedBy + EmptyLayer + }, + Layer{ + Digest + Size + } } } - } - }`, invalid) + }`, invalid) resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) So(err, ShouldBeNil) @@ -2957,6 +3221,72 @@ 
func TestImageList(t *testing.T) { }) } +func TestGlobalSearchPagination(t *testing.T) { + Convey("Test global search pagination", t, func() { + dir := t.TempDir() + + port := GetFreePort() + baseURL := GetBaseURL(port) + conf := config.New() + conf.HTTP.Port = port + conf.Storage.RootDirectory = dir + defaultVal := true + conf.Extensions = &extconf.ExtensionConfig{ + Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}}, + } + + ctlr := api.NewController(conf) + + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) + + for i := 0; i < 1; i++ { + config, layers, manifest, err := GetImageComponents(10) + So(err, ShouldBeNil) + + err = UploadImage( + Image{ + Manifest: manifest, + Config: config, + Layers: layers, + Tag: "0.0.1", + }, + baseURL, + fmt.Sprintf("repo%d", i), + ) + So(err, ShouldBeNil) + } + + Convey("Limit is bigger than the repo count", func() { + query := ` + { + GlobalSearch(query:"repo", requestedPage:{limit: 9, offset: 0, sortBy:RELEVANCE}){ + Repos { + Name + } + } + }` + + resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Images, ShouldBeEmpty) + So(responseStruct.GlobalSearchResult.GlobalSearch.Repos, ShouldNotBeEmpty) + So(responseStruct.GlobalSearchResult.GlobalSearch.Layers, ShouldBeEmpty) + + So(len(responseStruct.GlobalSearchResult.GlobalSearch.Repos), ShouldEqual, 1) + }) + }) +} + func TestBuildImageInfo(t *testing.T) { Convey("Check image summary when layer count does not match history", t, func() { invalid := "invalid" @@ -2987,8 +3317,8 @@ func TestBuildImageInfo(t *testing.T) { config := ispec.Image{ Platform: ispec.Platform{ - Architecture: "amd64", OS: "linux", + Architecture: "amd64", }, RootFS: ispec.RootFS{ Type: "layers", @@ -3050,24 +3380,801 @@ func TestBuildImageInfo(t *testing.T) { ) So(err, ShouldBeNil) - manifest, err := olu.GetImageBlobManifest(invalid, manifestDigest) - So(err, ShouldBeNil) - imageConfig, err := olu.GetImageConfigInfo(invalid, manifestDigest) So(err, ShouldBeNil) isSigned := false - imageSummary := search.BuildImageInfo(invalid, invalid, manifestDigest, manifest, + imageSummary := convert.BuildImageInfo(invalid, invalid, manifestDigest, ispecManifest, imageConfig, isSigned) - So(len(imageSummary.Layers), ShouldEqual, len(manifest.Layers)) + So(len(imageSummary.Layers), ShouldEqual, len(ispecManifest.Layers)) imageSummaryLayerSize, err := strconv.Atoi(*imageSummary.Size) So(err, ShouldBeNil) So(imageSummaryLayerSize, ShouldEqual, manifestLayersSize) }) } +func TestRepoDBWhenSigningImages(t *testing.T) { + Convey("SigningImages", t, func() { + subpath := "/a" + + dir := t.TempDir() + subDir := t.TempDir() + + subRootDir = path.Join(subDir, subpath) + + port := GetFreePort() + baseURL := GetBaseURL(port) + conf := config.New() + conf.HTTP.Port = port + conf.Storage.RootDirectory = dir + conf.Storage.SubPaths = make(map[string]config.StorageConfig) + conf.Storage.SubPaths[subpath] = config.StorageConfig{RootDirectory: subRootDir} + defaultVal := true + conf.Extensions = &extconf.ExtensionConfig{ + Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}}, + } + + conf.Extensions.Search.CVE = nil + + ctlr := api.NewController(conf) + + go 
startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) + + // push test images to repo 1 image 1 + config1, layers1, manifest1, err := GetImageComponents(100) + So(err, ShouldBeNil) + createdTime := time.Date(2010, 1, 1, 12, 0, 0, 0, time.UTC) + config1.History = append(config1.History, ispec.History{Created: &createdTime}) + manifest1, err = updateManifestConfig(manifest1, config1) + So(err, ShouldBeNil) + + layersSize1 := 0 + for _, l := range layers1 { + layersSize1 += len(l) + } + + err = UploadImage( + Image{ + Manifest: manifest1, + Config: config1, + Layers: layers1, + Tag: "1.0.1", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + + err = UploadImage( + Image{ + Manifest: manifest1, + Config: config1, + Layers: layers1, + Tag: "2.0.2", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + + manifestBlob, err := json.Marshal(manifest1) + So(err, ShouldBeNil) + + manifestDigest := godigest.FromBytes(manifestBlob) + + queryImage1 := ` + { + GlobalSearch(query:"repo1:1.0"){ + Images { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + } + } + }` + + queryImage2 := ` + { + GlobalSearch(query:"repo1:2.0"){ + Images { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + } + } + }` + + Convey("Sign with cosign", func() { + err = SignImageUsingCosign("repo1:1.0.1", port) + So(err, ShouldBeNil) + + resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(queryImage1)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].IsSigned, ShouldBeTrue) + + // check image 2 is signed also because it has the same manifest + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(queryImage2)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct = &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].IsSigned, ShouldBeTrue) + + // delete the signature + resp, err = resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" + + fmt.Sprintf("sha256-%s.sig", manifestDigest.Encoded())) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusAccepted) + + // check image 2 is not signed anymore + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(queryImage2)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct = &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].IsSigned, ShouldBeFalse) + }) + + Convey("Cover errors when signing with cosign", func() { + Convey("imageIsSignature fails", func() { + // make image store ignore the wrong format of the input + ctlr.StoreController.DefaultStore = mocks.MockedImageStore{ + PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (godigest.Digest, error) { + return "", nil + }, + DeleteImageManifestFn: func(repo, reference string, dc bool) error { + return ErrTestError + }, + } + + // push bad manifest blob + resp, err := resty.R(). 
+ SetHeader("Content-type", "application/vnd.oci.image.manifest.v1+json"). + SetBody([]byte("unmashable manifest blob")). + Put(baseURL + "/v2/" + "repo" + "/manifests/" + "tag") + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusInternalServerError) + }) + + Convey("image is a signature, AddManifestSignature fails", func() { + ctlr.RepoDB = mocks.RepoDBMock{ + AddManifestSignatureFn: func(repo string, signedManifestDigest godigest.Digest, + sm repodb.SignatureMetadata, + ) error { + return ErrTestError + }, + } + + err := SignImageUsingCosign("repo1:1.0.1", port) + So(err, ShouldNotBeNil) + }) + }) + + Convey("Sign with notation", func() { + err = SignImageUsingNotary("repo1:1.0.1", port) + So(err, ShouldBeNil) + + resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(queryImage1)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].IsSigned, ShouldBeTrue) + }) + }) +} + +func TestRepoDBWhenPushingImages(t *testing.T) { + Convey("Cover errors when pushing", t, func() { + dir := t.TempDir() + + port := GetFreePort() + baseURL := GetBaseURL(port) + conf := config.New() + conf.HTTP.Port = port + conf.Storage.RootDirectory = dir + defaultVal := true + conf.Extensions = &extconf.ExtensionConfig{ + Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}}, + } + + ctlr := api.NewController(conf) + + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) + + Convey("SetManifestMeta fails", func() { + ctlr.RepoDB = mocks.RepoDBMock{ + SetManifestMetaFn: func(repo string, manifestDigest godigest.Digest, mm repodb.ManifestMetadata) error { + return ErrTestError + }, + } + config1, layers1, manifest1, err := GetImageComponents(100) + So(err, ShouldBeNil) + + configBlob, err := json.Marshal(config1) + So(err, ShouldBeNil) + + ctlr.StoreController.DefaultStore = mocks.MockedImageStore{ + NewBlobUploadFn: ctlr.StoreController.DefaultStore.NewBlobUpload, + PutBlobChunkFn: ctlr.StoreController.DefaultStore.PutBlobChunk, + GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) { + return configBlob, nil + }, + DeleteImageManifestFn: func(repo, reference string, dc bool) error { + return ErrTestError + }, + } + + err = UploadImage( + Image{ + Manifest: manifest1, + Config: config1, + Layers: layers1, + Tag: "1.0.1", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + }) + + Convey("SetManifestMeta succeeds but SetRepoTag fails", func() { + ctlr.RepoDB = mocks.RepoDBMock{ + SetRepoTagFn: func(repo, tag string, manifestDigest godigest.Digest, mediaType string) error { + return ErrTestError + }, + } + + config1, layers1, manifest1, err := GetImageComponents(100) + So(err, ShouldBeNil) + + configBlob, err := json.Marshal(config1) + So(err, ShouldBeNil) + + ctlr.StoreController.DefaultStore = mocks.MockedImageStore{ + NewBlobUploadFn: ctlr.StoreController.DefaultStore.NewBlobUpload, + PutBlobChunkFn: ctlr.StoreController.DefaultStore.PutBlobChunk, + GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) { + return configBlob, nil + }, + } + + err = UploadImage( + Image{ + Manifest: manifest1, + Config: config1, + Layers: layers1, + Tag: "1.0.1", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + }) + }) +} + +func 
TestRepoDBWhenReadingImages(t *testing.T) { + Convey("Push test image", t, func() { + dir := t.TempDir() + + port := GetFreePort() + baseURL := GetBaseURL(port) + conf := config.New() + conf.HTTP.Port = port + conf.Storage.RootDirectory = dir + defaultVal := true + conf.Extensions = &extconf.ExtensionConfig{ + Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}}, + } + + ctlr := api.NewController(conf) + + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) + + config1, layers1, manifest1, err := GetImageComponents(100) + So(err, ShouldBeNil) + + err = UploadImage( + Image{ + Manifest: manifest1, + Config: config1, + Layers: layers1, + Tag: "1.0.1", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + + Convey("Download 3 times", func() { + resp, err := resty.R().Get(baseURL + "/v2/" + "repo1" + "/manifests/" + "1.0.1") + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusOK) + + resp, err = resty.R().Get(baseURL + "/v2/" + "repo1" + "/manifests/" + "1.0.1") + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusOK) + + resp, err = resty.R().Get(baseURL + "/v2/" + "repo1" + "/manifests/" + "1.0.1") + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusOK) + + query := ` + { + GlobalSearch(query:"repo1:1.0"){ + Images { + RepoName Tag DownloadCount + } + } + }` + + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusOK) + + responseStruct := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + So(responseStruct.GlobalSearchResult.GlobalSearch.Images, ShouldNotBeEmpty) + So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].DownloadCount, ShouldEqual, 3) + }) + + Convey("Error when incrementing", func() { + ctlr.RepoDB = mocks.RepoDBMock{ + IncrementImageDownloadsFn: func(repo string, tag string) error { + return ErrTestError + }, + } + + resp, err := resty.R().Get(baseURL + "/v2/" + "repo1" + "/manifests/" + "1.0.1") + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusInternalServerError) + }) + }) +} + +func TestRepoDBWhenDeletingImages(t *testing.T) { + Convey("Setting up zot repo with test images", t, func() { + subpath := "/a" + + dir := t.TempDir() + subDir := t.TempDir() + + subRootDir = path.Join(subDir, subpath) + + port := GetFreePort() + baseURL := GetBaseURL(port) + conf := config.New() + conf.HTTP.Port = port + conf.Storage.RootDirectory = dir + conf.Storage.SubPaths = make(map[string]config.StorageConfig) + conf.Storage.SubPaths[subpath] = config.StorageConfig{RootDirectory: subRootDir} + defaultVal := true + conf.Extensions = &extconf.ExtensionConfig{ + Search: &extconf.SearchConfig{BaseConfig: extconf.BaseConfig{Enable: &defaultVal}}, + } + + conf.Extensions.Search.CVE = nil + + ctlr := api.NewController(conf) + + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) + + // push test images to repo 1 image 1 + config1, layers1, manifest1, err := GetImageComponents(100) + So(err, ShouldBeNil) + + layersSize1 := 0 + for _, l := range layers1 { + layersSize1 += len(l) + } + + err = UploadImage( + Image{ + Manifest: manifest1, + Config: config1, + Layers: layers1, + Tag: "1.0.1", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + + // push test images to repo 1 image 2 + config2, layers2, manifest2, err := GetImageComponents(200) + 
So(err, ShouldBeNil) + createdTime2 := time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC) + config2.History = append(config2.History, ispec.History{Created: &createdTime2}) + manifest2, err = updateManifestConfig(manifest2, config2) + So(err, ShouldBeNil) + + layersSize2 := 0 + for _, l := range layers2 { + layersSize2 += len(l) + } + + err = UploadImage( + Image{ + Manifest: manifest2, + Config: config2, + Layers: layers2, + Tag: "1.0.2", + }, + baseURL, + "repo1", + ) + So(err, ShouldBeNil) + + query := ` + { + GlobalSearch(query:"repo1:1.0"){ + Images { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + } + Repos { + Name LastUpdated Size + Platforms { Os Arch } + Vendors Score + NewestImage { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { + Os + Arch + } + } + } + Layers { + Digest + Size + } + } + }` + + resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusOK) + + responseStruct := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(len(responseStruct.GlobalSearchResult.GlobalSearch.Images), ShouldEqual, 2) + + Convey("Delete a normal tag", func() { + resp, err := resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" + "1.0.1") + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusAccepted) + + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusOK) + + responseStruct := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(len(responseStruct.GlobalSearchResult.GlobalSearch.Images), ShouldEqual, 1) + So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].Tag, ShouldEqual, "1.0.2") + }) + + Convey("Delete a cosign signature", func() { + repo := "repo1" + err := SignImageUsingCosign("repo1:1.0.1", port) + So(err, ShouldBeNil) + + query := ` + { + GlobalSearch(query:"repo1:1.0.1"){ + Images { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + } + } + }` + + resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].IsSigned, ShouldBeTrue) + + // get signatur digest + log := log.NewLogger("debug", "") + metrics := monitoring.NewMetricsServer(false, log) + storage := local.NewImageStore(dir, false, storage.DefaultGCDelay, + false, false, log, metrics, nil, nil) + + indexBlob, err := storage.GetIndexContent(repo) + So(err, ShouldBeNil) + + var indexContent ispec.Index + + err = json.Unmarshal(indexBlob, &indexContent) + So(err, ShouldBeNil) + + signatureTag := "" + + for _, manifest := range indexContent.Manifests { + tag := manifest.Annotations[ispec.AnnotationRefName] + + cosignTagRule := glob.MustCompile("sha256-*.sig") + + if cosignTagRule.Match(tag) { + signatureTag = tag + } + } + + // delete the signature using the digest + resp, err = resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" + signatureTag) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + 
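// the registry is expected to accept deletion of the sha256-<digest>.sig tag, after which the image should show up as unsigned +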
So(resp.StatusCode(), ShouldEqual, http.StatusAccepted) + + // verify isSigned again and it should be false + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct = &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].IsSigned, ShouldBeFalse) + }) + + Convey("Delete a notary signature", func() { + repo := "repo1" + err := SignImageUsingNotary("repo1:1.0.1", port) + So(err, ShouldBeNil) + + query := ` + { + GlobalSearch(query:"repo1:1.0.1"){ + Images { + RepoName Tag LastUpdated Size IsSigned Vendor Score + Platform { Os Arch } + } + } + }` + + // test if it's signed + resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct := &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].IsSigned, ShouldBeTrue) + + // get signatur digest + log := log.NewLogger("debug", "") + metrics := monitoring.NewMetricsServer(false, log) + storage := local.NewImageStore(dir, false, storage.DefaultGCDelay, + false, false, log, metrics, nil, nil) + + indexBlob, err := storage.GetIndexContent(repo) + So(err, ShouldBeNil) + + var indexContent ispec.Index + + err = json.Unmarshal(indexBlob, &indexContent) + So(err, ShouldBeNil) + + signatureReference := "" + + var sigManifestContent artifactspec.Manifest + + for _, manifest := range indexContent.Manifests { + if manifest.MediaType == artifactspec.MediaTypeArtifactManifest { + signatureReference = manifest.Digest.String() + manifestBlob, _, _, err := storage.GetImageManifest(repo, signatureReference) + So(err, ShouldBeNil) + err = json.Unmarshal(manifestBlob, &sigManifestContent) + So(err, ShouldBeNil) + } + } + + So(sigManifestContent, ShouldNotBeZeroValue) + // check notation signature + manifest1Blob, err := json.Marshal(manifest1) + So(err, ShouldBeNil) + manifest1Digest := godigest.FromBytes(manifest1Blob) + So(sigManifestContent.Subject, ShouldNotBeNil) + So(sigManifestContent.Subject.Digest.String(), ShouldEqual, manifest1Digest.String()) + + // delete the signature using the digest + resp, err = resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" + signatureReference) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusAccepted) + + // verify isSigned again and it should be false + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + + responseStruct = &GlobalSearchResultResp{} + + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + + So(responseStruct.GlobalSearchResult.GlobalSearch.Images[0].IsSigned, ShouldBeFalse) + }) + + Convey("Deleting causes errors", func() { + Convey("error while backing up the manifest", func() { + ctlr.StoreController.DefaultStore = mocks.MockedImageStore{ + GetImageManifestFn: func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte{}, "", "", zerr.ErrRepoNotFound + }, + } + resp, err = resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" + "signatureReference") + 
So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + ctlr.StoreController.DefaultStore = mocks.MockedImageStore{ + GetImageManifestFn: func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte{}, "", "", zerr.ErrBadManifest + }, + } + resp, err = resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" + "signatureReference") + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + + ctlr.StoreController.DefaultStore = mocks.MockedImageStore{ + GetImageManifestFn: func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte{}, "", "", zerr.ErrRepoNotFound + }, + } + resp, err = resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" + "signatureReference") + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + }) + + Convey("imageIsSignature fails", func() { + ctlr.StoreController.DefaultStore = mocks.MockedImageStore{ + PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (godigest.Digest, error) { + return "", nil + }, + DeleteImageManifestFn: func(repo, reference string, dc bool) error { + return nil + }, + } + + resp, err = resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" + "signatureReference") + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusInternalServerError) + }) + + Convey("image is a signature, DeleteSignature fails", func() { + ctlr.StoreController.DefaultStore = mocks.MockedImageStore{ + NewBlobUploadFn: ctlr.StoreController.DefaultStore.NewBlobUpload, + PutBlobChunkFn: ctlr.StoreController.DefaultStore.PutBlobChunk, + GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) { + configBlob, err := json.Marshal(ispec.Image{}) + So(err, ShouldBeNil) + + return configBlob, nil + }, + PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (godigest.Digest, error) { + return "", nil + }, + DeleteImageManifestFn: func(repo, reference string, dc bool) error { + return nil + }, + GetImageManifestFn: func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte("{}"), "1", "1", nil + }, + } + + resp, err = resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" + + "sha256-343ebab94a7674da181c6ea3da013aee4f8cbe357870f8dcaf6268d5343c3474.sig") + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, http.StatusInternalServerError) + }) + + Convey("image is a signature, PutImageManifest fails", func() { + ctlr.StoreController.DefaultStore = mocks.MockedImageStore{ + NewBlobUploadFn: ctlr.StoreController.DefaultStore.NewBlobUpload, + PutBlobChunkFn: ctlr.StoreController.DefaultStore.PutBlobChunk, + GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) { + configBlob, err := json.Marshal(ispec.Image{}) + So(err, ShouldBeNil) + + return configBlob, nil + }, + PutImageManifestFn: func(repo, reference, mediaType string, body []byte) (godigest.Digest, error) { + return "", ErrTestError + }, + DeleteImageManifestFn: func(repo, reference string, dc bool) error { + return nil + }, + GetImageManifestFn: func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte("{}"), "1", "1", nil + }, + } + + ctlr.RepoDB = mocks.RepoDBMock{ + DeleteRepoTagFn: func(repo, tag string) error { return ErrTestError }, + } + + resp, err = resty.R().Delete(baseURL + "/v2/" + "repo1" + "/manifests/" + + "343ebab94a7674da181c6ea3da013aee4f8cbe357870f8dcaf6268d5343c3474.sig") + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + 
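The failure injection in these cases relies on "function field" fakes: mocks.MockedImageStore satisfies the store interface by delegating each method to an overridable func field, so a test swaps in only the behaviour it cares about and leaves the rest at a default. A trimmed sketch of the idea (the real mock covers the whole interface; the type name here is illustrative):

type fakeImageStore struct {
	GetImageManifestFn func(repo, reference string) ([]byte, godigest.Digest, string, error)
}

// GetImageManifest delegates to the injected function when set, otherwise returns zero values.
func (f fakeImageStore) GetImageManifest(repo, reference string) ([]byte, godigest.Digest, string, error) {
	if f.GetImageManifestFn != nil {
		return f.GetImageManifestFn(repo, reference)
	}

	return []byte{}, "", "", nil
}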
So(resp.StatusCode(), ShouldEqual, http.StatusInternalServerError) + }) + }) + }) +} + +func updateManifestConfig(manifest ispec.Manifest, config ispec.Image) (ispec.Manifest, error) { + configBlob, err := json.Marshal(config) + + configDigest := godigest.FromBytes(configBlob) + configSize := len(configBlob) + + manifest.Config.Digest = configDigest + manifest.Config.Size = int64(configSize) + + return manifest, err +} + func TestBaseOciLayoutUtils(t *testing.T) { manifestDigest := GetTestBlobDigest("zot-test", "config").String() @@ -3147,6 +4254,105 @@ func TestBaseOciLayoutUtils(t *testing.T) { _, err := olu.GetRepoLastUpdated("") So(err, ShouldNotBeNil) }) + + Convey("GetImageTagsWithTimestamp: GetImageBlobManifest fails", t, func() { + index := ispec.Index{ + Manifests: []ispec.Descriptor{ + {Annotations: map[string]string{ispec.AnnotationRefName: "w"}}, {}, + }, + } + + indexBlob, err := json.Marshal(index) + So(err, ShouldBeNil) + + mockStoreController := mocks.MockedImageStore{ + GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) { + return nil, ErrTestError + }, + GetIndexContentFn: func(repo string) ([]byte, error) { + return indexBlob, nil + }, + } + + storeController := storage.StoreController{DefaultStore: mockStoreController} + olu := common.NewBaseOciLayoutUtils(storeController, log.NewLogger("debug", "")) + + _, err = olu.GetImageTagsWithTimestamp("rep") + So(err, ShouldNotBeNil) + }) + + Convey("GetExpandedRepoInfo: fails", t, func() { + index := ispec.Index{ + Manifests: []ispec.Descriptor{ + {}, + { + Annotations: map[string]string{ + ispec.AnnotationRefName: "w", + ispec.AnnotationVendor: "vend", + }, + }, + }, + } + + indexBlob, err := json.Marshal(index) + So(err, ShouldBeNil) + + manifest := ispec.Manifest{ + Annotations: map[string]string{ + ispec.AnnotationRefName: "w", + ispec.AnnotationVendor: "vend", + }, + Layers: []ispec.Descriptor{ + {}, + {}, + }, + } + + manifestBlob, err := json.Marshal(manifest) + So(err, ShouldBeNil) + + mockStoreController := mocks.MockedImageStore{ + GetIndexContentFn: func(repo string) ([]byte, error) { + return nil, ErrTestError + }, + } + + storeController := storage.StoreController{DefaultStore: mockStoreController} + olu := common.NewBaseOciLayoutUtils(storeController, log.NewLogger("debug", "")) + + _, err = olu.GetExpandedRepoInfo("rep") + So(err, ShouldNotBeNil) + + // GetRepoLastUpdated fails + mockStoreController = mocks.MockedImageStore{ + GetIndexContentFn: func(repo string) ([]byte, error) { + return indexBlob, nil + }, + } + + storeController = storage.StoreController{DefaultStore: mockStoreController} + olu = common.NewBaseOciLayoutUtils(storeController, log.NewLogger("debug", "")) + + _, err = olu.GetExpandedRepoInfo("rep") + So(err, ShouldNotBeNil) + + // anotations + + mockStoreController = mocks.MockedImageStore{ + GetIndexContentFn: func(repo string) ([]byte, error) { + return indexBlob, nil + }, + GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) { + return manifestBlob, nil + }, + } + + storeController = storage.StoreController{DefaultStore: mockStoreController} + olu = common.NewBaseOciLayoutUtils(storeController, log.NewLogger("debug", "")) + + _, err = olu.GetExpandedRepoInfo("rep") + So(err, ShouldBeNil) + }) } func TestSearchSize(t *testing.T) { @@ -3199,19 +4405,19 @@ func TestSearchSize(t *testing.T) { So(err, ShouldBeNil) query := ` - { - GlobalSearch(query:"test"){ - Images { RepoName Tag LastUpdated Size Score } - Repos { - Name LastUpdated Size Vendors 
Score - Platforms { - Os - Arch - } + { + GlobalSearch(query:"testrepo:"){ + Images { RepoName Tag LastUpdated Size Score } + Repos { + Name LastUpdated Size Vendors Score + Platforms { + Os + Arch } - Layers { Digest Size } } - }` + Layers { Digest Size } + } + }` resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) So(err, ShouldBeNil) So(configSize+layersSize+manifestSize, ShouldNotBeZeroValue) @@ -3225,12 +4431,34 @@ func TestSearchSize(t *testing.T) { size, err := strconv.Atoi(image.Size) So(err, ShouldBeNil) - So(size, ShouldAlmostEqual, configSize+layersSize+manifestSize) + So(size, ShouldEqual, configSize+layersSize+manifestSize) + + query = ` + { + GlobalSearch(query:"testrepo"){ + Images { RepoName Tag LastUpdated Size Score } + Repos { + Name LastUpdated Size Vendors Score + Platforms { + Os + Arch + } + } + Layers { Digest Size } + } + }` + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(err, ShouldBeNil) + So(configSize+layersSize+manifestSize, ShouldNotBeZeroValue) + + responseStruct = &GlobalSearchResultResp{} + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) repo := responseStruct.GlobalSearchResult.GlobalSearch.Repos[0] size, err = strconv.Atoi(repo.Size) So(err, ShouldBeNil) - So(size, ShouldAlmostEqual, configSize+layersSize+manifestSize) + So(size, ShouldEqual, configSize+layersSize+manifestSize) // add the same image with different tag err = UploadImage( @@ -3245,6 +4473,22 @@ func TestSearchSize(t *testing.T) { ) So(err, ShouldBeNil) + // query for images + query = ` + { + GlobalSearch(query:"testrepo:"){ + Images { RepoName Tag LastUpdated Size Score } + Repos { + Name LastUpdated Size Vendors Score + Platforms { + Os + Arch + } + } + Layers { Digest Size } + } + }` + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) So(err, ShouldBeNil) So(configSize+layersSize+manifestSize, ShouldNotBeZeroValue) @@ -3255,10 +4499,34 @@ func TestSearchSize(t *testing.T) { So(len(responseStruct.GlobalSearchResult.GlobalSearch.Images), ShouldEqual, 2) // check that the repo size is the same + // query for repos + query = ` + { + GlobalSearch(query:"testrepo"){ + Images { RepoName Tag LastUpdated Size Score } + Repos { + Name LastUpdated Size Vendors Score + Platforms { + Os + Arch + } + } + Layers { Digest Size } + } + }` + + resp, err = resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query)) + So(err, ShouldBeNil) + So(configSize+layersSize+manifestSize, ShouldNotBeZeroValue) + + responseStruct = &GlobalSearchResultResp{} + err = json.Unmarshal(resp.Body(), responseStruct) + So(err, ShouldBeNil) + repo = responseStruct.GlobalSearchResult.GlobalSearch.Repos[0] size, err = strconv.Atoi(repo.Size) So(err, ShouldBeNil) - So(size, ShouldAlmostEqual, configSize+layersSize+manifestSize) + So(size, ShouldEqual, configSize+layersSize+manifestSize) }) } @@ -3282,14 +4550,20 @@ func TestImageSummary(t *testing.T) { gqlQuery := ` { Image(image:"%s:%s"){ - RepoName, - Tag, - Digest, - ConfigDigest, - LastUpdated, - IsSigned, + RepoName + Tag + Digest + ConfigDigest + LastUpdated + IsSigned Size + Platform { Os Arch } Layers { Digest Size } + Vulnerabilities { Count MaxSeverity } + History { + HistoryDescription { Created } + Layer { Digest Size } + } } }` @@ -3310,6 +4584,10 @@ func TestImageSummary(t *testing.T) { gqlEndpoint := fmt.Sprintf("%s%s?query=", baseURL, graphqlQueryPrefix) config, layers, manifest, err 
:= GetImageComponents(100) So(err, ShouldBeNil) + createdTime := time.Date(2010, 1, 1, 12, 0, 0, 0, time.UTC) + config.History = append(config.History, ispec.History{Created: &createdTime}) + manifest, err = updateManifestConfig(manifest, config) + So(err, ShouldBeNil) configBlob, errConfig := json.Marshal(config) configDigest := godigest.FromBytes(configBlob) @@ -3380,14 +4658,23 @@ func TestImageSummary(t *testing.T) { So(imgSummaryResponse, ShouldNotBeNil) So(imgSummaryResponse.SingleImageSummary, ShouldNotBeNil) So(imgSummaryResponse.SingleImageSummary.ImageSummary, ShouldNotBeNil) - imgSummary := imgSummaryResponse.SingleImageSummary.ImageSummary So(imgSummary.RepoName, ShouldContainSubstring, repoName) + So(imgSummary.Tag, ShouldContainSubstring, tagTarget) So(imgSummary.ConfigDigest, ShouldContainSubstring, configDigest.Encoded()) So(imgSummary.Digest, ShouldContainSubstring, manifestDigest.Encoded()) So(len(imgSummary.Layers), ShouldEqual, 1) So(imgSummary.Layers[0].Digest, ShouldContainSubstring, godigest.FromBytes(layers[0]).Encoded()) + So(imgSummary.LastUpdated, ShouldEqual, createdTime) + So(imgSummary.IsSigned, ShouldEqual, false) + So(imgSummary.Platform.Os, ShouldEqual, "linux") + So(imgSummary.Platform.Arch, ShouldEqual, "amd64") + So(len(imgSummary.History), ShouldEqual, 1) + So(imgSummary.History[0].HistoryDescription.Created, ShouldEqual, createdTime) + // No vulnerabilities should be detected since trivy is disabled + So(imgSummary.Vulnerabilities.Count, ShouldEqual, 0) + So(imgSummary.Vulnerabilities.MaxSeverity, ShouldEqual, "") t.Log("starting Test retrieve duplicated image same layers based on image identifier") // gqlEndpoint @@ -3407,7 +4694,7 @@ func TestImageSummary(t *testing.T) { So(len(imgSummaryResponse.Errors), ShouldEqual, 1) So(imgSummaryResponse.Errors[0].Message, - ShouldContainSubstring, "repository: not found") + ShouldContainSubstring, "repodb: repo metadata not found for given repo name") t.Log("starting Test retrieve image with bad tag") // gql is parametrized with the repo. 
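For orientation, the tests in this block all follow the same three-step invariant: edit the image config, re-sync the manifest's config descriptor via the updateManifestConfig helper added earlier, then push. The appended Created timestamp is what the image summary's LastUpdated and History assertions are later checked against. Condensed, using the same variable names as the test:

createdTime := time.Date(2010, 1, 1, 12, 0, 0, 0, time.UTC)
config.History = append(config.History, ispec.History{Created: &createdTime})

// recompute manifest.Config.Digest and manifest.Config.Size so the pushed manifest
// points at the config blob that is actually uploaded
manifest, err = updateManifestConfig(manifest, config)
So(err, ShouldBeNil)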
@@ -3427,7 +4714,125 @@ func TestImageSummary(t *testing.T) { So(len(imgSummaryResponse.Errors), ShouldEqual, 1) So(imgSummaryResponse.Errors[0].Message, - ShouldContainSubstring, "manifest: not found") + ShouldContainSubstring, "can't find image: test-repo:nonexisttag") + }) + + Convey("GraphQL query ImageSummary with Vulnerability scan enabled", t, func() { + port := GetFreePort() + baseURL := GetBaseURL(port) + conf := config.New() + conf.HTTP.Port = port + conf.Storage.RootDirectory = t.TempDir() + + defaultVal := true + updateDuration, _ := time.ParseDuration("1h") + cveConfig := &extconf.CVEConfig{ + UpdateInterval: updateDuration, + } + searchConfig := &extconf.SearchConfig{ + BaseConfig: extconf.BaseConfig{Enable: &defaultVal}, + CVE: cveConfig, + } + conf.Extensions = &extconf.ExtensionConfig{ + Search: searchConfig, + } + + ctlr := api.NewController(conf) + + gqlQuery := ` + { + Image(image:"%s:%s"){ + RepoName + Tag + Digest + ConfigDigest + LastUpdated + IsSigned + Size + Platform { Os Arch } + Layers { Digest Size } + Vulnerabilities { Count MaxSeverity } + History { + HistoryDescription { Created } + Layer { Digest Size } + } + } + }` + + gqlEndpoint := fmt.Sprintf("%s%s?query=", baseURL, graphqlQueryPrefix) + config, layers, manifest, err := GetImageComponents(100) + So(err, ShouldBeNil) + createdTime := time.Date(2010, 1, 1, 12, 0, 0, 0, time.UTC) + config.History = append(config.History, ispec.History{Created: &createdTime}) + manifest, err = updateManifestConfig(manifest, config) + So(err, ShouldBeNil) + + configBlob, errConfig := json.Marshal(config) + configDigest := godigest.FromBytes(configBlob) + So(errConfig, ShouldBeNil) // marshall success, config is valid JSON + go startServer(ctlr) + defer stopServer(ctlr) + WaitTillServerReady(baseURL) + + manifestBlob, errMarsal := json.Marshal(manifest) + So(errMarsal, ShouldBeNil) + So(manifestBlob, ShouldNotBeNil) + manifestDigest := godigest.FromBytes(manifestBlob) + repoName := "test-repo" //nolint:goconst + + tagTarget := "latest" + err = UploadImage( + Image{ + Manifest: manifest, + Config: config, + Layers: layers, + Tag: tagTarget, + }, + baseURL, + repoName, + ) + So(err, ShouldBeNil) + var ( + imgSummaryResponse ImageSummaryResult + strQuery string + targetURL string + resp *resty.Response + ) + + t.Log("starting Test retrieve image based on image identifier") + // gql is parametrized with the repo. 
+ strQuery = fmt.Sprintf(gqlQuery, repoName, tagTarget) + targetURL = fmt.Sprintf("%s%s", gqlEndpoint, url.QueryEscape(strQuery)) + + resp, err = resty.R().Get(targetURL) + So(resp, ShouldNotBeNil) + So(err, ShouldBeNil) + So(resp.StatusCode(), ShouldEqual, 200) + So(resp.Body(), ShouldNotBeNil) + + err = json.Unmarshal(resp.Body(), &imgSummaryResponse) + So(err, ShouldBeNil) + So(imgSummaryResponse, ShouldNotBeNil) + So(imgSummaryResponse.SingleImageSummary, ShouldNotBeNil) + So(imgSummaryResponse.SingleImageSummary.ImageSummary, ShouldNotBeNil) + + imgSummary := imgSummaryResponse.SingleImageSummary.ImageSummary + So(imgSummary.RepoName, ShouldContainSubstring, repoName) + So(imgSummary.Tag, ShouldContainSubstring, tagTarget) + So(imgSummary.ConfigDigest, ShouldContainSubstring, configDigest.Encoded()) + So(imgSummary.Digest, ShouldContainSubstring, manifestDigest.Encoded()) + So(len(imgSummary.Layers), ShouldEqual, 1) + So(imgSummary.Layers[0].Digest, ShouldContainSubstring, + godigest.FromBytes(layers[0]).Encoded()) + So(imgSummary.LastUpdated, ShouldEqual, createdTime) + So(imgSummary.IsSigned, ShouldEqual, false) + So(imgSummary.Platform.Os, ShouldEqual, "linux") + So(imgSummary.Platform.Arch, ShouldEqual, "amd64") + So(len(imgSummary.History), ShouldEqual, 1) + So(imgSummary.History[0].HistoryDescription.Created, ShouldEqual, createdTime) + So(imgSummary.Vulnerabilities.Count, ShouldEqual, 0) + // There are 0 vulnerabilities this data used in tests + So(imgSummary.Vulnerabilities.MaxSeverity, ShouldEqual, "NONE") }) } diff --git a/pkg/extensions/search/common/oci_layout.go b/pkg/extensions/search/common/oci_layout.go index 2ebc2bfa..7392110a 100644 --- a/pkg/extensions/search/common/oci_layout.go +++ b/pkg/extensions/search/common/oci_layout.go @@ -7,6 +7,7 @@ import ( "fmt" "path" "strconv" + "strings" "time" notreg "github.com/notaryproject/notation-go/registry" @@ -22,7 +23,7 @@ type OciLayoutUtils interface { //nolint: interfacebloat GetImageManifest(repo string, reference string) (ispec.Manifest, godigest.Digest, error) GetImageManifests(repo string) ([]ispec.Descriptor, error) GetImageBlobManifest(repo string, digest godigest.Digest) (ispec.Manifest, error) - GetImageInfo(repo string, digest godigest.Digest) (ispec.Image, error) + GetImageInfo(repo string, configDigest godigest.Digest) (ispec.Image, error) GetImageTagsWithTimestamp(repo string) ([]TagInfo, error) GetImagePlatform(imageInfo ispec.Image) (string, string) GetImageManifestSize(repo string, manifestDigest godigest.Digest) int64 @@ -147,7 +148,7 @@ func (olu BaseOciLayoutUtils) GetImageBlobManifest(repo string, digest godigest. return blobIndex, nil } -func (olu BaseOciLayoutUtils) GetImageInfo(repo string, digest godigest.Digest) (ispec.Image, error) { +func (olu BaseOciLayoutUtils) GetImageInfo(repo string, configDigest godigest.Digest) (ispec.Image, error) { var imageInfo ispec.Image var lockLatency time.Time @@ -157,7 +158,7 @@ func (olu BaseOciLayoutUtils) GetImageInfo(repo string, digest godigest.Digest) imageStore.RLock(&lockLatency) defer imageStore.RUnlock(&lockLatency) - blobBuf, err := imageStore.GetBlobContent(repo, digest) + blobBuf, err := imageStore.GetBlobContent(repo, configDigest) if err != nil { olu.Log.Error().Err(err).Msg("unable to open image layers file") @@ -230,6 +231,10 @@ func (olu BaseOciLayoutUtils) checkNotarySignature(name string, digest godigest. // check cosign signature corresponding to manifest. 
func (olu BaseOciLayoutUtils) checkCosignSignature(name string, digest godigest.Digest) bool { + if digest.Validate() != nil { + return false + } + imageStore := olu.StoreController.GetImageStore(name) // if manifest is signed using cosign mechanism, cosign adds a new manifest. @@ -342,8 +347,8 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(name string) (RepoInfo, error) return RepoInfo{}, err } - repoPlatforms := make([]OsArch, 0) - repoVendors := make([]string, 0, len(manifestList)) + repoVendorsSet := make(map[string]bool, len(manifestList)) + repoPlatformsSet := make(map[string]OsArch, len(manifestList)) var lastUpdatedImageSummary ImageSummary @@ -381,13 +386,16 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(name string) (RepoInfo, error) continue } - os, arch := olu.GetImagePlatform(imageConfigInfo) + opSys, arch := olu.GetImagePlatform(imageConfigInfo) osArch := OsArch{ - Os: os, + Os: opSys, Arch: arch, } - repoPlatforms = append(repoPlatforms, osArch) + if opSys != "" || arch != "" { + osArchString := strings.TrimSpace(fmt.Sprintf("%s %s", opSys, arch)) + repoPlatformsSet[osArchString] = osArch + } layers := make([]LayerSummary, 0) @@ -410,7 +418,53 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(name string) (RepoInfo, error) // get image info from manifest annotation, if not found get from image config labels. annotations := GetAnnotations(manifest.Annotations, imageConfigInfo.Config.Labels) - repoVendors = append(repoVendors, annotations.Vendor) + if annotations.Vendor != "" { + repoVendorsSet[annotations.Vendor] = true + } + + imageConfigHistory := imageConfigInfo.History + allHistory := []LayerHistory{} + + if len(imageConfigHistory) == 0 { + for _, layer := range layers { + allHistory = append(allHistory, LayerHistory{ + Layer: layer, + HistoryDescription: HistoryDescription{}, + }) + } + } else { + // iterator over manifest layers + var layersIterator int + // since we are appending pointers, it is important to iterate with an index over slice + for i := range imageConfigHistory { + allHistory = append(allHistory, LayerHistory{ + HistoryDescription: HistoryDescription{ + Created: *imageConfigHistory[i].Created, + CreatedBy: imageConfigHistory[i].CreatedBy, + Author: imageConfigHistory[i].Author, + Comment: imageConfigHistory[i].Comment, + EmptyLayer: imageConfigHistory[i].EmptyLayer, + }, + }) + + if imageConfigHistory[i].EmptyLayer { + continue + } + + if layersIterator+1 > len(layers) { + olu.Log.Error().Err(errors.ErrBadLayerCount). 
+ Msgf("error on creating layer history for imaeg %s %s", name, man.Digest) + + break + } + + allHistory[i].Layer = layers[layersIterator] + + layersIterator++ + } + } + + olu.Log.Debug().Msgf("all history %v", allHistory) size := strconv.Itoa(int(imageSize)) manifestDigest := man.Digest.String() @@ -436,6 +490,7 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(name string) (RepoInfo, error) Labels: annotations.Labels, Source: annotations.Source, Layers: layers, + History: allHistory, } imageSummaries = append(imageSummaries, imageSummary) @@ -453,6 +508,19 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(name string) (RepoInfo, error) size := strconv.FormatInt(repoSize, 10) + repoPlatforms := make([]OsArch, 0, len(repoPlatformsSet)) + + for _, osArch := range repoPlatformsSet { + repoPlatforms = append(repoPlatforms, osArch) + } + + repoVendors := make([]string, 0, len(repoVendorsSet)) + + for vendor := range repoVendorsSet { + vendor := vendor + repoVendors = append(repoVendors, vendor) + } + summary := RepoSummary{ Name: name, LastUpdated: lastUpdatedTag.Timestamp, diff --git a/pkg/extensions/search/convert/convert_test.go b/pkg/extensions/search/convert/convert_test.go new file mode 100644 index 00000000..3175e1d0 --- /dev/null +++ b/pkg/extensions/search/convert/convert_test.go @@ -0,0 +1,75 @@ +package convert_test + +import ( + "context" + "encoding/json" + "errors" + "testing" + + "github.com/99designs/gqlgen/graphql" + godigest "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + . "github.com/smartystreets/goconvey/convey" + + "zotregistry.io/zot/pkg/extensions/search/convert" + cveinfo "zotregistry.io/zot/pkg/extensions/search/cve" + "zotregistry.io/zot/pkg/meta/repodb" + bolt "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" + "zotregistry.io/zot/pkg/test/mocks" +) + +var ErrTestError = errors.New("TestError") + +func TestConvertErrors(t *testing.T) { + Convey("", t, func() { + repoDB, err := bolt.NewBoltDBWrapper(bolt.DBParameters{ + RootDir: t.TempDir(), + }) + So(err, ShouldBeNil) + + configBlob, err := json.Marshal(ispec.Image{}) + So(err, ShouldBeNil) + + manifestBlob, err := json.Marshal(ispec.Manifest{ + Layers: []ispec.Descriptor{ + { + MediaType: ispec.MediaTypeImageLayerGzip, + Size: 0, + Digest: godigest.NewDigestFromEncoded(godigest.SHA256, "digest"), + }, + }, + }) + So(err, ShouldBeNil) + + repoMeta11 := repodb.ManifestMetadata{ + ManifestBlob: manifestBlob, + ConfigBlob: configBlob, + } + + digest11 := godigest.FromString("abc1") + err = repoDB.SetManifestMeta("repo1", digest11, repoMeta11) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("repo1", "0.1.0", digest11, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + repoMetas, manifestMetaMap, err := repoDB.SearchRepos(context.Background(), "", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + + ctx := graphql.WithResponseContext(context.Background(), + graphql.DefaultErrorPresenter, graphql.DefaultRecover) + + _ = convert.RepoMeta2RepoSummary( + ctx, + repoMetas[0], + manifestMetaMap, + convert.SkipQGLField{}, + mocks.CveInfoMock{ + GetCVESummaryForImageFn: func(image string) (cveinfo.ImageCVESummary, error) { + return cveinfo.ImageCVESummary{}, ErrTestError + }, + }, + ) + + So(graphql.GetErrors(ctx).Error(), ShouldContainSubstring, "unable to run vulnerability scan on tag") + }) +} diff --git a/pkg/extensions/search/convert/oci.go b/pkg/extensions/search/convert/oci.go new file mode 100644 index 00000000..c819a32e --- /dev/null +++ 
b/pkg/extensions/search/convert/oci.go @@ -0,0 +1,280 @@ +package convert + +import ( + "strconv" + + godigest "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + + zerr "zotregistry.io/zot/errors" + "zotregistry.io/zot/pkg/extensions/search/common" + "zotregistry.io/zot/pkg/extensions/search/gql_generated" + "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" +) + +func BuildImageInfo(repo string, tag string, manifestDigest godigest.Digest, + manifest ispec.Manifest, imageConfig ispec.Image, isSigned bool, +) *gql_generated.ImageSummary { + layers := []*gql_generated.LayerSummary{} + size := int64(0) + log := log.NewLogger("debug", "") + allHistory := []*gql_generated.LayerHistory{} + formattedManifestDigest := manifestDigest.String() + configDigest := manifest.Config.Digest.String() + annotations := common.GetAnnotations(manifest.Annotations, imageConfig.Config.Labels) + lastUpdated := common.GetImageLastUpdated(imageConfig) + + authors := annotations.Authors + if authors == "" { + authors = imageConfig.Author + } + + history := imageConfig.History + if len(history) == 0 { + for _, layer := range manifest.Layers { + size += layer.Size + digest := layer.Digest.String() + layerSize := strconv.FormatInt(layer.Size, 10) + + layer := &gql_generated.LayerSummary{ + Size: &layerSize, + Digest: &digest, + } + + layers = append( + layers, + layer, + ) + + allHistory = append(allHistory, &gql_generated.LayerHistory{ + Layer: layer, + HistoryDescription: &gql_generated.HistoryDescription{}, + }) + } + + formattedSize := strconv.FormatInt(size, 10) + + imageInfo := &gql_generated.ImageSummary{ + RepoName: &repo, + Tag: &tag, + Digest: &formattedManifestDigest, + ConfigDigest: &configDigest, + Size: &formattedSize, + Layers: layers, + History: allHistory, + Vendor: &annotations.Vendor, + Description: &annotations.Description, + Title: &annotations.Title, + Documentation: &annotations.Documentation, + Licenses: &annotations.Licenses, + Labels: &annotations.Labels, + Source: &annotations.Source, + Authors: &authors, + LastUpdated: &lastUpdated, + IsSigned: &isSigned, + Platform: &gql_generated.OsArch{ + Os: &imageConfig.OS, + Arch: &imageConfig.Architecture, + }, + } + + return imageInfo + } + + // iterator over manifest layers + var layersIterator int + // since we are appending pointers, it is important to iterate with an index over slice + for i := range history { + allHistory = append(allHistory, &gql_generated.LayerHistory{ + HistoryDescription: &gql_generated.HistoryDescription{ + Created: history[i].Created, + CreatedBy: &history[i].CreatedBy, + Author: &history[i].Author, + Comment: &history[i].Comment, + EmptyLayer: &history[i].EmptyLayer, + }, + }) + + if history[i].EmptyLayer { + continue + } + + if layersIterator+1 > len(manifest.Layers) { + formattedSize := strconv.FormatInt(size, 10) + + log.Error().Err(zerr.ErrBadLayerCount).Msg("error on creating layer history for ImageSummary") + + return &gql_generated.ImageSummary{ + RepoName: &repo, + Tag: &tag, + Digest: &formattedManifestDigest, + ConfigDigest: &configDigest, + Size: &formattedSize, + Layers: layers, + History: allHistory, + Vendor: &annotations.Vendor, + Description: &annotations.Description, + Title: &annotations.Title, + Documentation: &annotations.Documentation, + Licenses: &annotations.Licenses, + Labels: &annotations.Labels, + Source: &annotations.Source, + Authors: &authors, + LastUpdated: &lastUpdated, + IsSigned: &isSigned, + Platform: &gql_generated.OsArch{ + 
Os: &imageConfig.OS, + Arch: &imageConfig.Architecture, + }, + } + } + + size += manifest.Layers[layersIterator].Size + digest := manifest.Layers[layersIterator].Digest.String() + layerSize := strconv.FormatInt(manifest.Layers[layersIterator].Size, 10) + + layer := &gql_generated.LayerSummary{ + Size: &layerSize, + Digest: &digest, + } + + layers = append( + layers, + layer, + ) + + allHistory[i].Layer = layer + + layersIterator++ + } + + formattedSize := strconv.FormatInt(size, 10) + + imageInfo := &gql_generated.ImageSummary{ + RepoName: &repo, + Tag: &tag, + Digest: &formattedManifestDigest, + ConfigDigest: &configDigest, + Size: &formattedSize, + Layers: layers, + History: allHistory, + Vendor: &annotations.Vendor, + Description: &annotations.Description, + Title: &annotations.Title, + Documentation: &annotations.Documentation, + Licenses: &annotations.Licenses, + Labels: &annotations.Labels, + Source: &annotations.Source, + Authors: &authors, + LastUpdated: &lastUpdated, + IsSigned: &isSigned, + Platform: &gql_generated.OsArch{ + Os: &imageConfig.OS, + Arch: &imageConfig.Architecture, + }, + } + + return imageInfo +} + +// updateRepoBlobsMap adds all the image blobs and their respective size to the repo blobs map +// and returnes the total size of the image. +func updateRepoBlobsMap(manifestDigest string, manifestSize int64, configDigest string, configSize int64, + layers []ispec.Descriptor, repoBlob2Size map[string]int64, +) int64 { + imgSize := int64(0) + + // add config size + imgSize += configSize + repoBlob2Size[configDigest] = configSize + + // add manifest size + imgSize += manifestSize + repoBlob2Size[manifestDigest] = manifestSize + + // add layers size + for _, layer := range layers { + repoBlob2Size[layer.Digest.String()] = layer.Size + imgSize += layer.Size + } + + return imgSize +} + +func getLayersSummaries(manifestContent ispec.Manifest) []*gql_generated.LayerSummary { + layers := make([]*gql_generated.LayerSummary, 0, len(manifestContent.Layers)) + + for _, layer := range manifestContent.Layers { + size := strconv.FormatInt(layer.Size, 10) + digest := layer.Digest.String() + + layers = append(layers, &gql_generated.LayerSummary{ + Size: &size, + Digest: &digest, + }) + } + + return layers +} + +func getAllHistory(manifestContent ispec.Manifest, configContent ispec.Image) ( + []*gql_generated.LayerHistory, error, +) { + allHistory := []*gql_generated.LayerHistory{} + layerSummaries := getLayersSummaries(manifestContent) + + history := configContent.History + if len(history) == 0 { + // We don't have any image history metadata + // let's make due with just the layer metadata + for _, layer := range layerSummaries { + allHistory = append(allHistory, &gql_generated.LayerHistory{ + Layer: layer, + HistoryDescription: &gql_generated.HistoryDescription{}, + }) + } + + return allHistory, nil + } + + // Iterator over manifest layers + var layersIterator int + // Since we are appending pointers, it is important to iterate with an index over slice + for i := range history { + allHistory = append(allHistory, &gql_generated.LayerHistory{ + HistoryDescription: &gql_generated.HistoryDescription{ + Created: history[i].Created, + CreatedBy: &history[i].CreatedBy, + Author: &history[i].Author, + Comment: &history[i].Comment, + EmptyLayer: &history[i].EmptyLayer, + }, + }) + + if history[i].EmptyLayer { + continue + } + + if layersIterator+1 > len(manifestContent.Layers) { + return allHistory, zerr.ErrBadLayerCount + } + + allHistory[i].Layer = layerSummaries[layersIterator] + + 
layersIterator++ + } + + return allHistory, nil +} + +func imageHasSignatures(signatures repodb.ManifestSignatures) bool { + // (sigType, signatures) + for _, sigs := range signatures { + if len(sigs) > 0 { + return true + } + } + + return false +} diff --git a/pkg/extensions/search/convert/repodb.go b/pkg/extensions/search/convert/repodb.go new file mode 100644 index 00000000..714b063b --- /dev/null +++ b/pkg/extensions/search/convert/repodb.go @@ -0,0 +1,546 @@ +package convert + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/99designs/gqlgen/graphql" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/vektah/gqlparser/v2/gqlerror" + + "zotregistry.io/zot/pkg/extensions/search/common" + cveinfo "zotregistry.io/zot/pkg/extensions/search/cve" + "zotregistry.io/zot/pkg/extensions/search/gql_generated" + "zotregistry.io/zot/pkg/meta/repodb" +) + +type SkipQGLField struct { + Vulnerabilities bool +} + +func RepoMeta2RepoSummary(ctx context.Context, repoMeta repodb.RepoMetadata, + manifestMetaMap map[string]repodb.ManifestMetadata, skip SkipQGLField, cveInfo cveinfo.CveInfo, +) *gql_generated.RepoSummary { + var ( + repoLastUpdatedTimestamp = time.Time{} + repoPlatformsSet = map[string]*gql_generated.OsArch{} + repoVendorsSet = map[string]bool{} + lastUpdatedImageSummary *gql_generated.ImageSummary + repoStarCount = repoMeta.Stars + isBookmarked = false + isStarred = false + repoDownloadCount = 0 + repoName = repoMeta.Name + + // map used to keep track of all blobs of a repo without dublicates as + // some images may have the same layers + repoBlob2Size = make(map[string]int64, 10) + + // made up of all manifests, configs and image layers + size = int64(0) + ) + + for tag, descriptor := range repoMeta.Tags { + var ( + manifestContent ispec.Manifest + manifestDigest = descriptor.Digest + imageSignatures = repoMeta.Signatures[descriptor.Digest] + ) + + err := json.Unmarshal(manifestMetaMap[manifestDigest].ManifestBlob, &manifestContent) + if err != nil { + graphql.AddError(ctx, gqlerror.Errorf("can't unmarshal manifest blob for image: %s:%s, manifest digest: %s, "+ + "error: %s", repoMeta.Name, tag, manifestDigest, err.Error())) + + continue + } + + var configContent ispec.Image + + err = json.Unmarshal(manifestMetaMap[manifestDigest].ConfigBlob, &configContent) + if err != nil { + graphql.AddError(ctx, gqlerror.Errorf("can't unmarshal config blob for image: %s:%s, manifest digest: %s, error: %s", + repoMeta.Name, tag, manifestDigest, err.Error())) + + continue + } + + var ( + tag = tag + isSigned = imageHasSignatures(imageSignatures) + configDigest = manifestContent.Config.Digest.String() + configSize = manifestContent.Config.Size + opSys = configContent.OS + arch = configContent.Architecture + osArch = gql_generated.OsArch{Os: &opSys, Arch: &arch} + imageLastUpdated = common.GetImageLastUpdated(configContent) + downloadCount = repoMeta.Statistics[descriptor.Digest].DownloadCount + + size = updateRepoBlobsMap( + manifestDigest, int64(len(manifestMetaMap[manifestDigest].ManifestBlob)), + configDigest, configSize, + manifestContent.Layers, + repoBlob2Size) + imageSize = strconv.FormatInt(size, 10) + ) + + annotations := common.GetAnnotations(manifestContent.Annotations, configContent.Config.Labels) + + authors := annotations.Authors + if authors == "" { + authors = configContent.Author + } + + historyEntries, err := getAllHistory(manifestContent, configContent) + if err != nil { + graphql.AddError(ctx, gqlerror.Errorf("error 
generating history on tag %s in repo %s: "+ + "manifest digest: %s, error: %s", tag, repoMeta.Name, manifestDigest, err.Error())) + } + + imageCveSummary := cveinfo.ImageCVESummary{} + + imageSummary := gql_generated.ImageSummary{ + RepoName: &repoName, + Tag: &tag, + Digest: &manifestDigest, + ConfigDigest: &configDigest, + LastUpdated: &imageLastUpdated, + IsSigned: &isSigned, + Size: &imageSize, + Platform: &osArch, + Vendor: &annotations.Vendor, + DownloadCount: &downloadCount, + Layers: getLayersSummaries(manifestContent), + Description: &annotations.Description, + Title: &annotations.Title, + Documentation: &annotations.Documentation, + Licenses: &annotations.Licenses, + Labels: &annotations.Labels, + Source: &annotations.Source, + Authors: &authors, + History: historyEntries, + Vulnerabilities: &gql_generated.ImageVulnerabilitySummary{ + MaxSeverity: &imageCveSummary.MaxSeverity, + Count: &imageCveSummary.Count, + }, + } + + if annotations.Vendor != "" { + repoVendorsSet[annotations.Vendor] = true + } + + if opSys != "" || arch != "" { + osArchString := strings.TrimSpace(fmt.Sprintf("%s %s", opSys, arch)) + repoPlatformsSet[osArchString] = &gql_generated.OsArch{Os: &opSys, Arch: &arch} + } + + if repoLastUpdatedTimestamp.Equal(time.Time{}) { + // initialize with first time value + repoLastUpdatedTimestamp = imageLastUpdated + lastUpdatedImageSummary = &imageSummary + } else if repoLastUpdatedTimestamp.Before(imageLastUpdated) { + repoLastUpdatedTimestamp = imageLastUpdated + lastUpdatedImageSummary = &imageSummary + } + + repoDownloadCount += repoMeta.Statistics[descriptor.Digest].DownloadCount + } + + // calculate repo size = sum all manifest, config and layer blobs sizes + for _, blobSize := range repoBlob2Size { + size += blobSize + } + + repoSize := strconv.FormatInt(size, 10) + score := 0 + + repoPlatforms := make([]*gql_generated.OsArch, 0, len(repoPlatformsSet)) + for _, osArch := range repoPlatformsSet { + repoPlatforms = append(repoPlatforms, osArch) + } + + repoVendors := make([]*string, 0, len(repoVendorsSet)) + + for vendor := range repoVendorsSet { + vendor := vendor + repoVendors = append(repoVendors, &vendor) + } + + // We only scan the latest image on the repo for performance reasons + // Check if vulnerability scanning is disabled + if cveInfo != nil && lastUpdatedImageSummary != nil && !skip.Vulnerabilities { + imageName := fmt.Sprintf("%s:%s", repoMeta.Name, *lastUpdatedImageSummary.Tag) + + imageCveSummary, err := cveInfo.GetCVESummaryForImage(imageName) + if err != nil { + // Log the error, but we should still include the image in results + graphql.AddError( + ctx, + gqlerror.Errorf( + "unable to run vulnerability scan on tag %s in repo %s: error: %s", + *lastUpdatedImageSummary.Tag, repoMeta.Name, err.Error(), + ), + ) + } + + lastUpdatedImageSummary.Vulnerabilities = &gql_generated.ImageVulnerabilitySummary{ + MaxSeverity: &imageCveSummary.MaxSeverity, + Count: &imageCveSummary.Count, + } + } + + return &gql_generated.RepoSummary{ + Name: &repoName, + LastUpdated: &repoLastUpdatedTimestamp, + Size: &repoSize, + Platforms: repoPlatforms, + Vendors: repoVendors, + Score: &score, + NewestImage: lastUpdatedImageSummary, + DownloadCount: &repoDownloadCount, + StarCount: &repoStarCount, + IsBookmarked: &isBookmarked, + IsStarred: &isStarred, + } +} + +func RepoMeta2ImageSummaries(ctx context.Context, repoMeta repodb.RepoMetadata, + manifestMetaMap map[string]repodb.ManifestMetadata, skip SkipQGLField, cveInfo cveinfo.CveInfo, +) []*gql_generated.ImageSummary { + 
imageSummaries := make([]*gql_generated.ImageSummary, 0, len(repoMeta.Tags)) + + for tag, descriptor := range repoMeta.Tags { + var ( + manifestContent ispec.Manifest + manifestDigest = descriptor.Digest + imageSignatures = repoMeta.Signatures[descriptor.Digest] + ) + + err := json.Unmarshal(manifestMetaMap[manifestDigest].ManifestBlob, &manifestContent) + if err != nil { + graphql.AddError(ctx, gqlerror.Errorf("can't unmarshal manifest blob for image: %s:%s, "+ + "manifest digest: %s, error: %s", repoMeta.Name, tag, manifestDigest, err.Error())) + + continue + } + + var configContent ispec.Image + + err = json.Unmarshal(manifestMetaMap[manifestDigest].ConfigBlob, &configContent) + if err != nil { + graphql.AddError(ctx, gqlerror.Errorf("can't unmarshal config blob for image: %s:%s, "+ + "manifest digest: %s, error: %s", repoMeta.Name, tag, manifestDigest, err.Error())) + + continue + } + + imageCveSummary := cveinfo.ImageCVESummary{} + // Check if vulnerability scanning is disabled + if cveInfo != nil && !skip.Vulnerabilities { + imageName := fmt.Sprintf("%s:%s", repoMeta.Name, tag) + imageCveSummary, err = cveInfo.GetCVESummaryForImage(imageName) + + if err != nil { + // Log the error, but we should still include the manifest in results + graphql.AddError(ctx, gqlerror.Errorf("unable to run vulnerability scan on tag %s in repo %s: "+ + "manifest digest: %s, error: %s", tag, repoMeta.Name, manifestDigest, err.Error())) + } + } + + imgSize := int64(0) + imgSize += manifestContent.Config.Size + imgSize += int64(len(manifestMetaMap[manifestDigest].ManifestBlob)) + + for _, layer := range manifestContent.Layers { + imgSize += layer.Size + } + + var ( + repoName = repoMeta.Name + tag = tag + configDigest = manifestContent.Config.Digest.String() + imageLastUpdated = common.GetImageLastUpdated(configContent) + isSigned = imageHasSignatures(imageSignatures) + imageSize = strconv.FormatInt(imgSize, 10) + os = configContent.OS + arch = configContent.Architecture + osArch = gql_generated.OsArch{Os: &os, Arch: &arch} + downloadCount = repoMeta.Statistics[descriptor.Digest].DownloadCount + ) + + annotations := common.GetAnnotations(manifestContent.Annotations, configContent.Config.Labels) + + authors := annotations.Authors + if authors == "" { + authors = configContent.Author + } + + historyEntries, err := getAllHistory(manifestContent, configContent) + if err != nil { + graphql.AddError(ctx, gqlerror.Errorf("error generating history on tag %s in repo %s: "+ + "manifest digest: %s, error: %s", tag, repoMeta.Name, manifestDigest, err.Error())) + } + + imageSummary := gql_generated.ImageSummary{ + RepoName: &repoName, + Tag: &tag, + Digest: &manifestDigest, + ConfigDigest: &configDigest, + LastUpdated: &imageLastUpdated, + IsSigned: &isSigned, + Size: &imageSize, + Platform: &osArch, + Vendor: &annotations.Vendor, + DownloadCount: &downloadCount, + Layers: getLayersSummaries(manifestContent), + Description: &annotations.Description, + Title: &annotations.Title, + Documentation: &annotations.Documentation, + Licenses: &annotations.Licenses, + Labels: &annotations.Labels, + Source: &annotations.Source, + Authors: &authors, + History: historyEntries, + Vulnerabilities: &gql_generated.ImageVulnerabilitySummary{ + MaxSeverity: &imageCveSummary.MaxSeverity, + Count: &imageCveSummary.Count, + }, + } + + imageSummaries = append(imageSummaries, &imageSummary) + } + + return imageSummaries +} + +func RepoMeta2ExpandedRepoInfo(ctx context.Context, repoMeta repodb.RepoMetadata, + manifestMetaMap 
map[string]repodb.ManifestMetadata, skip SkipQGLField, cveInfo cveinfo.CveInfo, +) (*gql_generated.RepoSummary, []*gql_generated.ImageSummary) { + var ( + repoLastUpdatedTimestamp = time.Time{} + repoPlatformsSet = map[string]*gql_generated.OsArch{} + repoVendorsSet = map[string]bool{} + lastUpdatedImageSummary *gql_generated.ImageSummary + repoStarCount = repoMeta.Stars + isBookmarked = false + isStarred = false + repoDownloadCount = 0 + repoName = repoMeta.Name + + // map used to keep track of all blobs of a repo without dublicates as + // some images may have the same layers + repoBlob2Size = make(map[string]int64, 10) + + // made up of all manifests, configs and image layers + size = int64(0) + + imageSummaries = make([]*gql_generated.ImageSummary, 0, len(repoMeta.Tags)) + ) + + for tag, descriptor := range repoMeta.Tags { + var ( + manifestContent ispec.Manifest + manifestDigest = descriptor.Digest + imageSignatures = repoMeta.Signatures[descriptor.Digest] + ) + + err := json.Unmarshal(manifestMetaMap[manifestDigest].ManifestBlob, &manifestContent) + if err != nil { + graphql.AddError(ctx, gqlerror.Errorf("can't unmarshal manifest blob for image: %s:%s, manifest digest: %s, "+ + "error: %s", repoMeta.Name, tag, manifestDigest, err.Error())) + + continue + } + + var configContent ispec.Image + + err = json.Unmarshal(manifestMetaMap[manifestDigest].ConfigBlob, &configContent) + if err != nil { + graphql.AddError(ctx, gqlerror.Errorf("can't unmarshal config blob for image: %s:%s, manifest digest: %s, error: %s", + repoMeta.Name, tag, manifestDigest, err.Error())) + + continue + } + + var ( + tag = tag + isSigned = imageHasSignatures(imageSignatures) + configDigest = manifestContent.Config.Digest.String() + configSize = manifestContent.Config.Size + opSys = configContent.OS + arch = configContent.Architecture + osArch = gql_generated.OsArch{Os: &opSys, Arch: &arch} + imageLastUpdated = common.GetImageLastUpdated(configContent) + downloadCount = repoMeta.Statistics[descriptor.Digest].DownloadCount + + size = updateRepoBlobsMap( + manifestDigest, int64(len(manifestMetaMap[manifestDigest].ManifestBlob)), + configDigest, configSize, + manifestContent.Layers, + repoBlob2Size) + imageSize = strconv.FormatInt(size, 10) + ) + + annotations := common.GetAnnotations(manifestContent.Annotations, configContent.Config.Labels) + + authors := annotations.Authors + if authors == "" { + authors = configContent.Author + } + + imageCveSummary := cveinfo.ImageCVESummary{} + + imageSummary := gql_generated.ImageSummary{ + RepoName: &repoName, + Tag: &tag, + Digest: &manifestDigest, + ConfigDigest: &configDigest, + LastUpdated: &imageLastUpdated, + IsSigned: &isSigned, + Size: &imageSize, + Platform: &osArch, + Vendor: &annotations.Vendor, + DownloadCount: &downloadCount, + Layers: getLayersSummaries(manifestContent), + Description: &annotations.Description, + Title: &annotations.Title, + Documentation: &annotations.Documentation, + Licenses: &annotations.Licenses, + Labels: &annotations.Labels, + Source: &annotations.Source, + Authors: &authors, + Vulnerabilities: &gql_generated.ImageVulnerabilitySummary{ + MaxSeverity: &imageCveSummary.MaxSeverity, + Count: &imageCveSummary.Count, + }, + } + + imageSummaries = append(imageSummaries, &imageSummary) + + if annotations.Vendor != "" { + repoVendorsSet[annotations.Vendor] = true + } + + if opSys != "" || arch != "" { + osArchString := strings.TrimSpace(fmt.Sprintf("%s %s", opSys, arch)) + repoPlatformsSet[osArchString] = &gql_generated.OsArch{Os: &opSys, Arch: 
&arch} + } + + if repoLastUpdatedTimestamp.Equal(time.Time{}) { + // initialize with first time value + repoLastUpdatedTimestamp = imageLastUpdated + lastUpdatedImageSummary = &imageSummary + } else if repoLastUpdatedTimestamp.Before(imageLastUpdated) { + repoLastUpdatedTimestamp = imageLastUpdated + lastUpdatedImageSummary = &imageSummary + } + + repoDownloadCount += repoMeta.Statistics[descriptor.Digest].DownloadCount + } + + // calculate repo size = sum all manifest, config and layer blobs sizes + for _, blobSize := range repoBlob2Size { + size += blobSize + } + + repoSize := strconv.FormatInt(size, 10) + score := 0 + + repoPlatforms := make([]*gql_generated.OsArch, 0, len(repoPlatformsSet)) + for _, osArch := range repoPlatformsSet { + repoPlatforms = append(repoPlatforms, osArch) + } + + repoVendors := make([]*string, 0, len(repoVendorsSet)) + + for vendor := range repoVendorsSet { + vendor := vendor + repoVendors = append(repoVendors, &vendor) + } + + // We only scan the latest image on the repo for performance reasons + // Check if vulnerability scanning is disabled + if cveInfo != nil && lastUpdatedImageSummary != nil && !skip.Vulnerabilities { + imageName := fmt.Sprintf("%s:%s", repoMeta.Name, *lastUpdatedImageSummary.Tag) + + imageCveSummary, err := cveInfo.GetCVESummaryForImage(imageName) + if err != nil { + // Log the error, but we should still include the image in results + graphql.AddError( + ctx, + gqlerror.Errorf( + "unable to run vulnerability scan on tag %s in repo %s: error: %s", + *lastUpdatedImageSummary.Tag, repoMeta.Name, err.Error(), + ), + ) + } + + lastUpdatedImageSummary.Vulnerabilities = &gql_generated.ImageVulnerabilitySummary{ + MaxSeverity: &imageCveSummary.MaxSeverity, + Count: &imageCveSummary.Count, + } + } + + summary := &gql_generated.RepoSummary{ + Name: &repoName, + LastUpdated: &repoLastUpdatedTimestamp, + Size: &repoSize, + Platforms: repoPlatforms, + Vendors: repoVendors, + Score: &score, + NewestImage: lastUpdatedImageSummary, + DownloadCount: &repoDownloadCount, + StarCount: &repoStarCount, + IsBookmarked: &isBookmarked, + IsStarred: &isStarred, + } + + return summary, imageSummaries +} + +func GetPreloads(ctx context.Context) map[string]bool { + if !graphql.HasOperationContext(ctx) { + return map[string]bool{} + } + + nestedPreloads := GetNestedPreloads( + graphql.GetOperationContext(ctx), + graphql.CollectFieldsCtx(ctx, nil), + "", + ) + + preloads := map[string]bool{} + + for _, str := range nestedPreloads { + preloads[str] = true + } + + return preloads +} + +func GetNestedPreloads(ctx *graphql.OperationContext, fields []graphql.CollectedField, prefix string, +) []string { + preloads := []string{} + + for _, column := range fields { + prefixColumn := GetPreloadString(prefix, column.Name) + preloads = append(preloads, prefixColumn) + preloads = append(preloads, + GetNestedPreloads(ctx, graphql.CollectFields(ctx, column.Selections, nil), prefixColumn)..., + ) + } + + return preloads +} + +func GetPreloadString(prefix, name string) string { + if len(prefix) > 0 { + return prefix + "." 
+ name + } + + return name +} diff --git a/pkg/extensions/search/cve/cve.go b/pkg/extensions/search/cve/cve.go index 7e3afc8d..75f789ab 100644 --- a/pkg/extensions/search/cve/cve.go +++ b/pkg/extensions/search/cve/cve.go @@ -1,6 +1,7 @@ package cveinfo import ( + "encoding/json" "fmt" godigest "github.com/opencontainers/go-digest" @@ -10,6 +11,7 @@ import ( cvemodel "zotregistry.io/zot/pkg/extensions/search/cve/model" "zotregistry.io/zot/pkg/extensions/search/cve/trivy" "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" "zotregistry.io/zot/pkg/storage" ) @@ -40,30 +42,59 @@ type ImageCVESummary struct { } type BaseCveInfo struct { - Log log.Logger - Scanner Scanner - LayoutUtils common.OciLayoutUtils + Log log.Logger + Scanner Scanner + RepoDB repodb.RepoDB } -func NewCVEInfo(storeController storage.StoreController, log log.Logger) *BaseCveInfo { - layoutUtils := common.NewBaseOciLayoutUtils(storeController, log) - scanner := trivy.NewScanner(storeController, layoutUtils, log) +func NewCVEInfo(storeController storage.StoreController, repoDB repodb.RepoDB, + log log.Logger, +) *BaseCveInfo { + scanner := trivy.NewScanner(storeController, repoDB, log) - return &BaseCveInfo{Log: log, Scanner: scanner, LayoutUtils: layoutUtils} + return &BaseCveInfo{ + Log: log, + Scanner: scanner, + RepoDB: repoDB, + } } func (cveinfo BaseCveInfo) GetImageListForCVE(repo, cveID string) ([]ImageInfoByCVE, error) { imgList := make([]ImageInfoByCVE, 0) - manifests, err := cveinfo.LayoutUtils.GetImageManifests(repo) + repoMeta, err := cveinfo.RepoDB.GetRepoMeta(repo) if err != nil { - cveinfo.Log.Error().Err(err).Str("repo", repo).Msg("unable to get list of tags from repo") + cveinfo.Log.Error().Err(err).Str("repo", repo).Str("cve-id", cveID). + Msg("unable to get list of tags from repo") return imgList, err } - for _, manifest := range manifests { - tag := manifest.Annotations[ispec.AnnotationRefName] + for tag, descriptor := range repoMeta.Tags { + manifestDigestStr := descriptor.Digest + + manifestDigest, err := godigest.Parse(manifestDigestStr) + if err != nil { + cveinfo.Log.Error().Err(err).Str("repo", repo).Str("tag", tag). + Str("cve-id", cveID).Str("digest", manifestDigestStr).Msg("unable to parse digest") + + return nil, err + } + + manifestMeta, err := cveinfo.RepoDB.GetManifestMeta(repo, manifestDigest) + if err != nil { + return nil, err + } + + var manifestContent ispec.Manifest + + err = json.Unmarshal(manifestMeta.ManifestBlob, &manifestContent) + if err != nil { + cveinfo.Log.Error().Err(err).Str("repo", repo).Str("tag", tag). 
+ Str("cve-id", cveID).Msg("unable to unmashal manifest blob") + + continue + } image := fmt.Sprintf("%s:%s", repo, tag) @@ -79,19 +110,10 @@ func (cveinfo BaseCveInfo) GetImageListForCVE(repo, cveID string) ([]ImageInfoBy for id := range cveMap { if id == cveID { - digest := manifest.Digest - - imageBlobManifest, err := cveinfo.LayoutUtils.GetImageBlobManifest(repo, digest) - if err != nil { - cveinfo.Log.Error().Err(err).Msg("unable to read image blob manifest") - - return []ImageInfoByCVE{}, err - } - imgList = append(imgList, ImageInfoByCVE{ Tag: tag, - Digest: digest, - Manifest: imageBlobManifest, + Digest: manifestDigest, + Manifest: manifestContent, }) break @@ -103,24 +125,59 @@ func (cveinfo BaseCveInfo) GetImageListForCVE(repo, cveID string) ([]ImageInfoBy } func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(repo, cveID string) ([]common.TagInfo, error) { - tagsInfo, err := cveinfo.LayoutUtils.GetImageTagsWithTimestamp(repo) + repoMeta, err := cveinfo.RepoDB.GetRepoMeta(repo) if err != nil { - cveinfo.Log.Error().Err(err).Str("repo", repo).Msg("unable to get list of tags from repo") + cveinfo.Log.Error().Err(err).Str("repo", repo).Str("cve-id", cveID). + Msg("unable to get list of tags from repo") return []common.TagInfo{}, err } vulnerableTags := make([]common.TagInfo, 0) + allTags := make([]common.TagInfo, 0) - var hasCVE bool + for tag, descriptor := range repoMeta.Tags { + manifestDigestStr := descriptor.Digest - for _, tag := range tagsInfo { - image := fmt.Sprintf("%s:%s", repo, tag.Name) - tagInfo := common.TagInfo{Name: tag.Name, Timestamp: tag.Timestamp, Digest: tag.Digest} + manifestDigest, err := godigest.Parse(manifestDigestStr) + if err != nil { + cveinfo.Log.Error().Err(err).Str("repo", repo).Str("tag", tag). + Str("cve-id", cveID).Str("digest", manifestDigestStr).Msg("unable to parse digest") + + continue + } + + manifestMeta, err := cveinfo.RepoDB.GetManifestMeta(repo, manifestDigest) + if err != nil { + cveinfo.Log.Error().Err(err).Str("repo", repo).Str("tag", tag). + Str("cve-id", cveID).Msg("unable to obtain manifest meta") + + continue + } + + var configContent ispec.Image + + err = json.Unmarshal(manifestMeta.ConfigBlob, &configContent) + if err != nil { + cveinfo.Log.Error().Err(err).Str("repo", repo).Str("tag", tag). + Str("cve-id", cveID).Msg("unable to unmashal manifest blob") + + continue + } + + tagInfo := common.TagInfo{ + Name: tag, + Timestamp: common.GetImageLastUpdated(configContent), + Digest: manifestDigest, + } + + allTags = append(allTags, tagInfo) + + image := fmt.Sprintf("%s:%s", repo, tag) isValidImage, _ := cveinfo.Scanner.IsImageFormatScannable(image) if !isValidImage { - cveinfo.Log.Debug().Str("image", image). + cveinfo.Log.Debug().Str("image", image).Str("cve-id", cveID). Msg("image media type not supported for scanning, adding as a vulnerable image") vulnerableTags = append(vulnerableTags, tagInfo) @@ -130,7 +187,7 @@ func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(repo, cveID string) ([]commo cveMap, err := cveinfo.Scanner.ScanImage(image) if err != nil { - cveinfo.Log.Debug().Str("image", image). + cveinfo.Log.Debug().Str("image", image).Str("cve-id", cveID). 
Msg("scanning failed, adding as a vulnerable image") vulnerableTags = append(vulnerableTags, tagInfo) @@ -138,31 +195,24 @@ func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(repo, cveID string) ([]commo continue } - hasCVE = false - - for id := range cveMap { - if id == cveID { - hasCVE = true - - break - } - } - - if hasCVE { + if _, hasCVE := cveMap[cveID]; hasCVE { vulnerableTags = append(vulnerableTags, tagInfo) } } - if len(vulnerableTags) != 0 { - cveinfo.Log.Info().Str("repo", repo).Msg("comparing fixed tags timestamp") + var fixedTags []common.TagInfo - tagsInfo = common.GetFixedTags(tagsInfo, vulnerableTags) + if len(vulnerableTags) != 0 { + cveinfo.Log.Info().Str("repo", repo).Str("cve-id", cveID).Msgf("Vulnerable tags: %v", vulnerableTags) + fixedTags = common.GetFixedTags(allTags, vulnerableTags) + cveinfo.Log.Info().Str("repo", repo).Str("cve-id", cveID).Msgf("Fixed tags: %v", fixedTags) } else { cveinfo.Log.Info().Str("repo", repo).Str("cve-id", cveID). Msg("image does not contain any tag that have given cve") + fixedTags = allTags } - return tagsInfo, nil + return fixedTags, nil } func (cveinfo BaseCveInfo) GetCVEListForImage(image string) (map[string]cvemodel.CVE, error) { diff --git a/pkg/extensions/search/cve/cve_test.go b/pkg/extensions/search/cve/cve_test.go index 96e5eaff..829c1374 100644 --- a/pkg/extensions/search/cve/cve_test.go +++ b/pkg/extensions/search/cve/cve_test.go @@ -22,7 +22,7 @@ import ( . "github.com/smartystreets/goconvey/convey" "gopkg.in/resty.v1" - "zotregistry.io/zot/errors" + zerr "zotregistry.io/zot/errors" "zotregistry.io/zot/pkg/api" "zotregistry.io/zot/pkg/api/config" "zotregistry.io/zot/pkg/api/constants" @@ -31,21 +31,15 @@ import ( "zotregistry.io/zot/pkg/extensions/search/common" cveinfo "zotregistry.io/zot/pkg/extensions/search/cve" cvemodel "zotregistry.io/zot/pkg/extensions/search/cve/model" - "zotregistry.io/zot/pkg/extensions/search/cve/trivy" "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" + bolt "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" "zotregistry.io/zot/pkg/storage" "zotregistry.io/zot/pkg/storage/local" . 
"zotregistry.io/zot/pkg/test" "zotregistry.io/zot/pkg/test/mocks" ) -//nolint:gochecknoglobals -var ( - cve cveinfo.CveInfo - dbDir string - updateDuration time.Duration -) - const ( username = "test" passphrase = "test" @@ -82,42 +76,24 @@ type CVEResultForImage struct { CVEList []cvemodel.CVE `json:"CVEList"` } -func testSetup() error { - dir, err := os.MkdirTemp("", "util_test") +func testSetup(t *testing.T) (string, error) { + t.Helper() + dir := t.TempDir() + + err := generateTestData(dir) if err != nil { - return err + return "", err } - log := log.NewLogger("debug", "") - metrics := monitoring.NewMetricsServer(false, log) - - conf := config.New() - conf.Extensions = &extconf.ExtensionConfig{} - conf.Extensions.Lint = &extconf.LintConfig{} - - storeController := storage.StoreController{DefaultStore: local.NewImageStore(dir, false, storage.DefaultGCDelay, false, false, log, metrics, nil, nil)} - - layoutUtils := common.NewBaseOciLayoutUtils(storeController, log) - scanner := trivy.NewScanner(storeController, layoutUtils, log) - - cve = &cveinfo.BaseCveInfo{Log: log, Scanner: scanner, LayoutUtils: layoutUtils} - - dbDir = dir - - err = generateTestData() + err = CopyFiles("../../../../test/data", dir) if err != nil { - return err + return "", err } - err = CopyFiles("../../../../test/data", dbDir) - if err != nil { - return err - } - - return nil + return dir, nil } -func generateTestData() error { //nolint: gocyclo +func generateTestData(dbDir string) error { //nolint: gocyclo // Image dir with no files err := os.Mkdir(path.Join(dbDir, "zot-noindex-test"), 0o755) if err != nil { @@ -331,29 +307,38 @@ func makeTestFile(fileName, content string) error { func TestImageFormat(t *testing.T) { Convey("Test valid image", t, func() { log := log.NewLogger("debug", "") - dbDir := "../../../../test/data" + imgDir := "../../../../test/data" + dbDir := t.TempDir() conf := config.New() conf.Extensions = &extconf.ExtensionConfig{} conf.Extensions.Lint = &extconf.LintConfig{} metrics := monitoring.NewMetricsServer(false, log) - defaultStore := local.NewImageStore(dbDir, false, storage.DefaultGCDelay, + defaultStore := local.NewImageStore(imgDir, false, storage.DefaultGCDelay, false, false, log, metrics, nil, nil) storeController := storage.StoreController{DefaultStore: defaultStore} - cveInfo := cveinfo.NewCVEInfo(storeController, log) + repoDB, err := bolt.NewBoltDBWrapper(bolt.DBParameters{ + RootDir: dbDir, + }) + So(err, ShouldBeNil) + + err = repodb.SyncRepoDB(repoDB, storeController, log) + So(err, ShouldBeNil) + + cveInfo := cveinfo.NewCVEInfo(storeController, repoDB, log) isValidImage, err := cveInfo.Scanner.IsImageFormatScannable("zot-test") - So(err, ShouldBeNil) - So(isValidImage, ShouldEqual, true) + So(err, ShouldNotBeNil) + So(isValidImage, ShouldEqual, false) isValidImage, err = cveInfo.Scanner.IsImageFormatScannable("zot-test:0.0.1") So(err, ShouldBeNil) So(isValidImage, ShouldEqual, true) isValidImage, err = cveInfo.Scanner.IsImageFormatScannable("zot-test:0.0.") - So(err, ShouldBeNil) + So(err, ShouldNotBeNil) So(isValidImage, ShouldEqual, false) isValidImage, err = cveInfo.Scanner.IsImageFormatScannable("zot-noindex-test") @@ -390,18 +375,9 @@ func TestImageFormat(t *testing.T) { }) } -func TestDownloadDB(t *testing.T) { - Convey("Download DB passing invalid dir", t, func() { - err := testSetup() - So(err, ShouldBeNil) - }) -} - func TestCVESearch(t *testing.T) { Convey("Test image vulnerability scanning", t, func() { - updateDuration, _ = time.ParseDuration("1h") - dbDir := 
"../../../../test/data" - + updateDuration, _ := time.ParseDuration("1h") port := GetFreePort() baseURL := GetBaseURL(port) conf := config.New() @@ -409,6 +385,9 @@ func TestCVESearch(t *testing.T) { htpasswdPath := MakeHtpasswdFile() defer os.Remove(htpasswdPath) + dbDir, err := testSetup(t) + So(err, ShouldBeNil) + conf.HTTP.Auth = &config.AuthConfig{ HTPasswd: config.AuthHTPasswd{ Path: htpasswdPath, @@ -793,174 +772,242 @@ func TestHTTPOptionsResponse(t *testing.T) { func TestCVEStruct(t *testing.T) { Convey("Unit test the CVE struct", t, func() { - // Setup test image data in mock storage - layoutUtils := mocks.OciLayoutUtilsMock{ - GetImageManifestsFn: func(repo string) ([]ispec.Descriptor, error) { - // Valid image for scanning - if repo == "repo1" { //nolint: goconst - return []ispec.Descriptor{ - { - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(0), - Annotations: map[string]string{ - ispec.AnnotationRefName: "0.1.0", - }, - Digest: godigest.FromString("abcc"), - }, - { - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(0), - Annotations: map[string]string{ - ispec.AnnotationRefName: "1.0.0", - }, - Digest: godigest.FromString("abcd"), - }, - { - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(0), - Annotations: map[string]string{ - ispec.AnnotationRefName: "1.1.0", - }, - Digest: godigest.FromString("abce"), - }, - { - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(0), - Annotations: map[string]string{ - ispec.AnnotationRefName: "1.0.1", - }, - Digest: godigest.FromString("abcf"), - }, - }, nil - } + repoDB, err := bolt.NewBoltDBWrapper(bolt.DBParameters{ + RootDir: t.TempDir(), + }) + So(err, ShouldBeNil) - // Image with non-scannable blob - if repo == "repo2" { //nolint: goconst - return []ispec.Descriptor{ - { - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(0), - Annotations: map[string]string{ - ispec.AnnotationRefName: "1.0.0", - }, - Digest: godigest.FromString("abcd"), - }, - }, nil - } + // Create repodb data for scannable image with vulnerabilities + timeStamp11 := time.Date(2008, 1, 1, 12, 0, 0, 0, time.UTC) - // Image with no CVEs - if repo == "repo4" { //nolint: goconst - return []ispec.Descriptor{ - { - MediaType: "application/vnd.oci.image.manifest.v1+json", - Size: int64(0), - Annotations: map[string]string{ - ispec.AnnotationRefName: "1.0.0", - }, - Digest: godigest.FromString("abc"), - }, - }, nil - } + configBlob11, err := json.Marshal(ispec.Image{ + Created: &timeStamp11, + }) + So(err, ShouldBeNil) - // By default the image is not found - return nil, errors.ErrRepoNotFound + manifestBlob11, err := json.Marshal(ispec.Manifest{ + Layers: []ispec.Descriptor{ + { + MediaType: ispec.MediaTypeImageLayerGzip, + Size: 0, + Digest: godigest.NewDigestFromEncoded(godigest.SHA256, "digest"), + }, }, - GetImageTagsWithTimestampFn: func(repo string) ([]common.TagInfo, error) { - // Valid image for scanning - if repo == "repo1" { //nolint: goconst - return []common.TagInfo{ - { - Name: "0.1.0", - Digest: godigest.FromString("abcc"), - Timestamp: time.Date(2008, 1, 1, 12, 0, 0, 0, time.UTC), - }, - { - Name: "1.0.0", - Digest: godigest.FromString("abcd"), - Timestamp: time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC), - }, - { - Name: "1.1.0", - Digest: godigest.FromString("abce"), - Timestamp: time.Date(2010, 1, 1, 12, 0, 0, 0, time.UTC), - }, - { - Name: "1.0.1", - Digest: godigest.FromString("abcf"), - Timestamp: time.Date(2011, 1, 1, 12, 0, 0, 0, time.UTC), - 
}, - }, nil - } - - // Image with non-scannable blob - if repo == "repo2" { //nolint: goconst - return []common.TagInfo{ - { - Name: "1.0.0", - Digest: godigest.FromString("abcd"), - Timestamp: time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC), - }, - }, nil - } - - // Image with no vulnerabilities, repo3 is for tests on missing images - if repo == "repo4" { //nolint: goconst - return []common.TagInfo{ - { - Name: "1.0.0", - Digest: godigest.FromString("abc"), - Timestamp: time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC), - }, - }, nil - } - - // By default do not return any tags - return []common.TagInfo{}, errors.ErrRepoNotFound + Config: ispec.Descriptor{ + Digest: godigest.FromBytes(configBlob11), }, - GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (ispec.Manifest, error) { - // Valid image for scanning - if imageDir == "repo1" { //nolint: goconst - return ispec.Manifest{ - Layers: []ispec.Descriptor{ - { - MediaType: ispec.MediaTypeImageLayer, - Size: 0, - Digest: godigest.Digest(""), - }, - }, - }, nil - } + }) + So(err, ShouldBeNil) - // Image with non-scannable blob - if imageDir == "repo2" { //nolint: goconst - return ispec.Manifest{ - Layers: []ispec.Descriptor{ - { - MediaType: string(regTypes.OCIRestrictedLayer), - Size: 0, - Digest: godigest.Digest(""), - }, - }, - }, nil - } - - // Image with no CVEs - if imageDir == "repo4" { //nolint: goconst - return ispec.Manifest{ - Layers: []ispec.Descriptor{ - { - MediaType: string(ispec.MediaTypeImageLayer), - Size: 0, - Digest: godigest.Digest(""), - }, - }, - }, nil - } - - return ispec.Manifest{}, errors.ErrBlobNotFound - }, + repoMeta11 := repodb.ManifestMetadata{ + ManifestBlob: manifestBlob11, + ConfigBlob: configBlob11, + DownloadCount: 0, + Signatures: repodb.ManifestSignatures{}, } + digest11 := godigest.FromBytes(manifestBlob11) + err = repoDB.SetManifestMeta("repo1", digest11, repoMeta11) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("repo1", "0.1.0", digest11, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + timeStamp12 := time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC) + + configBlob12, err := json.Marshal(ispec.Image{ + Created: &timeStamp12, + }) + So(err, ShouldBeNil) + + manifestBlob12, err := json.Marshal(ispec.Manifest{ + Layers: []ispec.Descriptor{ + { + MediaType: ispec.MediaTypeImageLayerGzip, + Size: 0, + Digest: godigest.NewDigestFromEncoded(godigest.SHA256, "digest"), + }, + }, + Config: ispec.Descriptor{ + Digest: godigest.FromBytes(configBlob12), + }, + }) + So(err, ShouldBeNil) + + repoMeta12 := repodb.ManifestMetadata{ + ManifestBlob: manifestBlob12, + ConfigBlob: configBlob12, + DownloadCount: 0, + Signatures: repodb.ManifestSignatures{}, + } + + digest12 := godigest.FromBytes(manifestBlob12) + err = repoDB.SetManifestMeta("repo1", digest12, repoMeta12) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("repo1", "1.0.0", digest12, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + timeStamp13 := time.Date(2010, 1, 1, 12, 0, 0, 0, time.UTC) + + configBlob13, err := json.Marshal(ispec.Image{ + Created: &timeStamp13, + }) + So(err, ShouldBeNil) + + manifestBlob13, err := json.Marshal(ispec.Manifest{ + Layers: []ispec.Descriptor{ + { + MediaType: ispec.MediaTypeImageLayerGzip, + Size: 0, + Digest: godigest.NewDigestFromEncoded(godigest.SHA256, "digest"), + }, + }, + Config: ispec.Descriptor{ + Digest: godigest.FromBytes(configBlob13), + }, + }) + So(err, ShouldBeNil) + + repoMeta13 := repodb.ManifestMetadata{ + ManifestBlob: manifestBlob13, + ConfigBlob: configBlob13, + } + + 
digest13 := godigest.FromBytes(manifestBlob13) + err = repoDB.SetManifestMeta("repo1", digest13, repoMeta13) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("repo1", "1.1.0", digest13, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + timeStamp14 := time.Date(2011, 1, 1, 12, 0, 0, 0, time.UTC) + + configBlob14, err := json.Marshal(ispec.Image{ + Created: &timeStamp14, + }) + So(err, ShouldBeNil) + + manifestBlob14, err := json.Marshal(ispec.Manifest{ + Layers: []ispec.Descriptor{ + { + MediaType: ispec.MediaTypeImageLayerGzip, + Size: 0, + Digest: godigest.NewDigestFromEncoded(godigest.SHA256, "digest"), + }, + }, + Config: ispec.Descriptor{ + Digest: godigest.FromBytes(configBlob14), + }, + }) + So(err, ShouldBeNil) + + repoMeta14 := repodb.ManifestMetadata{ + ManifestBlob: manifestBlob14, + ConfigBlob: configBlob14, + } + + digest14 := godigest.FromBytes(manifestBlob14) + err = repoDB.SetManifestMeta("repo1", digest14, repoMeta14) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("repo1", "1.0.1", digest14, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + // Create repodb data for scannable image with no vulnerabilities + timeStamp61 := time.Date(2011, 1, 1, 12, 0, 0, 0, time.UTC) + + configBlob61, err := json.Marshal(ispec.Image{ + Created: &timeStamp61, + }) + So(err, ShouldBeNil) + + manifestBlob61, err := json.Marshal(ispec.Manifest{ + Layers: []ispec.Descriptor{ + { + MediaType: ispec.MediaTypeImageLayerGzip, + Size: 0, + Digest: godigest.NewDigestFromEncoded(godigest.SHA256, "digest"), + }, + }, + Config: ispec.Descriptor{ + Digest: godigest.FromBytes(configBlob61), + }, + }) + So(err, ShouldBeNil) + + repoMeta61 := repodb.ManifestMetadata{ + ManifestBlob: manifestBlob61, + ConfigBlob: configBlob61, + } + + digest61 := godigest.FromBytes(manifestBlob61) + err = repoDB.SetManifestMeta("repo6", digest61, repoMeta61) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("repo6", "1.0.0", digest61, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + // Create repodb data for image not supporting scanning + timeStamp21 := time.Date(2009, 1, 1, 12, 0, 0, 0, time.UTC) + + configBlob21, err := json.Marshal(ispec.Image{ + Created: &timeStamp21, + }) + So(err, ShouldBeNil) + + manifestBlob21, err := json.Marshal(ispec.Manifest{ + Layers: []ispec.Descriptor{ + { + MediaType: ispec.MediaTypeImageLayerNonDistributableGzip, + Size: 0, + Digest: godigest.NewDigestFromEncoded(godigest.SHA256, "digest"), + }, + }, + Config: ispec.Descriptor{ + Digest: godigest.FromBytes(configBlob21), + }, + }) + So(err, ShouldBeNil) + + repoMeta21 := repodb.ManifestMetadata{ + ManifestBlob: manifestBlob21, + ConfigBlob: configBlob21, + } + + digest21 := godigest.FromBytes(manifestBlob21) + err = repoDB.SetManifestMeta("repo2", digest21, repoMeta21) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("repo2", "1.0.0", digest21, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + // Create repodb data for invalid images/negative tests + manifestBlob31 := []byte("invalid manifest blob") + So(err, ShouldBeNil) + + repoMeta31 := repodb.ManifestMetadata{ + ManifestBlob: manifestBlob31, + } + + digest31 := godigest.FromBytes(manifestBlob31) + err = repoDB.SetManifestMeta("repo3", digest31, repoMeta31) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("repo3", "invalid-manifest", digest31, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + configBlob41 := []byte("invalid config blob") + So(err, ShouldBeNil) + + repoMeta41 := repodb.ManifestMetadata{ + ConfigBlob: configBlob41, + } + + digest41 := 
godigest.FromString("abc7") + err = repoDB.SetManifestMeta("repo4", digest41, repoMeta41) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("repo4", "invalid-config", digest41, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + digest51 := godigest.FromString("abc8") + err = repoDB.SetRepoTag("repo5", "nonexitent-manifest", digest51, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + // RepoDB loaded with initial data, mock the scanner severities := map[string]int{ "UNKNOWN": 0, "LOW": 1, @@ -1047,33 +1094,41 @@ func TestCVEStruct(t *testing.T) { // Almost same logic compared to actual Trivy specific implementation imageDir, inputTag := common.GetImageDirAndTag(image) - manifests, err := layoutUtils.GetImageManifests(imageDir) + repoMeta, err := repoDB.GetRepoMeta(imageDir) if err != nil { return false, err } - for _, manifest := range manifests { - tag, ok := manifest.Annotations[ispec.AnnotationRefName] + manifestDigestStr, ok := repoMeta.Tags[inputTag] + if !ok { + return false, zerr.ErrTagMetaNotFound + } - if ok && inputTag != "" && tag != inputTag { - continue - } + manifestDigest, err := godigest.Parse(manifestDigestStr.Digest) + if err != nil { + return false, err + } - blobManifest, err := layoutUtils.GetImageBlobManifest(imageDir, manifest.Digest) - if err != nil { - return false, err - } + manifestData, err := repoDB.GetManifestData(manifestDigest) + if err != nil { + return false, err + } - imageLayers := blobManifest.Layers + var manifestContent ispec.Manifest - for _, imageLayer := range imageLayers { - switch imageLayer.MediaType { - case ispec.MediaTypeImageLayer, ispec.MediaTypeImageLayerGzip, string(regTypes.DockerLayer): - return true, nil + err = json.Unmarshal(manifestData.ManifestBlob, &manifestContent) + if err != nil { + return false, zerr.ErrScanNotSupported + } - default: - return false, errors.ErrScanNotSupported - } + for _, imageLayer := range manifestContent.Layers { + switch imageLayer.MediaType { + case ispec.MediaTypeImageLayerGzip, ispec.MediaTypeImageLayer, string(regTypes.DockerLayer): + + return true, nil + default: + + return false, zerr.ErrScanNotSupported } } @@ -1082,225 +1137,249 @@ func TestCVEStruct(t *testing.T) { } log := log.NewLogger("debug", "") + cveInfo := cveinfo.BaseCveInfo{Log: log, Scanner: scanner, RepoDB: repoDB} - Convey("Test GetCVESummaryForImage", func() { - cveInfo := cveinfo.BaseCveInfo{Log: log, Scanner: scanner, LayoutUtils: layoutUtils} + t.Log("Test GetCVESummaryForImage") - // Image is found - cveSummary, err := cveInfo.GetCVESummaryForImage("repo1:0.1.0") - So(err, ShouldBeNil) - So(cveSummary.Count, ShouldEqual, 1) - So(cveSummary.MaxSeverity, ShouldEqual, "MEDIUM") + // Image is found + cveSummary, err := cveInfo.GetCVESummaryForImage("repo1:0.1.0") + So(err, ShouldBeNil) + So(cveSummary.Count, ShouldEqual, 1) + So(cveSummary.MaxSeverity, ShouldEqual, "MEDIUM") - cveSummary, err = cveInfo.GetCVESummaryForImage("repo1:1.0.0") - So(err, ShouldBeNil) - So(cveSummary.Count, ShouldEqual, 3) - So(cveSummary.MaxSeverity, ShouldEqual, "HIGH") + cveSummary, err = cveInfo.GetCVESummaryForImage("repo1:1.0.0") + So(err, ShouldBeNil) + So(cveSummary.Count, ShouldEqual, 3) + So(cveSummary.MaxSeverity, ShouldEqual, "HIGH") - cveSummary, err = cveInfo.GetCVESummaryForImage("repo1:1.0.1") - So(err, ShouldBeNil) - So(cveSummary.Count, ShouldEqual, 2) - So(cveSummary.MaxSeverity, ShouldEqual, "MEDIUM") + cveSummary, err = cveInfo.GetCVESummaryForImage("repo1:1.0.1") + So(err, ShouldBeNil) + So(cveSummary.Count, ShouldEqual, 2) 
+ So(cveSummary.MaxSeverity, ShouldEqual, "MEDIUM") - cveSummary, err = cveInfo.GetCVESummaryForImage("repo1:1.1.0") - So(err, ShouldBeNil) - So(cveSummary.Count, ShouldEqual, 1) - So(cveSummary.MaxSeverity, ShouldEqual, "LOW") + cveSummary, err = cveInfo.GetCVESummaryForImage("repo1:1.1.0") + So(err, ShouldBeNil) + So(cveSummary.Count, ShouldEqual, 1) + So(cveSummary.MaxSeverity, ShouldEqual, "LOW") - // Image is not scannable - cveSummary, err = cveInfo.GetCVESummaryForImage("repo2:1.0.0") - So(err, ShouldEqual, errors.ErrScanNotSupported) - So(cveSummary.Count, ShouldEqual, 0) - So(cveSummary.MaxSeverity, ShouldEqual, "") + cveSummary, err = cveInfo.GetCVESummaryForImage("repo6:1.0.0") + So(err, ShouldBeNil) + So(cveSummary.Count, ShouldEqual, 0) + So(cveSummary.MaxSeverity, ShouldEqual, "NONE") - // Image is not found - cveSummary, err = cveInfo.GetCVESummaryForImage("repo3:1.0.0") - So(err, ShouldEqual, errors.ErrRepoNotFound) - So(cveSummary.Count, ShouldEqual, 0) - So(cveSummary.MaxSeverity, ShouldEqual, "") + // Image is not scannable + cveSummary, err = cveInfo.GetCVESummaryForImage("repo2:1.0.0") + So(err, ShouldEqual, zerr.ErrScanNotSupported) + So(cveSummary.Count, ShouldEqual, 0) + So(cveSummary.MaxSeverity, ShouldEqual, "") - // Image has no vulnerabilities - cveSummary, err = cveInfo.GetCVESummaryForImage("repo4:1.0.0") - So(err, ShouldBeNil) - So(cveSummary.Count, ShouldEqual, 0) - So(cveSummary.MaxSeverity, ShouldEqual, "NONE") - }) + // Tag is not found + cveSummary, err = cveInfo.GetCVESummaryForImage("repo3:1.0.0") + So(err, ShouldEqual, zerr.ErrTagMetaNotFound) + So(cveSummary.Count, ShouldEqual, 0) + So(cveSummary.MaxSeverity, ShouldEqual, "") - Convey("Test GetCVEListForImage", func() { - cveInfo := cveinfo.BaseCveInfo{Log: log, Scanner: scanner, LayoutUtils: layoutUtils} + // Manifest is not found + cveSummary, err = cveInfo.GetCVESummaryForImage("repo5:nonexitent-manifest") + So(err, ShouldEqual, zerr.ErrManifestDataNotFound) + So(cveSummary.Count, ShouldEqual, 0) + So(cveSummary.MaxSeverity, ShouldEqual, "") - // Image is found - cveMap, err := cveInfo.GetCVEListForImage("repo1:0.1.0") - So(err, ShouldBeNil) - So(len(cveMap), ShouldEqual, 1) - So(cveMap, ShouldContainKey, "CVE1") - So(cveMap, ShouldNotContainKey, "CVE2") - So(cveMap, ShouldNotContainKey, "CVE3") + // Repo is not found + cveSummary, err = cveInfo.GetCVESummaryForImage("repo100:1.0.0") + So(err, ShouldEqual, zerr.ErrRepoMetaNotFound) + So(cveSummary.Count, ShouldEqual, 0) + So(cveSummary.MaxSeverity, ShouldEqual, "") - cveMap, err = cveInfo.GetCVEListForImage("repo1:1.0.0") - So(err, ShouldBeNil) - So(len(cveMap), ShouldEqual, 3) - So(cveMap, ShouldContainKey, "CVE1") - So(cveMap, ShouldContainKey, "CVE2") - So(cveMap, ShouldContainKey, "CVE3") + t.Log("Test GetCVEListForImage") - cveMap, err = cveInfo.GetCVEListForImage("repo1:1.0.1") - So(err, ShouldBeNil) - So(len(cveMap), ShouldEqual, 2) - So(cveMap, ShouldContainKey, "CVE1") - So(cveMap, ShouldNotContainKey, "CVE2") - So(cveMap, ShouldContainKey, "CVE3") + // Image is found + cveMap, err := cveInfo.GetCVEListForImage("repo1:0.1.0") + So(err, ShouldBeNil) + So(len(cveMap), ShouldEqual, 1) + So(cveMap, ShouldContainKey, "CVE1") + So(cveMap, ShouldNotContainKey, "CVE2") + So(cveMap, ShouldNotContainKey, "CVE3") - cveMap, err = cveInfo.GetCVEListForImage("repo1:1.1.0") - So(err, ShouldBeNil) - So(len(cveMap), ShouldEqual, 1) - So(cveMap, ShouldNotContainKey, "CVE1") - So(cveMap, ShouldNotContainKey, "CVE2") - So(cveMap, ShouldContainKey, "CVE3") + 
cveMap, err = cveInfo.GetCVEListForImage("repo1:1.0.0") + So(err, ShouldBeNil) + So(len(cveMap), ShouldEqual, 3) + So(cveMap, ShouldContainKey, "CVE1") + So(cveMap, ShouldContainKey, "CVE2") + So(cveMap, ShouldContainKey, "CVE3") - // Image is not scannable - cveMap, err = cveInfo.GetCVEListForImage("repo2:1.0.0") - So(err, ShouldEqual, errors.ErrScanNotSupported) - So(len(cveMap), ShouldEqual, 0) + cveMap, err = cveInfo.GetCVEListForImage("repo1:1.0.1") + So(err, ShouldBeNil) + So(len(cveMap), ShouldEqual, 2) + So(cveMap, ShouldContainKey, "CVE1") + So(cveMap, ShouldNotContainKey, "CVE2") + So(cveMap, ShouldContainKey, "CVE3") - // Image is not found - cveMap, err = cveInfo.GetCVEListForImage("repo3:1.0.0") - So(err, ShouldEqual, errors.ErrRepoNotFound) - So(len(cveMap), ShouldEqual, 0) + cveMap, err = cveInfo.GetCVEListForImage("repo1:1.1.0") + So(err, ShouldBeNil) + So(len(cveMap), ShouldEqual, 1) + So(cveMap, ShouldNotContainKey, "CVE1") + So(cveMap, ShouldNotContainKey, "CVE2") + So(cveMap, ShouldContainKey, "CVE3") - // Image has no vulnerabilities - cveMap, err = cveInfo.GetCVEListForImage("repo4:1.0.0") - So(err, ShouldBeNil) - So(len(cveMap), ShouldEqual, 0) - }) + cveMap, err = cveInfo.GetCVEListForImage("repo6:1.0.0") + So(err, ShouldBeNil) + So(len(cveMap), ShouldEqual, 0) - Convey("Test GetImageListWithCVEFixed", func() { - cveInfo := cveinfo.BaseCveInfo{Log: log, Scanner: scanner, LayoutUtils: layoutUtils} + // Image is not scannable + cveMap, err = cveInfo.GetCVEListForImage("repo2:1.0.0") + So(err, ShouldEqual, zerr.ErrScanNotSupported) + So(len(cveMap), ShouldEqual, 0) - // Image is found - tagList, err := cveInfo.GetImageListWithCVEFixed("repo1", "CVE1") - So(err, ShouldBeNil) - So(len(tagList), ShouldEqual, 1) - So(tagList[0].Name, ShouldEqual, "1.1.0") + // Tag is not found + cveMap, err = cveInfo.GetCVEListForImage("repo3:1.0.0") + So(err, ShouldEqual, zerr.ErrTagMetaNotFound) + So(len(cveMap), ShouldEqual, 0) - tagList, err = cveInfo.GetImageListWithCVEFixed("repo1", "CVE2") - So(err, ShouldBeNil) - So(len(tagList), ShouldEqual, 2) - So(tagList[0].Name, ShouldEqual, "1.1.0") - So(tagList[1].Name, ShouldEqual, "1.0.1") + // Manifest is not found + cveMap, err = cveInfo.GetCVEListForImage("repo5:nonexitent-manifest") + So(err, ShouldEqual, zerr.ErrManifestDataNotFound) + So(len(cveMap), ShouldEqual, 0) - tagList, err = cveInfo.GetImageListWithCVEFixed("repo1", "CVE3") - So(err, ShouldBeNil) - // CVE3 is not present in 0.1.0, but that is older than all other - // images where it is present. The rest of the images explicitly have it. - // This means we consider it not fixed in any image. 
- So(len(tagList), ShouldEqual, 0) + // Repo is not found + cveMap, err = cveInfo.GetCVEListForImage("repo100:1.0.0") + So(err, ShouldEqual, zerr.ErrRepoMetaNotFound) + So(len(cveMap), ShouldEqual, 0) - // Image is not scannable - tagList, err = cveInfo.GetImageListWithCVEFixed("repo2", "CVE100") - // CVE is not considered fixed as scan is not possible - // but do not return an error - So(err, ShouldBeNil) - So(len(tagList), ShouldEqual, 0) + t.Log("Test GetImageListWithCVEFixed") - // Image is not found - tagList, err = cveInfo.GetImageListWithCVEFixed("repo3", "CVE101") - So(err, ShouldEqual, errors.ErrRepoNotFound) - So(len(tagList), ShouldEqual, 0) + // Image is found + tagList, err := cveInfo.GetImageListWithCVEFixed("repo1", "CVE1") + So(err, ShouldBeNil) + So(len(tagList), ShouldEqual, 1) + So(tagList[0].Name, ShouldEqual, "1.1.0") - // Image has no vulnerabilities - tagList, err = cveInfo.GetImageListWithCVEFixed("repo4", "CVE101") - So(err, ShouldBeNil) - So(len(tagList), ShouldEqual, 1) - So(tagList[0].Name, ShouldEqual, "1.0.0") - }) + tagList, err = cveInfo.GetImageListWithCVEFixed("repo1", "CVE2") + So(err, ShouldBeNil) + So(len(tagList), ShouldEqual, 2) + expectedTags := []string{"1.0.1", "1.1.0"} + So(expectedTags, ShouldContain, tagList[0].Name) + So(expectedTags, ShouldContain, tagList[1].Name) - Convey("Test GetImageListForCVE", func() { - cveInfo := cveinfo.BaseCveInfo{Log: log, Scanner: scanner, LayoutUtils: layoutUtils} + tagList, err = cveInfo.GetImageListWithCVEFixed("repo1", "CVE3") + So(err, ShouldBeNil) + // CVE3 is not present in 0.1.0, but that is older than all other + // images where it is present. The rest of the images explicitly have it. + // This means we consider it not fixed in any image. + So(len(tagList), ShouldEqual, 0) - // Image is found - imageInfoByCveList, err := cveInfo.GetImageListForCVE("repo1", "CVE1") - So(err, ShouldBeNil) - So(len(imageInfoByCveList), ShouldEqual, 3) - So(imageInfoByCveList[0].Tag, ShouldEqual, "0.1.0") - So(imageInfoByCveList[1].Tag, ShouldEqual, "1.0.0") - So(imageInfoByCveList[2].Tag, ShouldEqual, "1.0.1") + // Image doesn't have any CVEs in the first place + tagList, err = cveInfo.GetImageListWithCVEFixed("repo6", "CVE1") + So(err, ShouldBeNil) + So(len(tagList), ShouldEqual, 1) + So(tagList[0].Name, ShouldEqual, "1.0.0") - imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo1", "CVE2") - So(err, ShouldBeNil) - So(len(imageInfoByCveList), ShouldEqual, 1) - So(imageInfoByCveList[0].Tag, ShouldEqual, "1.0.0") + // Image is not scannable + tagList, err = cveInfo.GetImageListWithCVEFixed("repo2", "CVE100") + // CVE is not considered fixed as scan is not possible + // but do not return an error + So(err, ShouldBeNil) + So(len(tagList), ShouldEqual, 0) - imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo1", "CVE3") - So(err, ShouldBeNil) - So(len(imageInfoByCveList), ShouldEqual, 3) - So(imageInfoByCveList[0].Tag, ShouldEqual, "1.0.0") - So(imageInfoByCveList[1].Tag, ShouldEqual, "1.1.0") - So(imageInfoByCveList[2].Tag, ShouldEqual, "1.0.1") + // Tag is not found, but we should not error + tagList, err = cveInfo.GetImageListWithCVEFixed("repo3", "CVE101") + So(err, ShouldBeNil) + So(len(tagList), ShouldEqual, 0) - // Image is not scannable - imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo2", "CVE100") - // Image is not considered affected with CVE as scan is not possible - // but do not return an error - So(err, ShouldBeNil) - So(len(imageInfoByCveList), ShouldEqual, 0) + // Manifest is not found, we 
just consider exclude it from the fixed list + tagList, err = cveInfo.GetImageListWithCVEFixed("repo5", "CVE101") + So(err, ShouldBeNil) + So(len(tagList), ShouldEqual, 0) - // Image is not found - imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo3", "CVE101") - So(err, ShouldEqual, errors.ErrRepoNotFound) - So(len(imageInfoByCveList), ShouldEqual, 0) + // Repo is not found, there could potentially be unaffected tags in the repo + // but we can't access their data + tagList, err = cveInfo.GetImageListWithCVEFixed("repo100", "CVE100") + So(err, ShouldEqual, zerr.ErrRepoMetaNotFound) + So(len(tagList), ShouldEqual, 0) - // Image/repo is not vulnerable - imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo4", "CVE101") - So(err, ShouldBeNil) - So(len(imageInfoByCveList), ShouldEqual, 0) - }) + t.Log("Test GetImageListForCVE") - Convey("Test errors while scanning", func() { - localScanner := scanner + // Image is found + imageInfoByCveList, err := cveInfo.GetImageListForCVE("repo1", "CVE1") + So(err, ShouldBeNil) + So(len(imageInfoByCveList), ShouldEqual, 3) + expectedTags = []string{"0.1.0", "1.0.0", "1.0.1"} + So(expectedTags, ShouldContain, imageInfoByCveList[0].Tag) + So(expectedTags, ShouldContain, imageInfoByCveList[1].Tag) + So(expectedTags, ShouldContain, imageInfoByCveList[2].Tag) - localScanner.ScanImageFn = func(image string) (map[string]cvemodel.CVE, error) { + imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo1", "CVE2") + So(err, ShouldBeNil) + So(len(imageInfoByCveList), ShouldEqual, 1) + So(imageInfoByCveList[0].Tag, ShouldEqual, "1.0.0") + + imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo1", "CVE3") + So(err, ShouldBeNil) + So(len(imageInfoByCveList), ShouldEqual, 3) + expectedTags = []string{"1.0.0", "1.0.1", "1.1.0"} + So(expectedTags, ShouldContain, imageInfoByCveList[0].Tag) + So(expectedTags, ShouldContain, imageInfoByCveList[1].Tag) + So(expectedTags, ShouldContain, imageInfoByCveList[2].Tag) + + // Image/repo doesn't have the CVE at all + imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo6", "CVE1") + So(err, ShouldBeNil) + So(len(imageInfoByCveList), ShouldEqual, 0) + + // Image is not scannable + imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo2", "CVE100") + // Image is not considered affected with CVE as scan is not possible + // but do not return an error + So(err, ShouldBeNil) + So(len(imageInfoByCveList), ShouldEqual, 0) + + // Tag is not found, but we should not error + imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo3", "CVE101") + So(err, ShouldBeNil) + So(len(imageInfoByCveList), ShouldEqual, 0) + + // Manifest is not found, assume it is affetected by the CVE + // But we don't have enough of it's data to actually return it + imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo5", "CVE101") + So(err, ShouldEqual, zerr.ErrManifestMetaNotFound) + So(len(imageInfoByCveList), ShouldEqual, 0) + + // Repo is not found, assume it is affetected by the CVE + // But we don't have enough of it's data to actually return it + imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo100", "CVE100") + So(err, ShouldEqual, zerr.ErrRepoMetaNotFound) + So(len(imageInfoByCveList), ShouldEqual, 0) + + t.Log("Test errors while scanning") + + faultyScanner := mocks.CveScannerMock{ + ScanImageFn: func(image string) (map[string]cvemodel.CVE, error) { // Could be any type of error, let's reuse this one - return nil, errors.ErrScanNotSupported - } + return nil, zerr.ErrScanNotSupported + }, + } - cveInfo := 
cveinfo.BaseCveInfo{Log: log, Scanner: localScanner, LayoutUtils: layoutUtils} + cveInfo = cveinfo.BaseCveInfo{Log: log, Scanner: faultyScanner, RepoDB: repoDB} - cveSummary, err := cveInfo.GetCVESummaryForImage("repo1:0.1.0") - So(err, ShouldNotBeNil) - So(cveSummary.Count, ShouldEqual, 0) - So(cveSummary.MaxSeverity, ShouldEqual, "") + cveSummary, err = cveInfo.GetCVESummaryForImage("repo1:0.1.0") + So(err, ShouldNotBeNil) + So(cveSummary.Count, ShouldEqual, 0) + So(cveSummary.MaxSeverity, ShouldEqual, "") - cveMap, err := cveInfo.GetCVEListForImage("repo1:0.1.0") - So(err, ShouldNotBeNil) - So(cveMap, ShouldBeNil) + cveMap, err = cveInfo.GetCVEListForImage("repo1:0.1.0") + So(err, ShouldNotBeNil) + So(cveMap, ShouldBeNil) - tagList, err := cveInfo.GetImageListWithCVEFixed("repo1", "CVE1") - // CVE is not considered fixed as scan is not possible - // but do not return an error - So(err, ShouldBeNil) - So(len(tagList), ShouldEqual, 0) + tagList, err = cveInfo.GetImageListWithCVEFixed("repo1", "CVE1") + // CVE is not considered fixed as scan is not possible + // but do not return an error + So(err, ShouldBeNil) + So(len(tagList), ShouldEqual, 0) - imageInfoByCveList, err := cveInfo.GetImageListForCVE("repo1", "CVE1") - // Image is not considered affected with CVE as scan is not possible - // but do not return an error - So(err, ShouldBeNil) - So(len(imageInfoByCveList), ShouldEqual, 0) - }) - - Convey("Test error while reading blob manifest", func() { - localLayoutUtils := layoutUtils - localLayoutUtils.GetImageBlobManifestFn = func(imageDir string, - digest godigest.Digest, - ) (ispec.Manifest, error) { - return ispec.Manifest{}, errors.ErrBlobNotFound - } - - cveInfo := cveinfo.BaseCveInfo{Log: log, Scanner: scanner, LayoutUtils: localLayoutUtils} - - imageInfoByCveList, err := cveInfo.GetImageListForCVE("repo1", "CVE1") - So(err, ShouldNotBeNil) - So(len(imageInfoByCveList), ShouldEqual, 0) - }) + imageInfoByCveList, err = cveInfo.GetImageListForCVE("repo1", "CVE1") + // Image is not considered affected with CVE as scan is not possible + // but do not return an error + So(err, ShouldBeNil) + So(len(imageInfoByCveList), ShouldEqual, 0) }) } diff --git a/pkg/extensions/search/cve/trivy/scanner.go b/pkg/extensions/search/cve/trivy/scanner.go index baea59ea..0613894a 100644 --- a/pkg/extensions/search/cve/trivy/scanner.go +++ b/pkg/extensions/search/cve/trivy/scanner.go @@ -1,6 +1,7 @@ package trivy import ( + "encoding/json" "flag" "path" "strings" @@ -11,13 +12,15 @@ import ( "github.com/aquasecurity/trivy/pkg/commands/operation" "github.com/aquasecurity/trivy/pkg/types" regTypes "github.com/google/go-containerregistry/pkg/v1/types" + godigest "github.com/opencontainers/go-digest" ispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/urfave/cli/v2" - "zotregistry.io/zot/errors" + zerr "zotregistry.io/zot/errors" "zotregistry.io/zot/pkg/extensions/search/common" cvemodel "zotregistry.io/zot/pkg/extensions/search/cve/model" "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" "zotregistry.io/zot/pkg/storage" ) @@ -69,7 +72,7 @@ type cveTrivyController struct { } type Scanner struct { - layoutUtils common.OciLayoutUtils + repoDB repodb.RepoDB cveController cveTrivyController storeController storage.StoreController log log.Logger @@ -77,7 +80,7 @@ type Scanner struct { } func NewScanner(storeController storage.StoreController, - layoutUtils common.OciLayoutUtils, log log.Logger, + repoDB repodb.RepoDB, log log.Logger, ) *Scanner { cveController := 
cveTrivyController{} @@ -107,7 +110,7 @@ func NewScanner(storeController storage.StoreController, return &Scanner{ log: log, - layoutUtils: layoutUtils, + repoDB: repoDB, cveController: cveController, storeController: storeController, dbLock: &sync.Mutex{}, @@ -146,36 +149,44 @@ func (scanner Scanner) getTrivyContext(image string) *trivyCtx { func (scanner Scanner) IsImageFormatScannable(image string) (bool, error) { imageDir, inputTag := common.GetImageDirAndTag(image) - manifests, err := scanner.layoutUtils.GetImageManifests(imageDir) + repoMeta, err := scanner.repoDB.GetRepoMeta(imageDir) if err != nil { return false, err } - for _, manifest := range manifests { - tag, ok := manifest.Annotations[ispec.AnnotationRefName] + manifestDigestStr, ok := repoMeta.Tags[inputTag] + if !ok { + return false, zerr.ErrTagMetaNotFound + } - if ok && inputTag != "" && tag != inputTag { - continue - } + manifestDigest, err := godigest.Parse(manifestDigestStr.Digest) + if err != nil { + return false, err + } - blobManifest, err := scanner.layoutUtils.GetImageBlobManifest(imageDir, manifest.Digest) - if err != nil { - return false, err - } + manifestData, err := scanner.repoDB.GetManifestData(manifestDigest) + if err != nil { + return false, err + } - imageLayers := blobManifest.Layers + var manifestContent ispec.Manifest - for _, imageLayer := range imageLayers { - switch imageLayer.MediaType { - case ispec.MediaTypeImageLayer, ispec.MediaTypeImageLayerGzip, string(regTypes.DockerLayer): - return true, nil + err = json.Unmarshal(manifestData.ManifestBlob, &manifestContent) + if err != nil { + scanner.log.Error().Err(err).Str("image", image).Msg("unable to unmashal manifest blob") - default: - scanner.log.Debug().Str("image", - image).Msgf("image media type %s not supported for scanning", imageLayer.MediaType) + return false, zerr.ErrScanNotSupported + } - return false, errors.ErrScanNotSupported - } + for _, imageLayer := range manifestContent.Layers { + switch imageLayer.MediaType { + case ispec.MediaTypeImageLayerGzip, ispec.MediaTypeImageLayer, string(regTypes.DockerLayer): + return true, nil + default: + scanner.log.Debug().Str("image", image). 
+ Msgf("image media type %s not supported for scanning", imageLayer.MediaType) + + return false, zerr.ErrScanNotSupported } } @@ -185,7 +196,7 @@ func (scanner Scanner) IsImageFormatScannable(image string) (bool, error) { func (scanner Scanner) ScanImage(image string) (map[string]cvemodel.CVE, error) { cveidMap := make(map[string]cvemodel.CVE) - scanner.log.Info().Str("image", image).Msg("scanning image") + scanner.log.Debug().Str("image", image).Msg("scanning image") tCtx := scanner.getTrivyContext(image) diff --git a/pkg/extensions/search/cve/trivy/scanner_internal_test.go b/pkg/extensions/search/cve/trivy/scanner_internal_test.go index db0de360..0c90d6f0 100644 --- a/pkg/extensions/search/cve/trivy/scanner_internal_test.go +++ b/pkg/extensions/search/cve/trivy/scanner_internal_test.go @@ -16,6 +16,8 @@ import ( "zotregistry.io/zot/pkg/extensions/monitoring" "zotregistry.io/zot/pkg/extensions/search/common" "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" + bolt "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" "zotregistry.io/zot/pkg/storage" "zotregistry.io/zot/pkg/storage/local" "zotregistry.io/zot/pkg/test" @@ -83,9 +85,15 @@ func TestMultipleStoragePath(t *testing.T) { storeController.SubStore = subStore - layoutUtils := common.NewBaseOciLayoutUtils(storeController, log) + repoDB, err := bolt.NewBoltDBWrapper(bolt.DBParameters{ + RootDir: firstRootDir, + }) + So(err, ShouldBeNil) - scanner := NewScanner(storeController, layoutUtils, log) + err = repodb.SyncRepoDB(repoDB, storeController, log) + So(err, ShouldBeNil) + + scanner := NewScanner(storeController, repoDB, log) So(scanner.storeController.DefaultStore, ShouldNotBeNil) So(scanner.storeController.SubStore, ShouldNotBeNil) diff --git a/pkg/extensions/search/gql_generated/generated.go b/pkg/extensions/search/gql_generated/generated.go index 58504ab9..f15c756e 100644 --- a/pkg/extensions/search/gql_generated/generated.go +++ b/pkg/extensions/search/gql_generated/generated.go @@ -64,6 +64,7 @@ type ComplexityRoot struct { GlobalSearchResult struct { Images func(childComplexity int) int Layers func(childComplexity int) int + Page func(childComplexity int) int Repos func(childComplexity int) int } @@ -126,19 +127,26 @@ type ComplexityRoot struct { Name func(childComplexity int) int } + PageInfo struct { + NextPage func(childComplexity int) int + ObjectCount func(childComplexity int) int + Pages func(childComplexity int) int + PreviousPage func(childComplexity int) int + } + Query struct { BaseImageList func(childComplexity int, image string) int CVEListForImage func(childComplexity int, image string) int DerivedImageList func(childComplexity int, image string) int ExpandedRepoInfo func(childComplexity int, repo string) int - GlobalSearch func(childComplexity int, query string) int + GlobalSearch func(childComplexity int, query string, filter *Filter, requestedPage *PageInput) int Image func(childComplexity int, image string) int ImageList func(childComplexity int, repo string) int ImageListForCve func(childComplexity int, id string) int ImageListForDigest func(childComplexity int, id string) int ImageListWithCVEFixed func(childComplexity int, id string, image string) int Referrers func(childComplexity int, repo string, digest string, typeArg string) int - RepoListWithNewestImage func(childComplexity int) int + RepoListWithNewestImage func(childComplexity int, requestedPage *PageInput) int } Referrer struct { @@ -157,6 +165,7 @@ type ComplexityRoot struct { RepoSummary struct { DownloadCount func(childComplexity 
int) int IsBookmarked func(childComplexity int) int + IsStarred func(childComplexity int) int LastUpdated func(childComplexity int) int Name func(childComplexity int) int NewestImage func(childComplexity int) int @@ -173,10 +182,10 @@ type QueryResolver interface { ImageListForCve(ctx context.Context, id string) ([]*ImageSummary, error) ImageListWithCVEFixed(ctx context.Context, id string, image string) ([]*ImageSummary, error) ImageListForDigest(ctx context.Context, id string) ([]*ImageSummary, error) - RepoListWithNewestImage(ctx context.Context) ([]*RepoSummary, error) + RepoListWithNewestImage(ctx context.Context, requestedPage *PageInput) ([]*RepoSummary, error) ImageList(ctx context.Context, repo string) ([]*ImageSummary, error) ExpandedRepoInfo(ctx context.Context, repo string) (*RepoInfo, error) - GlobalSearch(ctx context.Context, query string) (*GlobalSearchResult, error) + GlobalSearch(ctx context.Context, query string, filter *Filter, requestedPage *PageInput) (*GlobalSearchResult, error) DerivedImageList(ctx context.Context, image string) ([]*ImageSummary, error) BaseImageList(ctx context.Context, image string) ([]*ImageSummary, error) Image(ctx context.Context, image string) (*ImageSummary, error) @@ -275,6 +284,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.GlobalSearchResult.Layers(childComplexity), true + case "GlobalSearchResult.Page": + if e.complexity.GlobalSearchResult.Page == nil { + break + } + + return e.complexity.GlobalSearchResult.Page(childComplexity), true + case "GlobalSearchResult.Repos": if e.complexity.GlobalSearchResult.Repos == nil { break @@ -548,6 +564,34 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return e.complexity.PackageInfo.Name(childComplexity), true + case "PageInfo.NextPage": + if e.complexity.PageInfo.NextPage == nil { + break + } + + return e.complexity.PageInfo.NextPage(childComplexity), true + + case "PageInfo.ObjectCount": + if e.complexity.PageInfo.ObjectCount == nil { + break + } + + return e.complexity.PageInfo.ObjectCount(childComplexity), true + + case "PageInfo.Pages": + if e.complexity.PageInfo.Pages == nil { + break + } + + return e.complexity.PageInfo.Pages(childComplexity), true + + case "PageInfo.PreviousPage": + if e.complexity.PageInfo.PreviousPage == nil { + break + } + + return e.complexity.PageInfo.PreviousPage(childComplexity), true + case "Query.BaseImageList": if e.complexity.Query.BaseImageList == nil { break @@ -606,7 +650,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 0, false } - return e.complexity.Query.GlobalSearch(childComplexity, args["query"].(string)), true + return e.complexity.Query.GlobalSearch(childComplexity, args["query"].(string), args["filter"].(*Filter), args["requestedPage"].(*PageInput)), true case "Query.Image": if e.complexity.Query.Image == nil { @@ -685,7 +729,12 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in break } - return e.complexity.Query.RepoListWithNewestImage(childComplexity), true + args, err := ec.field_Query_RepoListWithNewestImage_args(context.TODO(), rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.RepoListWithNewestImage(childComplexity, args["requestedPage"].(*PageInput)), true case "Referrer.Annotations": if e.complexity.Referrer.Annotations == nil { @@ -750,6 +799,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in return 
e.complexity.RepoSummary.IsBookmarked(childComplexity), true + case "RepoSummary.IsStarred": + if e.complexity.RepoSummary.IsStarred == nil { + break + } + + return e.complexity.RepoSummary.IsStarred(childComplexity), true + case "RepoSummary.LastUpdated": if e.complexity.RepoSummary.LastUpdated == nil { break @@ -813,7 +869,10 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { rc := graphql.GetOperationContext(ctx) ec := executionContext{rc, e} - inputUnmarshalMap := graphql.BuildUnmarshalerMap() + inputUnmarshalMap := graphql.BuildUnmarshalerMap( + ec.unmarshalInputFilter, + ec.unmarshalInputPageInput, + ) first := true switch rc.Operation.Operation { @@ -902,6 +961,7 @@ type RepoInfo { Search everything. Can search Images, Repos and Layers """ type GlobalSearchResult { + Page: PageInfo Images: [ImageSummary] Repos: [RepoSummary] Layers: [LayerSummary] @@ -926,7 +986,7 @@ type ImageSummary { DownloadCount: Int Layers: [LayerSummary] Description: String - Licenses: String + Licenses: String # The value of the annotation if present, 'unknown' otherwise). Labels: String Title: String Source: String @@ -952,10 +1012,11 @@ type RepoSummary { Platforms: [OsArch] Vendors: [String] Score: Int - NewestImage: ImageSummary + NewestImage: ImageSummary # Newest based on created timestamp DownloadCount: Int StarCount: Int IsBookmarked: Boolean + IsStarred: Boolean } # Currently the same as LayerInfo, we can refactor later @@ -1015,6 +1076,35 @@ type OsArch { Arch: String } +enum SortCriteria { + RELEVANCE + UPDATE_TIME + ALPHABETIC_ASC + ALPHABETIC_DSC + STARS + DOWNLOADS +} + +type PageInfo { + ObjectCount: Int! + PreviousPage: Int + NextPage: Int + Pages: Int +} + +# Pagination parameters +input PageInput { + limit: Int + offset: Int + sortBy: SortCriteria +} + +input Filter { + Os: [String] + Arch: [String] + HasToBeSigned: Boolean +} + type Query { """ Returns a CVE list for the image specified in the arugment @@ -1039,7 +1129,7 @@ type Query { """ Returns a list of repos with the newest tag within """ - RepoListWithNewestImage: [RepoSummary!]! # Newest based on created timestamp + RepoListWithNewestImage(requestedPage: PageInput): [RepoSummary!]! # Newest based on created timestamp """ Returns all the images from the specified repo @@ -1054,7 +1144,7 @@ type Query { """ Searches within repos, images, and layers """ - GlobalSearch(query: String!): GlobalSearchResult! + GlobalSearch(query: String!, filter: Filter, requestedPage: PageInput): GlobalSearchResult! 
""" List of images which use the argument image @@ -1157,6 +1247,24 @@ func (ec *executionContext) field_Query_GlobalSearch_args(ctx context.Context, r } } args["query"] = arg0 + var arg1 *Filter + if tmp, ok := rawArgs["filter"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("filter")) + arg1, err = ec.unmarshalOFilter2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐFilter(ctx, tmp) + if err != nil { + return nil, err + } + } + args["filter"] = arg1 + var arg2 *PageInput + if tmp, ok := rawArgs["requestedPage"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("requestedPage")) + arg2, err = ec.unmarshalOPageInput2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐPageInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["requestedPage"] = arg2 return args, nil } @@ -1277,6 +1385,21 @@ func (ec *executionContext) field_Query_Referrers_args(ctx context.Context, rawA return args, nil } +func (ec *executionContext) field_Query_RepoListWithNewestImage_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { + var err error + args := map[string]interface{}{} + var arg0 *PageInput + if tmp, ok := rawArgs["requestedPage"]; ok { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("requestedPage")) + arg0, err = ec.unmarshalOPageInput2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐPageInput(ctx, tmp) + if err != nil { + return nil, err + } + } + args["requestedPage"] = arg0 + return args, nil +} + func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} @@ -1719,6 +1842,57 @@ func (ec *executionContext) fieldContext_CVEResultForImage_CVEList(ctx context.C return fc, nil } +func (ec *executionContext) _GlobalSearchResult_Page(ctx context.Context, field graphql.CollectedField, obj *GlobalSearchResult) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_GlobalSearchResult_Page(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Page, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*PageInfo) + fc.Result = res + return ec.marshalOPageInfo2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐPageInfo(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_GlobalSearchResult_Page(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "GlobalSearchResult", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "ObjectCount": + return ec.fieldContext_PageInfo_ObjectCount(ctx, field) + case "PreviousPage": + return ec.fieldContext_PageInfo_PreviousPage(ctx, field) + case "NextPage": + return ec.fieldContext_PageInfo_NextPage(ctx, field) + case "Pages": + return ec.fieldContext_PageInfo_Pages(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type PageInfo", field.Name) + }, + } + return fc, nil +} 
+ func (ec *executionContext) _GlobalSearchResult_Images(ctx context.Context, field graphql.CollectedField, obj *GlobalSearchResult) (ret graphql.Marshaler) { fc, err := ec.fieldContext_GlobalSearchResult_Images(ctx, field) if err != nil { @@ -1860,6 +2034,8 @@ func (ec *executionContext) fieldContext_GlobalSearchResult_Repos(ctx context.Co return ec.fieldContext_RepoSummary_StarCount(ctx, field) case "IsBookmarked": return ec.fieldContext_RepoSummary_IsBookmarked(ctx, field) + case "IsStarred": + return ec.fieldContext_RepoSummary_IsStarred(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type RepoSummary", field.Name) }, @@ -3520,6 +3696,173 @@ func (ec *executionContext) fieldContext_PackageInfo_FixedVersion(ctx context.Co return fc, nil } +func (ec *executionContext) _PageInfo_ObjectCount(ctx context.Context, field graphql.CollectedField, obj *PageInfo) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_PageInfo_ObjectCount(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.ObjectCount, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_PageInfo_ObjectCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "PageInfo", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _PageInfo_PreviousPage(ctx context.Context, field graphql.CollectedField, obj *PageInfo) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_PageInfo_PreviousPage(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.PreviousPage, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_PageInfo_PreviousPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "PageInfo", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _PageInfo_NextPage(ctx context.Context, field graphql.CollectedField, obj *PageInfo) (ret graphql.Marshaler) { + 
fc, err := ec.fieldContext_PageInfo_NextPage(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.NextPage, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_PageInfo_NextPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "PageInfo", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _PageInfo_Pages(ctx context.Context, field graphql.CollectedField, obj *PageInfo) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_PageInfo_Pages(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.Pages, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*int) + fc.Result = res + return ec.marshalOInt2ᚖint(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_PageInfo_Pages(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "PageInfo", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) _Query_CVEListForImage(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Query_CVEListForImage(ctx, field) if err != nil { @@ -3883,7 +4226,7 @@ func (ec *executionContext) _Query_RepoListWithNewestImage(ctx context.Context, }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().RepoListWithNewestImage(rctx) + return ec.resolvers.Query().RepoListWithNewestImage(rctx, fc.Args["requestedPage"].(*PageInput)) }) if err != nil { ec.Error(ctx, err) @@ -3928,10 +4271,23 @@ func (ec *executionContext) fieldContext_Query_RepoListWithNewestImage(ctx conte return ec.fieldContext_RepoSummary_StarCount(ctx, field) case "IsBookmarked": return ec.fieldContext_RepoSummary_IsBookmarked(ctx, field) + case "IsStarred": + return ec.fieldContext_RepoSummary_IsStarred(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type RepoSummary", field.Name) }, } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } 
+ }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_RepoListWithNewestImage_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return + } return fc, nil } @@ -4106,7 +4462,7 @@ func (ec *executionContext) _Query_GlobalSearch(ctx context.Context, field graph }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().GlobalSearch(rctx, fc.Args["query"].(string)) + return ec.resolvers.Query().GlobalSearch(rctx, fc.Args["query"].(string), fc.Args["filter"].(*Filter), fc.Args["requestedPage"].(*PageInput)) }) if err != nil { ec.Error(ctx, err) @@ -4131,6 +4487,8 @@ func (ec *executionContext) fieldContext_Query_GlobalSearch(ctx context.Context, IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { + case "Page": + return ec.fieldContext_GlobalSearchResult_Page(ctx, field) case "Images": return ec.fieldContext_GlobalSearchResult_Images(ctx, field) case "Repos": @@ -4994,6 +5352,8 @@ func (ec *executionContext) fieldContext_RepoInfo_Summary(ctx context.Context, f return ec.fieldContext_RepoSummary_StarCount(ctx, field) case "IsBookmarked": return ec.fieldContext_RepoSummary_IsBookmarked(ctx, field) + case "IsStarred": + return ec.fieldContext_RepoSummary_IsStarred(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type RepoSummary", field.Name) }, @@ -5461,6 +5821,47 @@ func (ec *executionContext) fieldContext_RepoSummary_IsBookmarked(ctx context.Co return fc, nil } +func (ec *executionContext) _RepoSummary_IsStarred(ctx context.Context, field graphql.CollectedField, obj *RepoSummary) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_RepoSummary_IsStarred(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) { + ctx = rctx // use context from middleware stack in children + return obj.IsStarred, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*bool) + fc.Result = res + return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_RepoSummary_IsStarred(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "RepoSummary", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type Boolean does not have child fields") + }, + } + return fc, nil +} + func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) { fc, err := ec.fieldContext___Directive_name(ctx, field) if err != nil { @@ -7234,6 +7635,94 @@ func (ec *executionContext) fieldContext___Type_specifiedByURL(ctx context.Conte // region **************************** input.gotpl ***************************** +func (ec *executionContext) unmarshalInputFilter(ctx context.Context, obj interface{}) (Filter, error) { + var it Filter + asMap := map[string]interface{}{} + for k, v := range 
obj.(map[string]interface{}) { + asMap[k] = v + } + + fieldsInOrder := [...]string{"Os", "Arch", "HasToBeSigned"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "Os": + var err error + + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("Os")) + it.Os, err = ec.unmarshalOString2ᚕᚖstring(ctx, v) + if err != nil { + return it, err + } + case "Arch": + var err error + + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("Arch")) + it.Arch, err = ec.unmarshalOString2ᚕᚖstring(ctx, v) + if err != nil { + return it, err + } + case "HasToBeSigned": + var err error + + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("HasToBeSigned")) + it.HasToBeSigned, err = ec.unmarshalOBoolean2ᚖbool(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputPageInput(ctx context.Context, obj interface{}) (PageInput, error) { + var it PageInput + asMap := map[string]interface{}{} + for k, v := range obj.(map[string]interface{}) { + asMap[k] = v + } + + fieldsInOrder := [...]string{"limit", "offset", "sortBy"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "limit": + var err error + + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("limit")) + it.Limit, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "offset": + var err error + + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("offset")) + it.Offset, err = ec.unmarshalOInt2ᚖint(ctx, v) + if err != nil { + return it, err + } + case "sortBy": + var err error + + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("sortBy")) + it.SortBy, err = ec.unmarshalOSortCriteria2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐSortCriteria(ctx, v) + if err != nil { + return it, err + } + } + } + + return it, nil +} + // endregion **************************** input.gotpl ***************************** // region ************************** interface.gotpl *************************** @@ -7351,6 +7840,10 @@ func (ec *executionContext) _GlobalSearchResult(ctx context.Context, sel ast.Sel switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("GlobalSearchResult") + case "Page": + + out.Values[i] = ec._GlobalSearchResult_Page(ctx, field, obj) + case "Images": out.Values[i] = ec._GlobalSearchResult_Images(ctx, field, obj) @@ -7673,6 +8166,46 @@ func (ec *executionContext) _PackageInfo(ctx context.Context, sel ast.SelectionS return out } +var pageInfoImplementors = []string{"PageInfo"} + +func (ec *executionContext) _PageInfo(ctx context.Context, sel ast.SelectionSet, obj *PageInfo) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, pageInfoImplementors) + out := graphql.NewFieldSet(fields) + var invalids uint32 + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("PageInfo") + case "ObjectCount": + + out.Values[i] = ec._PageInfo_ObjectCount(ctx, field, obj) + + if out.Values[i] == graphql.Null { + invalids++ + } + case "PreviousPage": + + out.Values[i] = ec._PageInfo_PreviousPage(ctx, field, obj) + + case "NextPage": + + out.Values[i] = ec._PageInfo_NextPage(ctx, field, obj) + + case "Pages": + + out.Values[i] = ec._PageInfo_Pages(ctx, field, obj) + + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch() + if invalids > 0 { + return graphql.Null + } + return out 
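+	// Editorial sketch (comment only): PageInfo is the pagination metadata attached to
+	// paginated results such as GlobalSearchResult.Page. ObjectCount is the only
+	// non-nullable field above, so PreviousPage, NextPage and Pages may be null.
+	// An illustrative selection (hypothetical): Page { ObjectCount PreviousPage NextPage Pages }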
+} + var queryImplementors = []string{"Query"} func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { @@ -8093,6 +8626,10 @@ func (ec *executionContext) _RepoSummary(ctx context.Context, sel ast.SelectionS out.Values[i] = ec._RepoSummary_IsBookmarked(ctx, field, obj) + case "IsStarred": + + out.Values[i] = ec._RepoSummary_IsStarred(ctx, field, obj) + default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -8513,6 +9050,21 @@ func (ec *executionContext) marshalNImageSummary2ᚖzotregistryᚗioᚋzotᚋpkg return ec._ImageSummary(ctx, sel, v) } +func (ec *executionContext) unmarshalNInt2int(ctx context.Context, v interface{}) (int, error) { + res, err := graphql.UnmarshalInt(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler { + res := graphql.MarshalInt(v) + if res == graphql.Null { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + } + return res +} + func (ec *executionContext) marshalNReferrer2ᚕᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐReferrer(ctx context.Context, sel ast.SelectionSet, v []*Referrer) graphql.Marshaler { ret := make(graphql.Array, len(v)) var wg sync.WaitGroup @@ -8968,6 +9520,14 @@ func (ec *executionContext) marshalOCVE2ᚖzotregistryᚗioᚋzotᚋpkgᚋextens return ec._CVE(ctx, sel, v) } +func (ec *executionContext) unmarshalOFilter2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐFilter(ctx context.Context, v interface{}) (*Filter, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalInputFilter(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + func (ec *executionContext) marshalOHistoryDescription2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐHistoryDescription(ctx context.Context, sel ast.SelectionSet, v *HistoryDescription) graphql.Marshaler { if v == nil { return graphql.Null @@ -9285,6 +9845,21 @@ func (ec *executionContext) marshalOPackageInfo2ᚖzotregistryᚗioᚋzotᚋpkg return ec._PackageInfo(ctx, sel, v) } +func (ec *executionContext) marshalOPageInfo2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐPageInfo(ctx context.Context, sel ast.SelectionSet, v *PageInfo) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._PageInfo(ctx, sel, v) +} + +func (ec *executionContext) unmarshalOPageInput2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐPageInput(ctx context.Context, v interface{}) (*PageInput, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalInputPageInput(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + func (ec *executionContext) marshalOReferrer2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐReferrer(ctx context.Context, sel ast.SelectionSet, v *Referrer) graphql.Marshaler { if v == nil { return graphql.Null @@ -9340,6 +9915,22 @@ func (ec *executionContext) marshalORepoSummary2ᚖzotregistryᚗioᚋzotᚋpkg return ec._RepoSummary(ctx, sel, v) } +func (ec *executionContext) unmarshalOSortCriteria2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐSortCriteria(ctx context.Context, v interface{}) (*SortCriteria, error) { + if v == nil { + return nil, nil + } + var res = new(SortCriteria) + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) 
marshalOSortCriteria2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐSortCriteria(ctx context.Context, sel ast.SelectionSet, v *SortCriteria) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return v +} + func (ec *executionContext) unmarshalOString2ᚕᚖstring(ctx context.Context, v interface{}) ([]*string, error) { if v == nil { return nil, nil diff --git a/pkg/extensions/search/gql_generated/models_gen.go b/pkg/extensions/search/gql_generated/models_gen.go index 388ea9e1..1c1f2014 100644 --- a/pkg/extensions/search/gql_generated/models_gen.go +++ b/pkg/extensions/search/gql_generated/models_gen.go @@ -3,6 +3,9 @@ package gql_generated import ( + "fmt" + "io" + "strconv" "time" ) @@ -26,8 +29,15 @@ type CVEResultForImage struct { CVEList []*Cve `json:"CVEList"` } +type Filter struct { + Os []*string `json:"Os"` + Arch []*string `json:"Arch"` + HasToBeSigned *bool `json:"HasToBeSigned"` +} + // Search everything. Can search Images, Repos and Layers type GlobalSearchResult struct { + Page *PageInfo `json:"Page"` Images []*ImageSummary `json:"Images"` Repos []*RepoSummary `json:"Repos"` Layers []*LayerSummary `json:"Layers"` @@ -100,6 +110,19 @@ type PackageInfo struct { FixedVersion *string `json:"FixedVersion"` } +type PageInfo struct { + ObjectCount int `json:"ObjectCount"` + PreviousPage *int `json:"PreviousPage"` + NextPage *int `json:"NextPage"` + Pages *int `json:"Pages"` +} + +type PageInput struct { + Limit *int `json:"limit"` + Offset *int `json:"offset"` + SortBy *SortCriteria `json:"sortBy"` +} + type Referrer struct { MediaType *string `json:"MediaType"` ArtifactType *string `json:"ArtifactType"` @@ -126,4 +149,54 @@ type RepoSummary struct { DownloadCount *int `json:"DownloadCount"` StarCount *int `json:"StarCount"` IsBookmarked *bool `json:"IsBookmarked"` + IsStarred *bool `json:"IsStarred"` +} + +type SortCriteria string + +const ( + SortCriteriaRelevance SortCriteria = "RELEVANCE" + SortCriteriaUpdateTime SortCriteria = "UPDATE_TIME" + SortCriteriaAlphabeticAsc SortCriteria = "ALPHABETIC_ASC" + SortCriteriaAlphabeticDsc SortCriteria = "ALPHABETIC_DSC" + SortCriteriaStars SortCriteria = "STARS" + SortCriteriaDownloads SortCriteria = "DOWNLOADS" +) + +var AllSortCriteria = []SortCriteria{ + SortCriteriaRelevance, + SortCriteriaUpdateTime, + SortCriteriaAlphabeticAsc, + SortCriteriaAlphabeticDsc, + SortCriteriaStars, + SortCriteriaDownloads, +} + +func (e SortCriteria) IsValid() bool { + switch e { + case SortCriteriaRelevance, SortCriteriaUpdateTime, SortCriteriaAlphabeticAsc, SortCriteriaAlphabeticDsc, SortCriteriaStars, SortCriteriaDownloads: + return true + } + return false +} + +func (e SortCriteria) String() string { + return string(e) +} + +func (e *SortCriteria) UnmarshalGQL(v interface{}) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = SortCriteria(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid SortCriteria", str) + } + return nil +} + +func (e SortCriteria) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) } diff --git a/pkg/extensions/search/resolver.go b/pkg/extensions/search/resolver.go index f9c525fc..745e8e3c 100644 --- a/pkg/extensions/search/resolver.go +++ b/pkg/extensions/search/resolver.go @@ -6,40 +6,52 @@ package search import ( "context" - "fmt" - "sort" - "strconv" "strings" "github.com/99designs/gqlgen/graphql" godigest "github.com/opencontainers/go-digest" ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" 
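+	// Editorial sketch (comment only): github.com/pkg/errors is used further down to wrap
+	// zot's sentinel errors (imported as zerr) with request details, e.g.
+	// errors.Wrapf(zerr.ErrInvalidRequestParams, ...) in validateGlobalSearchInput.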
"github.com/vektah/gqlparser/v2/gqlerror" - "zotregistry.io/zot/errors" + zerr "zotregistry.io/zot/errors" "zotregistry.io/zot/pkg/extensions/search/common" + "zotregistry.io/zot/pkg/extensions/search/convert" cveinfo "zotregistry.io/zot/pkg/extensions/search/cve" digestinfo "zotregistry.io/zot/pkg/extensions/search/digest" "zotregistry.io/zot/pkg/extensions/search/gql_generated" "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" localCtx "zotregistry.io/zot/pkg/requestcontext" "zotregistry.io/zot/pkg/storage" ) // THIS CODE IS A STARTING POINT ONLY. IT WILL NOT BE UPDATED WITH SCHEMA CHANGES. +const ( + querySizeLimit = 256 +) + // Resolver ... type Resolver struct { cveInfo cveinfo.CveInfo + repoDB repodb.RepoDB storeController storage.StoreController digestInfo *digestinfo.DigestInfo log log.Logger } // GetResolverConfig ... -func GetResolverConfig(log log.Logger, storeController storage.StoreController, cveInfo cveinfo.CveInfo, +func GetResolverConfig(log log.Logger, storeController storage.StoreController, + repoDB repodb.RepoDB, cveInfo cveinfo.CveInfo, ) gql_generated.Config { digestInfo := digestinfo.NewDigestInfo(storeController, log) - resConfig := &Resolver{cveInfo: cveInfo, storeController: storeController, digestInfo: digestInfo, log: log} + resConfig := &Resolver{ + cveInfo: cveInfo, + repoDB: repoDB, + storeController: storeController, + digestInfo: digestInfo, + log: log, + } return gql_generated.Config{ Resolvers: resConfig, Directives: gql_generated.DirectiveRoot{}, @@ -47,6 +59,22 @@ func GetResolverConfig(log log.Logger, storeController storage.StoreController, } } +func NewResolver(log log.Logger, storeController storage.StoreController, + repoDB repodb.RepoDB, cveInfo cveinfo.CveInfo, +) *Resolver { + digestInfo := digestinfo.NewDigestInfo(storeController, log) + + resolver := &Resolver{ + cveInfo: cveInfo, + repoDB: repoDB, + storeController: storeController, + digestInfo: digestInfo, + log: log, + } + + return resolver +} + func (r *queryResolver) getImageListForDigest(repoList []string, digest string) ([]*gql_generated.ImageSummary, error) { imgResultForDigest := []*gql_generated.ImageSummary{} olu := common.NewBaseOciLayoutUtils(r.storeController, r.log) @@ -70,7 +98,7 @@ func (r *queryResolver) getImageListForDigest(repoList []string, digest string) } isSigned := olu.CheckManifestSignature(repo, imageInfo.Digest) - imageInfo := BuildImageInfo(repo, imageInfo.Tag, imageInfo.Digest, + imageInfo := convert.BuildImageInfo(repo, imageInfo.Tag, imageInfo.Digest, imageInfo.Manifest, imageConfig, isSigned) imgResultForDigest = append(imgResultForDigest, imageInfo) @@ -80,430 +108,342 @@ func (r *queryResolver) getImageListForDigest(repoList []string, digest string) return imgResultForDigest, errResult } -func repoListWithNewestImage( - ctx context.Context, - repoList []string, - olu common.OciLayoutUtils, - cveInfo cveinfo.CveInfo, - log log.Logger, -) ([]*gql_generated.RepoSummary, error) { //nolint:unparam - reposSummary := []*gql_generated.RepoSummary{} - - for _, repo := range repoList { - lastUpdatedTag, err := olu.GetRepoLastUpdated(repo) - if err != nil { - msg := fmt.Sprintf("can't get last updated manifest for repo: %s", repo) - log.Error().Err(err).Msg(msg) - - graphql.AddError(ctx, gqlerror.Errorf(msg)) - - continue - } - - repoSize := int64(0) - repoBlob2Size := make(map[string]int64, 10) - - manifests, err := olu.GetImageManifests(repo) - if err != nil { - msg := fmt.Sprintf("can't get manifests for repo: %s", repo) - - 
log.Error().Err(err).Msg(msg) - graphql.AddError(ctx, gqlerror.Errorf(msg)) - - continue - } - - repoPlatforms := make([]*gql_generated.OsArch, 0) - repoVendors := make([]*string, 0, len(manifests)) - repoName := repo - - var lastUpdatedImageSummary gql_generated.ImageSummary - - var brokenManifest bool - - for _, manifest := range manifests { - imageLayersSize := int64(0) - manifestSize := olu.GetImageManifestSize(repo, manifest.Digest) - - imageBlobManifest, err := olu.GetImageBlobManifest(repo, manifest.Digest) - if err != nil { - msg := fmt.Sprintf("reference not found for manifest %s", manifest.Digest) - - log.Error().Err(err).Msg(msg) - graphql.AddError(ctx, gqlerror.Errorf(msg)) - - brokenManifest = true - - continue - } - - configSize := imageBlobManifest.Config.Size - repoBlob2Size[manifest.Digest.String()] = manifestSize - repoBlob2Size[imageBlobManifest.Config.Digest.String()] = configSize - - for _, layer := range imageBlobManifest.Layers { - repoBlob2Size[layer.Digest.String()] = layer.Size - imageLayersSize += layer.Size - } - - imageSize := imageLayersSize + manifestSize + configSize - - imageConfigInfo, err := olu.GetImageConfigInfo(repo, manifest.Digest) - if err != nil { - msg := fmt.Sprintf("can't get image config for manifest %s", manifest.Digest) - - log.Error().Err(err).Msg(msg) - graphql.AddError(ctx, gqlerror.Errorf(msg)) - - brokenManifest = true - - continue - } - - os, arch := olu.GetImagePlatform(imageConfigInfo) - osArch := &gql_generated.OsArch{ - Os: &os, - Arch: &arch, - } - repoPlatforms = append(repoPlatforms, osArch) - - // get image info from manifest annotation, if not found get from image config labels. - annotations := common.GetAnnotations(imageBlobManifest.Annotations, imageConfigInfo.Config.Labels) - - repoVendors = append(repoVendors, &annotations.Vendor) - - manifestTag, ok := manifest.Annotations[ispec.AnnotationRefName] - if !ok { - msg := fmt.Sprintf("reference not found for manifest %s in repo %s", - manifest.Digest.String(), repoName) - - log.Error().Msg(msg) - graphql.AddError(ctx, gqlerror.Errorf(msg)) - - brokenManifest = true - - break - } - - imageCveSummary := cveinfo.ImageCVESummary{} - // Check if vulnerability scanning is disabled - if cveInfo != nil { - imageName := fmt.Sprintf("%s:%s", repoName, manifestTag) - imageCveSummary, err = cveInfo.GetCVESummaryForImage(imageName) - - if err != nil { - // Log the error, but we should still include the manifest in results - msg := fmt.Sprintf( - "unable to run vulnerability scan on tag %s in repo %s", - manifestTag, - repoName, - ) - - log.Error().Msg(msg) - graphql.AddError(ctx, gqlerror.Errorf(msg)) - } - } - - authors := annotations.Authors - if authors == "" { - authors = imageConfigInfo.Author - } - - tag := manifestTag - size := strconv.Itoa(int(imageSize)) - manifestDigest := manifest.Digest.String() - configDigest := imageBlobManifest.Config.Digest.String() - isSigned := olu.CheckManifestSignature(repo, manifest.Digest) - lastUpdated := common.GetImageLastUpdated(imageConfigInfo) - score := 0 - - imageSummary := gql_generated.ImageSummary{ - RepoName: &repoName, - Tag: &tag, - LastUpdated: &lastUpdated, - Digest: &manifestDigest, - ConfigDigest: &configDigest, - IsSigned: &isSigned, - Size: &size, - Platform: osArch, - Vendor: &annotations.Vendor, - Score: &score, - Description: &annotations.Description, - Title: &annotations.Title, - Documentation: &annotations.Documentation, - Licenses: &annotations.Licenses, - Labels: &annotations.Labels, - Source: &annotations.Source, - 
Vulnerabilities: &gql_generated.ImageVulnerabilitySummary{ - MaxSeverity: &imageCveSummary.MaxSeverity, - Count: &imageCveSummary.Count, - }, - Authors: &authors, - } - - if manifest.Digest.String() == lastUpdatedTag.Digest.String() { - lastUpdatedImageSummary = imageSummary - } - } - - if brokenManifest { - continue - } - - for blob := range repoBlob2Size { - repoSize += repoBlob2Size[blob] - } - - repoSizeStr := strconv.FormatInt(repoSize, 10) - index := 0 - - reposSummary = append(reposSummary, &gql_generated.RepoSummary{ - Name: &repoName, - LastUpdated: &lastUpdatedTag.Timestamp, - Size: &repoSizeStr, - Platforms: repoPlatforms, - Vendors: repoVendors, - Score: &index, - NewestImage: &lastUpdatedImageSummary, - }) +func getImageSummary(ctx context.Context, repo, tag string, repoDB repodb.RepoDB, + cveInfo cveinfo.CveInfo, log log.Logger, //nolint:unparam +) ( + *gql_generated.ImageSummary, error, +) { + repoMeta, err := repoDB.GetRepoMeta(repo) + if err != nil { + return nil, err } - return reposSummary, nil + manifestDescriptor, ok := repoMeta.Tags[tag] + if !ok { + return nil, gqlerror.Errorf("can't find image: %s:%s", repo, tag) + } + + manifestDigest := manifestDescriptor.Digest + + for t := range repoMeta.Tags { + if t != tag { + delete(repoMeta.Tags, t) + } + } + + manifestMeta, err := repoDB.GetManifestMeta(repo, godigest.Digest(manifestDigest)) + if err != nil { + return nil, err + } + + manifestMetaMap := map[string]repodb.ManifestMetadata{ + manifestDigest: manifestMeta, + } + + skip := convert.SkipQGLField{ + Vulnerabilities: canSkipField(convert.GetPreloads(ctx), "Vulnerabilities"), + } + + imageSummaries := convert.RepoMeta2ImageSummaries(ctx, repoMeta, manifestMetaMap, skip, cveInfo) + + return imageSummaries[0], nil } -func cleanQuerry(query string) string { - query = strings.ToLower(query) - query = strings.Replace(query, ":", " ", 1) +func repoListWithNewestImage( + ctx context.Context, + cveInfo cveinfo.CveInfo, + log log.Logger, //nolint:unparam // may be used by devs for debugging + requestedPage *gql_generated.PageInput, + repoDB repodb.RepoDB, +) ([]*gql_generated.RepoSummary, error) { + repos := []*gql_generated.RepoSummary{} - return query + if requestedPage == nil { + requestedPage = &gql_generated.PageInput{} + } + + skip := convert.SkipQGLField{ + Vulnerabilities: canSkipField(convert.GetPreloads(ctx), "NewestImage.Vulnerabilities"), + } + + pageInput := repodb.PageInput{ + Limit: safeDerefferencing(requestedPage.Limit, 0), + Offset: safeDerefferencing(requestedPage.Offset, 0), + SortBy: repodb.SortCriteria( + safeDerefferencing(requestedPage.SortBy, gql_generated.SortCriteriaUpdateTime), + ), + } + + reposMeta, manifestMetaMap, err := repoDB.SearchRepos(ctx, "", repodb.Filter{}, pageInput) + if err != nil { + return []*gql_generated.RepoSummary{}, err + } + + for _, repoMeta := range reposMeta { + repoSummary := convert.RepoMeta2RepoSummary(ctx, repoMeta, manifestMetaMap, skip, cveInfo) + repos = append(repos, repoSummary) + } + + return repos, nil } -func globalSearch(repoList []string, name, tag string, olu common.OciLayoutUtils, - cveInfo cveinfo.CveInfo, log log.Logger) ( - []*gql_generated.RepoSummary, []*gql_generated.ImageSummary, []*gql_generated.LayerSummary, +func globalSearch(ctx context.Context, query string, repoDB repodb.RepoDB, filter *gql_generated.Filter, + requestedPage *gql_generated.PageInput, cveInfo cveinfo.CveInfo, log log.Logger, //nolint:unparam +) ([]*gql_generated.RepoSummary, []*gql_generated.ImageSummary, 
[]*gql_generated.LayerSummary, error, ) { + preloads := convert.GetPreloads(ctx) repos := []*gql_generated.RepoSummary{} images := []*gql_generated.ImageSummary{} layers := []*gql_generated.LayerSummary{} - for _, repo := range repoList { - repo := repo + if requestedPage == nil { + requestedPage = &gql_generated.PageInput{} + } - // map used for dedube if 2 images reference the same blob - repoBlob2Size := make(map[string]int64, 10) + localFilter := repodb.Filter{} + if filter != nil { + localFilter = repodb.Filter{ + Os: filter.Os, + Arch: filter.Arch, + HasToBeSigned: filter.HasToBeSigned, + } + } - // made up of all manifests, configs and image layers - repoSize := int64(0) - - lastUpdatedTag, err := olu.GetRepoLastUpdated(repo) - if err != nil { - log.Error().Err(err).Msgf("can't find latest updated tag for repo: %s", repo) + if searchingForRepos(query) { + skip := convert.SkipQGLField{ + Vulnerabilities: canSkipField(preloads, "Repos.NewestImage.Vulnerabilities"), } - manifests, err := olu.GetImageManifests(repo) + pageInput := repodb.PageInput{ + Limit: safeDerefferencing(requestedPage.Limit, 0), + Offset: safeDerefferencing(requestedPage.Offset, 0), + SortBy: repodb.SortCriteria( + safeDerefferencing(requestedPage.SortBy, gql_generated.SortCriteriaRelevance), + ), + } + + reposMeta, manifestMetaMap, err := repoDB.SearchRepos(ctx, query, localFilter, pageInput) if err != nil { - log.Error().Err(err).Msgf("can't get manifests for repo: %s", repo) + return []*gql_generated.RepoSummary{}, []*gql_generated.ImageSummary{}, []*gql_generated.LayerSummary{}, err + } + + for _, repoMeta := range reposMeta { + repoSummary := convert.RepoMeta2RepoSummary(ctx, repoMeta, manifestMetaMap, skip, cveInfo) + + repos = append(repos, repoSummary) + } + } else { // search for images + skip := convert.SkipQGLField{ + Vulnerabilities: canSkipField(preloads, "Images.Vulnerabilities"), + } + + pageInput := repodb.PageInput{ + Limit: safeDerefferencing(requestedPage.Limit, 0), + Offset: safeDerefferencing(requestedPage.Offset, 0), + SortBy: repodb.SortCriteria( + safeDerefferencing(requestedPage.SortBy, gql_generated.SortCriteriaRelevance), + ), + } + + reposMeta, manifestMetaMap, err := repoDB.SearchTags(ctx, query, localFilter, pageInput) + if err != nil { + return []*gql_generated.RepoSummary{}, []*gql_generated.ImageSummary{}, []*gql_generated.LayerSummary{}, err + } + + for _, repoMeta := range reposMeta { + imageSummaries := convert.RepoMeta2ImageSummaries(ctx, repoMeta, manifestMetaMap, skip, cveInfo) + + images = append(images, imageSummaries...) + } + } + + return repos, images, layers, nil +} + +func canSkipField(preloads map[string]bool, s string) bool { + fieldIsPresent := preloads[s] + + return !fieldIsPresent +} + +func validateGlobalSearchInput(query string, filter *gql_generated.Filter, + requestedPage *gql_generated.PageInput, +) error { + if len(query) > querySizeLimit { + format := "global-search: max string size limit exeeded for query parameter. max=%d current=%d" + + return errors.Wrapf(zerr.ErrInvalidRequestParams, format, querySizeLimit, len(query)) + } + + err := checkFilter(filter) + if err != nil { + return err + } + + err = checkRequestedPage(requestedPage) + if err != nil { + return err + } + + return nil +} + +func checkFilter(filter *gql_generated.Filter) error { + if filter == nil { + return nil + } + + for _, arch := range filter.Arch { + if len(*arch) > querySizeLimit { + format := "global-search: max string size limit exeeded for arch parameter. 
max=%d current=%d" + + return errors.Wrapf(zerr.ErrInvalidRequestParams, format, querySizeLimit, len(*arch)) + } + } + + for _, osSys := range filter.Os { + if len(*osSys) > querySizeLimit { + format := "global-search: max string size limit exeeded for os parameter. max=%d current=%d" + + return errors.Wrapf(zerr.ErrInvalidRequestParams, format, querySizeLimit, len(*osSys)) + } + } + + return nil +} + +func checkRequestedPage(requestedPage *gql_generated.PageInput) error { + if requestedPage == nil { + return nil + } + + if requestedPage.Limit != nil && *requestedPage.Limit < 0 { + format := "global-search: requested page limit parameter can't be negative" + + return errors.Wrap(zerr.ErrInvalidRequestParams, format) + } + + if requestedPage.Offset != nil && *requestedPage.Offset < 0 { + format := "global-search: requested page offset parameter can't be negative" + + return errors.Wrap(zerr.ErrInvalidRequestParams, format) + } + + return nil +} + +func cleanQuery(query string) string { + query = strings.TrimSpace(query) + query = strings.Trim(query, "/") + query = strings.ToLower(query) + + return query +} + +func cleanFilter(filter *gql_generated.Filter) *gql_generated.Filter { + if filter == nil { + return nil + } + + if filter.Arch != nil { + for i := range filter.Arch { + *filter.Arch[i] = strings.ToLower(*filter.Arch[i]) + *filter.Arch[i] = strings.TrimSpace(*filter.Arch[i]) + } + + filter.Arch = deleteEmptyElements(filter.Arch) + } + + if filter.Os != nil { + for i := range filter.Os { + *filter.Os[i] = strings.ToLower(*filter.Os[i]) + *filter.Os[i] = strings.TrimSpace(*filter.Os[i]) + } + + filter.Os = deleteEmptyElements(filter.Os) + } + + return filter +} + +func deleteEmptyElements(slice []*string) []*string { + i := 0 + for i < len(slice) { + if elementIsEmpty(*slice[i]) { + slice = deleteElementAt(slice, i) + } else { + i++ + } + } + + return slice +} + +func elementIsEmpty(s string) bool { + return s == "" +} + +func deleteElementAt(slice []*string, i int) []*string { + slice[i] = slice[len(slice)-1] + slice = slice[:len(slice)-1] + + return slice +} + +func expandedRepoInfo(ctx context.Context, repo string, repoDB repodb.RepoDB, cveInfo cveinfo.CveInfo, log log.Logger, +) (*gql_generated.RepoInfo, error) { + if ok, err := localCtx.RepoIsUserAvailable(ctx, repo); !ok || err != nil { + log.Info().Err(err).Msgf("resolver: 'repo %s is user available' = %v", repo, ok) + + return &gql_generated.RepoInfo{}, nil //nolint:nilerr // don't give details to a potential attacker + } + + repoMeta, err := repoDB.GetRepoMeta(repo) + if err != nil { + log.Error().Err(err).Msgf("resolver: can't retrieve repoMeta for repo %s", repo) + + return &gql_generated.RepoInfo{}, err + } + + manifestMetaMap := map[string]repodb.ManifestMetadata{} + + for tag, descriptor := range repoMeta.Tags { + digest := descriptor.Digest + + if _, alreadyDownloaded := manifestMetaMap[digest]; alreadyDownloaded { + continue + } + + manifestMeta, err := repoDB.GetManifestMeta(repo, godigest.Digest(digest)) + if err != nil { + graphql.AddError(ctx, errors.Wrapf(err, + "resolver: failed to get manifest meta for image %s:%s with manifest digest %s", repo, tag, digest)) continue } - var lastUpdatedImageSummary gql_generated.ImageSummary - - repoPlatforms := make([]*gql_generated.OsArch, 0, len(manifests)) - repoVendors := make([]*string, 0, len(manifests)) - - for i, manifest := range manifests { - imageLayersSize := int64(0) - - manifestTag, ok := manifest.Annotations[ispec.AnnotationRefName] - if !ok { - 
log.Error().Str("digest", manifest.Digest.String()).Msg("reference not found for this manifest") - - continue - } - - imageBlobManifest, err := olu.GetImageBlobManifest(repo, manifests[i].Digest) - if err != nil { - log.Error().Err(err).Msgf("can't read manifest for repo %s %s", repo, manifestTag) - - continue - } - - manifestSize := olu.GetImageManifestSize(repo, manifest.Digest) - configSize := imageBlobManifest.Config.Size - - repoBlob2Size[manifest.Digest.String()] = manifestSize - repoBlob2Size[imageBlobManifest.Config.Digest.String()] = configSize - - for _, layer := range imageBlobManifest.Layers { - layer := layer - layerDigest := layer.Digest.String() - layerSizeStr := strconv.Itoa(int(layer.Size)) - repoBlob2Size[layer.Digest.String()] = layer.Size - imageLayersSize += layer.Size - - // if we have a tag we won't match a layer - if tag != "" { - continue - } - - if index := strings.Index(layerDigest, name); index != -1 { - layers = append(layers, &gql_generated.LayerSummary{ - Digest: &layerDigest, - Size: &layerSizeStr, - Score: &index, - }) - } - } - - imageSize := imageLayersSize + manifestSize + configSize - - index := strings.Index(repo, name) - matchesTag := strings.HasPrefix(manifestTag, tag) - - if index != -1 { - imageConfigInfo, err := olu.GetImageConfigInfo(repo, manifests[i].Digest) - if err != nil { - log.Error().Err(err).Msgf("can't retrieve config info for the image %s %s", repo, manifestTag) - - continue - } - - size := strconv.Itoa(int(imageSize)) - isSigned := olu.CheckManifestSignature(repo, manifests[i].Digest) - - // update matching score - score := calculateImageMatchingScore(repo, index, matchesTag) - - lastUpdated := common.GetImageLastUpdated(imageConfigInfo) - os, arch := olu.GetImagePlatform(imageConfigInfo) - osArch := &gql_generated.OsArch{ - Os: &os, - Arch: &arch, - } - - // get image info from manifest annotation, if not found get from image config labels. 
- annotations := common.GetAnnotations(imageBlobManifest.Annotations, imageConfigInfo.Config.Labels) - - manifestDigest := manifest.Digest.String() - configDigest := imageBlobManifest.Config.Digest.String() - - repoPlatforms = append(repoPlatforms, osArch) - repoVendors = append(repoVendors, &annotations.Vendor) - - imageCveSummary := cveinfo.ImageCVESummary{} - // Check if vulnerability scanning is disabled - if cveInfo != nil { - imageName := fmt.Sprintf("%s:%s", repo, manifestTag) - imageCveSummary, err = cveInfo.GetCVESummaryForImage(imageName) - - if err != nil { - // Log the error, but we should still include the manifest in results - log.Error().Err(err).Msgf( - "unable to run vulnerability scan on tag %s in repo %s", - manifestTag, - repo, - ) - } - } - - authors := annotations.Authors - if authors == "" { - authors = imageConfigInfo.Author - } - - imageSummary := gql_generated.ImageSummary{ - RepoName: &repo, - Tag: &manifestTag, - LastUpdated: &lastUpdated, - Digest: &manifestDigest, - ConfigDigest: &configDigest, - IsSigned: &isSigned, - Size: &size, - Platform: osArch, - Vendor: &annotations.Vendor, - Score: &score, - Description: &annotations.Description, - Title: &annotations.Title, - Documentation: &annotations.Documentation, - Licenses: &annotations.Licenses, - Labels: &annotations.Labels, - Source: &annotations.Source, - Vulnerabilities: &gql_generated.ImageVulnerabilitySummary{ - MaxSeverity: &imageCveSummary.MaxSeverity, - Count: &imageCveSummary.Count, - }, - Authors: &authors, - } - - if manifest.Digest.String() == lastUpdatedTag.Digest.String() { - lastUpdatedImageSummary = imageSummary - } - - images = append(images, &imageSummary) - } - } - - for blob := range repoBlob2Size { - repoSize += repoBlob2Size[blob] - } - - if index := strings.Index(repo, name); index != -1 { - repoSize := strconv.FormatInt(repoSize, 10) - - repos = append(repos, &gql_generated.RepoSummary{ - Name: &repo, - LastUpdated: &lastUpdatedTag.Timestamp, - Size: &repoSize, - Platforms: repoPlatforms, - Vendors: repoVendors, - Score: &index, - NewestImage: &lastUpdatedImageSummary, - }) - } + manifestMetaMap[digest] = manifestMeta } - sort.Slice(repos, func(i, j int) bool { - return *repos[i].Score < *repos[j].Score - }) + skip := convert.SkipQGLField{ + Vulnerabilities: canSkipField(convert.GetPreloads(ctx), "Summary.NewestImage.Vulnerabilities"), + } - sort.Slice(images, func(i, j int) bool { - return *images[i].Score < *images[j].Score - }) + repoSummary, imageSummaries := convert.RepoMeta2ExpandedRepoInfo(ctx, repoMeta, manifestMetaMap, skip, cveInfo) - sort.Slice(layers, func(i, j int) bool { - return *layers[i].Score < *layers[j].Score - }) - - return repos, images, layers + return &gql_generated.RepoInfo{Summary: repoSummary, Images: imageSummaries}, nil } -// calcalculateImageMatchingScore iterated from the index of the matched string in the -// artifact name until the beginning of the string or until delimitator "/". -// The distance represents the score of the match. -// -// Example: -// -// query: image -// repos: repo/test/myimage -// -// Score will be 2. 
-func calculateImageMatchingScore(artefactName string, index int, matchesTag bool) int { - score := 0 - - for index >= 1 { - if artefactName[index-1] == '/' { - break - } - index-- - score++ +func safeDerefferencing[T any](pointer *T, defaultVal T) T { + if pointer != nil { + return *pointer } - if !matchesTag { - score += 10 - } + return defaultVal +} - return score +func searchingForRepos(query string) bool { + return !strings.Contains(query, ":") } func (r *queryResolver) getImageList(store storage.ImageStore, imageName string) ( @@ -558,7 +498,7 @@ func (r *queryResolver) getImageList(store storage.ImageStore, imageName string) tagPrefix := strings.HasPrefix(tag.Name, "sha256-") tagSuffix := strings.HasSuffix(tag.Name, ".sig") - imageInfo := BuildImageInfo(repo, tag.Name, digest, manifest, + imageInfo := convert.BuildImageInfo(repo, tag.Name, digest, manifest, imageConfig, isSigned) // check if it's an image or a signature @@ -617,163 +557,6 @@ func getReferrers(store storage.ImageStore, repoName string, digest string, arti return results, nil } -func BuildImageInfo(repo string, tag string, manifestDigest godigest.Digest, - manifest ispec.Manifest, imageConfig ispec.Image, isSigned bool, -) *gql_generated.ImageSummary { - layers := []*gql_generated.LayerSummary{} - size := int64(0) - log := log.NewLogger("debug", "") - allHistory := []*gql_generated.LayerHistory{} - formattedManifestDigest := manifestDigest.String() - formattedConfigDigest := manifest.Config.Digest.String() - annotations := common.GetAnnotations(manifest.Annotations, imageConfig.Config.Labels) - lastUpdated := common.GetImageLastUpdated(imageConfig) - - history := imageConfig.History - if len(history) == 0 { - for _, layer := range manifest.Layers { - size += layer.Size - digest := layer.Digest.String() - layerSize := strconv.FormatInt(layer.Size, 10) - - layer := &gql_generated.LayerSummary{ - Size: &layerSize, - Digest: &digest, - } - - layers = append( - layers, - layer, - ) - - allHistory = append(allHistory, &gql_generated.LayerHistory{ - Layer: layer, - HistoryDescription: &gql_generated.HistoryDescription{}, - }) - } - - formattedSize := strconv.FormatInt(size, 10) - - imageInfo := &gql_generated.ImageSummary{ - RepoName: &repo, - Tag: &tag, - Digest: &formattedManifestDigest, - ConfigDigest: &formattedConfigDigest, - Size: &formattedSize, - Layers: layers, - History: allHistory, - Vendor: &annotations.Vendor, - Description: &annotations.Description, - Title: &annotations.Title, - Documentation: &annotations.Documentation, - Licenses: &annotations.Licenses, - Labels: &annotations.Labels, - Source: &annotations.Source, - LastUpdated: &lastUpdated, - IsSigned: &isSigned, - Platform: &gql_generated.OsArch{ - Os: &imageConfig.OS, - Arch: &imageConfig.Architecture, - }, - } - - return imageInfo - } - - // iterator over manifest layers - var layersIterator int - // since we are appending pointers, it is important to iterate with an index over slice - for i := range history { - allHistory = append(allHistory, &gql_generated.LayerHistory{ - HistoryDescription: &gql_generated.HistoryDescription{ - Created: history[i].Created, - CreatedBy: &history[i].CreatedBy, - Author: &history[i].Author, - Comment: &history[i].Comment, - EmptyLayer: &history[i].EmptyLayer, - }, - }) - - if history[i].EmptyLayer { - continue - } - - if layersIterator+1 > len(manifest.Layers) { - formattedSize := strconv.FormatInt(size, 10) - - log.Error().Err(errors.ErrBadLayerCount).Msg("error on creating layer history for ImageSummary") - - return 
&gql_generated.ImageSummary{ - RepoName: &repo, - Tag: &tag, - Digest: &formattedManifestDigest, - ConfigDigest: &formattedConfigDigest, - Size: &formattedSize, - Layers: layers, - History: allHistory, - Vendor: &annotations.Vendor, - Description: &annotations.Description, - Title: &annotations.Title, - Documentation: &annotations.Documentation, - Licenses: &annotations.Licenses, - Labels: &annotations.Labels, - Source: &annotations.Source, - LastUpdated: &lastUpdated, - IsSigned: &isSigned, - Platform: &gql_generated.OsArch{ - Os: &imageConfig.OS, - Arch: &imageConfig.Architecture, - }, - } - } - - size += manifest.Layers[layersIterator].Size - digest := manifest.Layers[layersIterator].Digest.String() - layerSize := strconv.FormatInt(manifest.Layers[layersIterator].Size, 10) - - layer := &gql_generated.LayerSummary{ - Size: &layerSize, - Digest: &digest, - } - - layers = append( - layers, - layer, - ) - - allHistory[i].Layer = layer - - layersIterator++ - } - - formattedSize := strconv.FormatInt(size, 10) - - imageInfo := &gql_generated.ImageSummary{ - RepoName: &repo, - Tag: &tag, - Digest: &formattedManifestDigest, - ConfigDigest: &formattedConfigDigest, - Size: &formattedSize, - Layers: layers, - History: allHistory, - Vendor: &annotations.Vendor, - Description: &annotations.Description, - Title: &annotations.Title, - Documentation: &annotations.Documentation, - Licenses: &annotations.Licenses, - Labels: &annotations.Labels, - Source: &annotations.Source, - LastUpdated: &lastUpdated, - IsSigned: &isSigned, - Platform: &gql_generated.OsArch{ - Os: &imageConfig.OS, - Arch: &imageConfig.Architecture, - }, - } - - return imageInfo -} - // get passed context from authzHandler and filter out repos based on permissions. func userAvailableRepos(ctx context.Context, repoList []string) ([]string, error) { var availableRepos []string @@ -781,7 +564,7 @@ func userAvailableRepos(ctx context.Context, repoList []string) ([]string, error // authz request context (set in authz middleware) acCtx, err := localCtx.GetAccessControlContext(ctx) if err != nil { - err := errors.ErrBadType + err := zerr.ErrBadType return []string{}, err } @@ -802,7 +585,7 @@ func userAvailableRepos(ctx context.Context, repoList []string) ([]string, error func extractImageDetails( ctx context.Context, layoutUtils common.OciLayoutUtils, - repo, tag string, + repo, tag string, //nolint:unparam // function only called in the tests log log.Logger) ( godigest.Digest, *ispec.Manifest, *ispec.Image, error, ) { @@ -816,7 +599,7 @@ func extractImageDetails( if len(validRepoList) == 0 { log.Error().Err(err).Msg("user is not authorized") - return "", nil, nil, errors.ErrUnauthorizedAccess + return "", nil, nil, zerr.ErrUnauthorizedAccess } manifest, dig, err := layoutUtils.GetImageManifest(repo, tag) diff --git a/pkg/extensions/search/resolver_test.go b/pkg/extensions/search/resolver_test.go index 2e8a0e12..3d6fee03 100644 --- a/pkg/extensions/search/resolver_test.go +++ b/pkg/extensions/search/resolver_test.go @@ -4,21 +4,22 @@ import ( "context" "encoding/json" "errors" - "os" "strings" "testing" + "time" "github.com/99designs/gqlgen/graphql" godigest "github.com/opencontainers/go-digest" ispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/rs/zerolog" . 
"github.com/smartystreets/goconvey/convey" - "zotregistry.io/zot/pkg/extensions/monitoring" "zotregistry.io/zot/pkg/extensions/search/common" + cveinfo "zotregistry.io/zot/pkg/extensions/search/cve" + "zotregistry.io/zot/pkg/extensions/search/gql_generated" "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" localCtx "zotregistry.io/zot/pkg/requestcontext" - "zotregistry.io/zot/pkg/storage/local" + "zotregistry.io/zot/pkg/storage" "zotregistry.io/zot/pkg/test/mocks" ) @@ -26,292 +27,529 @@ var ErrTestError = errors.New("TestError") func TestGlobalSearch(t *testing.T) { Convey("globalSearch", t, func() { - Convey("GetRepoLastUpdated fail", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetRepoLastUpdatedFn: func(repo string) (common.TagInfo, error) { - return common.TagInfo{}, ErrTestError + const query = "repo1" + Convey("RepoDB SearchRepos error", func() { + mockRepoDB := mocks.RepoDBMock{ + SearchReposFn: func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + return make([]repodb.RepoMetadata, 0), make(map[string]repodb.ManifestMetadata), ErrTestError }, } + responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) mockCve := mocks.CveInfoMock{} - - globalSearch([]string{"repo1"}, "name", "tag", mockOlum, mockCve, log.NewLogger("debug", "")) + repos, images, layers, err := globalSearch(responseContext, query, mockRepoDB, &gql_generated.Filter{}, + &gql_generated.PageInput{}, mockCve, log.NewLogger("debug", "")) + So(err, ShouldNotBeNil) + So(images, ShouldBeEmpty) + So(layers, ShouldBeEmpty) + So(repos, ShouldBeEmpty) }) - Convey("GetImageTagsWithTimestamp fail", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetImageTagsWithTimestampFn: func(repo string) ([]common.TagInfo, error) { - return []common.TagInfo{}, ErrTestError - }, - } - mockCve := mocks.CveInfoMock{} - - globalSearch([]string{"repo1"}, "name", "tag", mockOlum, mockCve, log.NewLogger("debug", "")) - }) - - Convey("GetImageManifests fail", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetImageManifestsFn: func(name string) ([]ispec.Descriptor, error) { - return []ispec.Descriptor{}, ErrTestError - }, - } - mockCve := mocks.CveInfoMock{} - - globalSearch([]string{"repo1"}, "name", "tag", mockOlum, mockCve, log.NewLogger("debug", "")) - }) - - Convey("Manifests given, bad image blob manifest", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetImageManifestsFn: func(name string) ([]ispec.Descriptor, error) { - return []ispec.Descriptor{ + Convey("RepoDB SearchRepo is successful", func() { + mockRepoDB := mocks.RepoDBMock{ + SearchReposFn: func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + repos := []repodb.RepoMetadata{ { - Digest: "digest", - Size: -1, - Annotations: map[string]string{ - ispec.AnnotationRefName: "this is a bad format", - }, - }, - }, nil - }, - GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (ispec.Manifest, error) { - return ispec.Manifest{}, ErrTestError - }, - } - mockCve := mocks.CveInfoMock{} - - globalSearch([]string{"repo1"}, "name", "tag", mockOlum, mockCve, log.NewLogger("debug", "")) - }) - - Convey("Manifests given, no manifest tag", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetImageManifestsFn: func(name string) 
([]ispec.Descriptor, error) { - return []ispec.Descriptor{ - { - Digest: "digest", - Size: -1, - }, - }, nil - }, - } - mockCve := mocks.CveInfoMock{} - - globalSearch([]string{"repo1"}, "test", "tag", mockOlum, mockCve, log.NewLogger("debug", "")) - }) - - Convey("Global search success, no tag", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetRepoLastUpdatedFn: func(repo string) (common.TagInfo, error) { - return common.TagInfo{ - Digest: "sha256:855b1556a45637abf05c63407437f6f305b4627c4361fb965a78e5731999c0c7", - }, nil - }, - GetImageManifestsFn: func(name string) ([]ispec.Descriptor, error) { - return []ispec.Descriptor{ - { - Digest: "sha256:855b1556a45637abf05c63407437f6f305b4627c4361fb965a78e5731999c0c7", - Size: -1, - Annotations: map[string]string{ - ispec.AnnotationRefName: "this is a bad format", - }, - }, - }, nil - }, - GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (ispec.Manifest, error) { - return ispec.Manifest{ - Layers: []ispec.Descriptor{ - { - Size: 0, - Digest: godigest.FromString(""), - }, - }, - }, nil - }, - } - mockCve := mocks.CveInfoMock{} - globalSearch([]string{"repo1/name"}, "name", "tag", mockOlum, mockCve, log.NewLogger("debug", "")) - }) - - Convey("Manifests given, bad image config info", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetImageManifestsFn: func(name string) ([]ispec.Descriptor, error) { - return []ispec.Descriptor{ - { - Digest: "digest", - Size: -1, - Annotations: map[string]string{ - ispec.AnnotationRefName: "this is a bad format", - }, - }, - }, nil - }, - GetImageConfigInfoFn: func(repo string, manifestDigest godigest.Digest) (ispec.Image, error) { - return ispec.Image{}, ErrTestError - }, - } - mockCve := mocks.CveInfoMock{} - globalSearch([]string{"repo1/name"}, "name", "tag", mockOlum, mockCve, log.NewLogger("debug", "")) - }) - - Convey("Tag given, no layer match", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetExpandedRepoInfoFn: func(name string) (common.RepoInfo, error) { - return common.RepoInfo{ - ImageSummaries: []common.ImageSummary{ - { - Tag: "latest", - Layers: []common.LayerSummary{ - { - Size: "100", - Digest: "sha256:855b1556a45637abf05c63407437f6f305b4627c4361fb965a78e5731999c0c7", - }, + Name: "repo1", + Tags: map[string]repodb.Descriptor{ + "1.0.1": { + Digest: "digestTag1.0.1", + MediaType: ispec.MediaTypeImageManifest, + }, + "1.0.2": { + Digest: "digestTag1.0.2", + MediaType: ispec.MediaTypeImageManifest, }, }, + Stars: 100, }, - }, nil - }, - GetImageManifestSizeFn: func(repo string, manifestDigest godigest.Digest) int64 { - return 100 - }, - GetImageConfigSizeFn: func(repo string, manifestDigest godigest.Digest) int64 { - return 100 - }, - GetImageTagsWithTimestampFn: func(repo string) ([]common.TagInfo, error) { - return []common.TagInfo{ - { - Name: "test", - Digest: "test", + } + + createTime := time.Now() + configBlob1, err := json.Marshal(ispec.Image{ + Config: ispec.ImageConfig{ + Labels: map[string]string{ + ispec.AnnotationVendor: "TestVendor1", + }, }, - }, nil + Created: &createTime, + }) + So(err, ShouldBeNil) + + configBlob2, err := json.Marshal(ispec.Image{ + Config: ispec.ImageConfig{ + Labels: map[string]string{ + ispec.AnnotationVendor: "TestVendor2", + }, + }, + }) + So(err, ShouldBeNil) + + manifestBlob, err := json.Marshal(ispec.Manifest{}) + So(err, ShouldBeNil) + + manifestMetas := map[string]repodb.ManifestMetadata{ + "digestTag1.0.1": { + ManifestBlob: manifestBlob, + ConfigBlob: configBlob1, + }, + "digestTag1.0.2": { + ManifestBlob: manifestBlob, 
+ ConfigBlob: configBlob2, + }, + } + + return repos, manifestMetas, nil }, } + + const query = "repo1" + limit := 1 + ofset := 0 + sortCriteria := gql_generated.SortCriteriaAlphabeticAsc + pageInput := gql_generated.PageInput{ + Limit: &limit, + Offset: &ofset, + SortBy: &sortCriteria, + } + + responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) mockCve := mocks.CveInfoMock{} - globalSearch([]string{"repo1"}, "name", "tag", mockOlum, mockCve, log.NewLogger("debug", "")) + repos, images, layers, err := globalSearch(responseContext, query, mockRepoDB, + &gql_generated.Filter{}, &pageInput, mockCve, log.NewLogger("debug", "")) + So(err, ShouldBeNil) + So(images, ShouldBeEmpty) + So(layers, ShouldBeEmpty) + So(repos, ShouldNotBeEmpty) + So(len(repos[0].Vendors), ShouldEqual, 2) + }) + + Convey("RepoDB SearchRepo Bad manifest referenced", func() { + mockRepoDB := mocks.RepoDBMock{ + SearchReposFn: func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + repos := []repodb.RepoMetadata{ + { + Name: "repo1", + Tags: map[string]repodb.Descriptor{ + "1.0.1": { + Digest: "digestTag1.0.1", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + Stars: 100, + }, + } + + configBlob, err := json.Marshal(ispec.Image{}) + So(err, ShouldBeNil) + + manifestMetas := map[string]repodb.ManifestMetadata{ + "digestTag1.0.1": { + ManifestBlob: []byte("bad manifest blob"), + ConfigBlob: configBlob, + }, + } + + return repos, manifestMetas, nil + }, + } + + query := "repo1" + limit := 1 + ofset := 0 + sortCriteria := gql_generated.SortCriteriaAlphabeticAsc + pageInput := gql_generated.PageInput{ + Limit: &limit, + Offset: &ofset, + SortBy: &sortCriteria, + } + + responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) + mockCve := mocks.CveInfoMock{} + + repos, images, layers, err := globalSearch(responseContext, query, mockRepoDB, + &gql_generated.Filter{}, &pageInput, mockCve, log.NewLogger("debug", "")) + So(err, ShouldBeNil) + So(images, ShouldBeEmpty) + So(layers, ShouldBeEmpty) + So(repos, ShouldNotBeEmpty) + + query = "repo1:1.0.1" + + responseContext = graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) + repos, images, layers, err = globalSearch(responseContext, query, mockRepoDB, + &gql_generated.Filter{}, &pageInput, mockCve, log.NewLogger("debug", "")) + So(err, ShouldBeNil) + So(images, ShouldBeEmpty) + So(layers, ShouldBeEmpty) + So(repos, ShouldBeEmpty) + }) + + Convey("RepoDB SearchRepo good manifest referenced and bad config blob", func() { + mockRepoDB := mocks.RepoDBMock{ + SearchReposFn: func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + repos := []repodb.RepoMetadata{ + { + Name: "repo1", + Tags: map[string]repodb.Descriptor{ + "1.0.1": { + Digest: "digestTag1.0.1", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + Stars: 100, + }, + } + + manifestBlob, err := json.Marshal(ispec.Manifest{}) + So(err, ShouldBeNil) + + manifestMetas := map[string]repodb.ManifestMetadata{ + "digestTag1.0.1": { + ManifestBlob: manifestBlob, + ConfigBlob: []byte("bad config blob"), + }, + } + + return repos, manifestMetas, nil + }, + } + + query := "repo1" + limit := 1 + ofset := 0 + 
sortCriteria := gql_generated.SortCriteriaAlphabeticAsc + pageInput := gql_generated.PageInput{ + Limit: &limit, + Offset: &ofset, + SortBy: &sortCriteria, + } + + mockCve := mocks.CveInfoMock{} + + responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) + repos, images, layers, err := globalSearch(responseContext, query, mockRepoDB, + &gql_generated.Filter{}, &pageInput, mockCve, log.NewLogger("debug", "")) + So(err, ShouldBeNil) + So(images, ShouldBeEmpty) + So(layers, ShouldBeEmpty) + So(repos, ShouldNotBeEmpty) + + query = "repo1:1.0.1" + responseContext = graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) + repos, images, layers, err = globalSearch(responseContext, query, mockRepoDB, + &gql_generated.Filter{}, &pageInput, mockCve, log.NewLogger("debug", "")) + So(err, ShouldBeNil) + So(images, ShouldBeEmpty) + So(layers, ShouldBeEmpty) + So(repos, ShouldBeEmpty) + }) + + Convey("RepoDB SearchTags gives error", func() { + mockRepoDB := mocks.RepoDBMock{ + SearchTagsFn: func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + return make([]repodb.RepoMetadata, 0), make(map[string]repodb.ManifestMetadata), ErrTestError + }, + } + const query = "repo1:1.0.1" + mockCve := mocks.CveInfoMock{} + + responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) + repos, images, layers, err := globalSearch(responseContext, query, mockRepoDB, &gql_generated.Filter{}, + &gql_generated.PageInput{}, mockCve, log.NewLogger("debug", "")) + So(err, ShouldNotBeNil) + So(images, ShouldBeEmpty) + So(layers, ShouldBeEmpty) + So(repos, ShouldBeEmpty) + }) + + Convey("RepoDB SearchTags is successful", func() { + mockRepoDB := mocks.RepoDBMock{ + SearchTagsFn: func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + repos := []repodb.RepoMetadata{ + { + Name: "repo1", + Tags: map[string]repodb.Descriptor{ + "1.0.1": { + Digest: "digestTag1.0.1", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + Stars: 100, + }, + } + + configBlob1, err := json.Marshal(ispec.Image{ + Config: ispec.ImageConfig{ + Labels: map[string]string{ + ispec.AnnotationVendor: "TestVendor1", + }, + }, + }) + So(err, ShouldBeNil) + + configBlob2, err := json.Marshal(ispec.Image{ + Config: ispec.ImageConfig{ + Labels: map[string]string{ + ispec.AnnotationVendor: "TestVendor2", + }, + }, + }) + So(err, ShouldBeNil) + + manifestBlob, err := json.Marshal(ispec.Manifest{}) + So(err, ShouldBeNil) + + manifestMetas := map[string]repodb.ManifestMetadata{ + "digestTag1.0.1": { + ManifestBlob: manifestBlob, + ConfigBlob: configBlob1, + }, + "digestTag1.0.2": { + ManifestBlob: manifestBlob, + ConfigBlob: configBlob2, + }, + } + + return repos, manifestMetas, nil + }, + } + + const query = "repo1:1.0.1" + limit := 1 + ofset := 0 + sortCriteria := gql_generated.SortCriteriaAlphabeticAsc + pageInput := gql_generated.PageInput{ + Limit: &limit, + Offset: &ofset, + SortBy: &sortCriteria, + } + + mockCve := mocks.CveInfoMock{} + + responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) + repos, images, layers, err := globalSearch(responseContext, query, mockRepoDB, + 
&gql_generated.Filter{}, &pageInput, mockCve, log.NewLogger("debug", "")) + So(err, ShouldBeNil) + So(images, ShouldNotBeEmpty) + So(layers, ShouldBeEmpty) + So(repos, ShouldBeEmpty) }) }) } func TestRepoListWithNewestImage(t *testing.T) { - Convey("repoListWithNewestImage", t, func() { - Convey("GetImageManifests fail", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetImageManifestsFn: func(image string) ([]ispec.Descriptor, error) { - return []ispec.Descriptor{}, ErrTestError + Convey("RepoListWithNewestImage", t, func() { + Convey("RepoDB SearchRepos error", func() { + mockRepoDB := mocks.RepoDBMock{ + SearchReposFn: func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + return make([]repodb.RepoMetadata, 0), make(map[string]repodb.ManifestMetadata), ErrTestError }, } - - ctx := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, graphql.Recover) + responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) mockCve := mocks.CveInfoMock{} - _, err := repoListWithNewestImage(ctx, []string{"repo1"}, mockOlum, mockCve, log.NewLogger("debug", "")) - So(err, ShouldBeNil) - errs := graphql.GetErrors(ctx) - So(errs, ShouldNotBeEmpty) + limit := 1 + ofset := 0 + sortCriteria := gql_generated.SortCriteriaUpdateTime + pageInput := gql_generated.PageInput{ + Limit: &limit, + Offset: &ofset, + SortBy: &sortCriteria, + } + repos, err := repoListWithNewestImage(responseContext, mockCve, log.NewLogger("debug", ""), &pageInput, mockRepoDB) + So(err, ShouldNotBeNil) + So(repos, ShouldBeEmpty) }) - Convey("GetImageBlobManifest fail", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (ispec.Manifest, error) { - return ispec.Manifest{}, ErrTestError - }, - GetImageManifestsFn: func(image string) ([]ispec.Descriptor, error) { - return []ispec.Descriptor{ + Convey("RepoDB SearchRepo Bad manifest referenced", func() { + mockRepoDB := mocks.RepoDBMock{ + SearchReposFn: func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + repos := []repodb.RepoMetadata{ { - MediaType: "application/vnd.oci.image.layer.v1.tar", - Size: int64(0), + Name: "repo1", + Tags: map[string]repodb.Descriptor{ + "1.0.1": { + Digest: "digestTag1.0.1", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + Stars: 100, }, - }, nil - }, - } - - ctx := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, graphql.Recover) - mockCve := mocks.CveInfoMock{} - _, err := repoListWithNewestImage(ctx, []string{"repo1"}, mockOlum, mockCve, log.NewLogger("debug", "")) - So(err, ShouldBeNil) - - errs := graphql.GetErrors(ctx) - So(errs, ShouldNotBeEmpty) - }) - - Convey("GetImageConfigInfo fail", func() { - mockOlum := mocks.OciLayoutUtilsMock{ - GetImageManifestsFn: func(image string) ([]ispec.Descriptor, error) { - return []ispec.Descriptor{ { - MediaType: "application/vnd.oci.image.layer.v1.tar", - Size: int64(0), + Name: "repo2", + Tags: map[string]repodb.Descriptor{ + "1.0.2": { + Digest: "digestTag1.0.2", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + Stars: 100, }, - }, nil - }, - GetImageConfigInfoFn: func(repo string, manifestDigest godigest.Digest) (ispec.Image, error) { - return ispec.Image{ - Author: 
"test", - }, ErrTestError + } + + configBlob1, err := json.Marshal(ispec.Image{ + Config: ispec.ImageConfig{ + Labels: map[string]string{}, + }, + }) + So(err, ShouldBeNil) + + manifestMetas := map[string]repodb.ManifestMetadata{ + "digestTag1.0.1": { + ManifestBlob: []byte("bad manifest blob"), + ConfigBlob: configBlob1, + }, + "digestTag1.0.2": { + ManifestBlob: []byte("bad manifest blob"), + ConfigBlob: configBlob1, + }, + } + + return repos, manifestMetas, nil }, } - ctx := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, graphql.Recover) + responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) mockCve := mocks.CveInfoMock{} - _, err := repoListWithNewestImage(ctx, []string{"repo1"}, mockOlum, mockCve, log.NewLogger("debug", "")) + + limit := 1 + ofset := 0 + sortCriteria := gql_generated.SortCriteriaUpdateTime + pageInput := gql_generated.PageInput{ + Limit: &limit, + Offset: &ofset, + SortBy: &sortCriteria, + } + repos, err := repoListWithNewestImage(responseContext, mockCve, log.NewLogger("debug", ""), &pageInput, mockRepoDB) So(err, ShouldBeNil) - - errs := graphql.GetErrors(ctx) - So(errs, ShouldNotBeEmpty) + So(repos, ShouldNotBeEmpty) }) - }) -} -func TestUserAvailableRepos(t *testing.T) { - Convey("Type assertion fails", t, func() { - var invalid struct{} + Convey("Working SearchRepo function", func() { + createTime := time.Now() + createTime2 := createTime.Add(time.Second) + mockRepoDB := mocks.RepoDBMock{ + SearchReposFn: func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + pageFinder, err := repodb.NewBaseRepoPageFinder(requestedPage.Limit, requestedPage.Offset, requestedPage.SortBy) + So(err, ShouldBeNil) - log := log.Logger{Logger: zerolog.New(os.Stdout)} - dir := t.TempDir() - metrics := monitoring.NewMetricsServer(false, log) - defaultStore := local.NewImageStore(dir, false, 0, false, false, log, metrics, nil, nil) + repos := []repodb.RepoMetadata{ + { + Name: "repo1", + Tags: map[string]repodb.Descriptor{ + "1.0.1": { + Digest: "digestTag1.0.1", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + Stars: 100, + }, + { + Name: "repo2", + Tags: map[string]repodb.Descriptor{ + "1.0.2": { + Digest: "digestTag1.0.2", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + Stars: 100, + }, + } - repoList, err := defaultStore.GetRepositories() - So(err, ShouldBeNil) + for _, repoMeta := range repos { + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repoMeta, + UpdateTime: createTime, + }) + createTime = createTime.Add(time.Second) + } - ctx := context.TODO() - key := localCtx.GetContextKey() - ctx = context.WithValue(ctx, key, invalid) + repos = pageFinder.Page() - repos, err := userAvailableRepos(ctx, repoList) - So(err, ShouldNotBeNil) - So(repos, ShouldBeEmpty) - }) -} + configBlob1, err := json.Marshal(ispec.Image{ + Config: ispec.ImageConfig{ + Labels: map[string]string{}, + }, + Created: &createTime, + }) + So(err, ShouldBeNil) -func TestMatching(t *testing.T) { - pine := "pine" + configBlob2, err := json.Marshal(ispec.Image{ + Config: ispec.ImageConfig{ + Labels: map[string]string{}, + }, + Created: &createTime2, + }) + So(err, ShouldBeNil) - Convey("Perfect Matching", t, func() { - query := "alpine" - score := calculateImageMatchingScore("alpine", strings.Index("alpine", query), true) - So(score, ShouldEqual, 0) - }) + manifestBlob, err := 
json.Marshal(ispec.Manifest{}) + So(err, ShouldBeNil) - Convey("Partial Matching", t, func() { - query := pine - score := calculateImageMatchingScore("alpine", strings.Index("alpine", query), true) - So(score, ShouldEqual, 2) - }) + manifestMetas := map[string]repodb.ManifestMetadata{ + "digestTag1.0.1": { + ManifestBlob: manifestBlob, + ConfigBlob: configBlob1, + }, + "digestTag1.0.2": { + ManifestBlob: manifestBlob, + ConfigBlob: configBlob2, + }, + } - Convey("Complex Partial Matching", t, func() { - query := pine - score := calculateImageMatchingScore("repo/test/alpine", strings.Index("alpine", query), true) - So(score, ShouldEqual, 2) + return repos, manifestMetas, nil + }, + } + Convey("RepoDB missing requestedPage", func() { + responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) + mockCve := mocks.CveInfoMock{} + repos, err := repoListWithNewestImage(responseContext, mockCve, log.NewLogger("debug", ""), nil, mockRepoDB) + So(err, ShouldBeNil) + So(repos, ShouldNotBeEmpty) + }) - query = pine - score = calculateImageMatchingScore("repo/alpine/test", strings.Index("alpine", query), true) - So(score, ShouldEqual, 2) + Convey("RepoDB SearchRepo is successful", func() { + limit := 2 + ofset := 0 + sortCriteria := gql_generated.SortCriteriaUpdateTime + pageInput := gql_generated.PageInput{ + Limit: &limit, + Offset: &ofset, + SortBy: &sortCriteria, + } - query = pine - score = calculateImageMatchingScore("alpine/repo/test", strings.Index("alpine", query), true) - So(score, ShouldEqual, 2) + responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter, + graphql.DefaultRecover) - query = pine - score = calculateImageMatchingScore("alpine/repo/test", strings.Index("alpine", query), false) - So(score, ShouldEqual, 12) + mockCve := mocks.CveInfoMock{} + repos, err := repoListWithNewestImage(responseContext, mockCve, + log.NewLogger("debug", ""), &pageInput, mockRepoDB) + So(err, ShouldBeNil) + So(repos, ShouldNotBeEmpty) + So(len(repos), ShouldEqual, 2) + So(*repos[0].Name, ShouldEqual, "repo2") + So(*repos[0].LastUpdated, ShouldEqual, createTime2) + }) + }) }) } @@ -501,3 +739,417 @@ func TestExtractImageDetails(t *testing.T) { }) }) } + +func TestQueryResolverErrors(t *testing.T) { + Convey("Errors", t, func() { + log := log.NewLogger("debug", "") + ctx := context.Background() + + Convey("ImageListForCve olu.GetRepositories() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return nil, ErrTestError + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageListForCve(ctx, "id") + So(err, ShouldNotBeNil) + }) + + Convey("ImageListForCve cveInfo.GetImageListForCVE() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return []string{"repo"}, nil + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{ + GetImageListForCVEFn: func(repo, cveID string) ([]cveinfo.ImageInfoByCVE, error) { + return nil, ErrTestError + }, + }, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageListForCve(ctx, "a") + So(err, ShouldNotBeNil) + }) + + Convey("ImageListForCve olu.GetImageConfigInfo() errors", func() { + resolverConfig := NewResolver( + log, + 
storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return []string{"repo"}, nil + }, + GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) { + return nil, ErrTestError + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{ + GetImageListForCVEFn: func(repo, cveID string) ([]cveinfo.ImageInfoByCVE, error) { + return []cveinfo.ImageInfoByCVE{{}}, nil + }, + }, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageListForCve(ctx, "a") + So(err, ShouldNotBeNil) + }) + + Convey("RepoListWithNewestImage repoListWithNewestImage errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{}, + }, + mocks.RepoDBMock{ + SearchReposFn: func(ctx context.Context, searchText string, filter repodb.Filter, + requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + return nil, nil, ErrTestError + }, + }, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.RepoListWithNewestImage(ctx, &gql_generated.PageInput{}) + So(err, ShouldNotBeNil) + }) + + Convey("ImageListWithCVEFixed olu.GetImageBlobManifest() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) { + return nil, ErrTestError + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{ + GetImageListWithCVEFixedFn: func(repo, cveID string) ([]common.TagInfo, error) { + return []common.TagInfo{{}}, nil + }, + }, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageListWithCVEFixed(ctx, "a", "d") + So(err, ShouldNotBeNil) + }) + + Convey("ImageListWithCVEFixed olu.GetImageConfigInfo() errors", func() { + getBlobContentCallCounter := 0 + + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetBlobContentFn: func(repo string, digest godigest.Digest) ([]byte, error) { + if getBlobContentCallCounter == 1 { + getBlobContentCallCounter++ + + return nil, ErrTestError + } + getBlobContentCallCounter++ + + return []byte("{}"), nil + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{ + GetImageListWithCVEFixedFn: func(repo, cveID string) ([]common.TagInfo, error) { + return []common.TagInfo{{}}, nil + }, + }, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageListWithCVEFixed(ctx, "a", "d") + So(err, ShouldNotBeNil) + }) + + Convey("ImageListForDigest defaultStore.GetRepositories() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return nil, ErrTestError + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageListForDigest(ctx, "") + So(err, ShouldNotBeNil) + }) + + Convey("ImageListForDigest getImageListForDigest() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return []string{"repo"}, nil + }, + GetIndexContentFn: func(repo string) ([]byte, error) { + return nil, ErrTestError + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageListForDigest(ctx, "") + 
So(err, ShouldNotBeNil) + }) + + Convey("ImageListForDigest substores store.GetRepositories() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetIndexContentFn: func(repo string) ([]byte, error) { + return []byte("{}"), nil + }, + GetRepositoriesFn: func() ([]string, error) { + return []string{"repo"}, nil + }, + }, + SubStore: map[string]storage.ImageStore{ + "sub1": mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return []string{"repo"}, ErrTestError + }, + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageListForDigest(ctx, "") + So(err, ShouldNotBeNil) + }) + + Convey("ImageListForDigest substores getImageListForDigest() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetIndexContentFn: func(repo string) ([]byte, error) { + return []byte("{}"), nil + }, + GetRepositoriesFn: func() ([]string, error) { + return []string{"repo"}, nil + }, + }, + SubStore: map[string]storage.ImageStore{ + "/sub1": mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return []string{"sub1/repo"}, nil + }, + GetIndexContentFn: func(repo string) ([]byte, error) { + return nil, ErrTestError + }, + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageListForDigest(ctx, "") + So(err, ShouldNotBeNil) + }) + + Convey("RepoListWithNewestImage repoListWithNewestImage() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{}, + mocks.RepoDBMock{ + SearchReposFn: func(ctx context.Context, searchText string, + filter repodb.Filter, requestedPage repodb.PageInput, + ) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + return nil, nil, ErrTestError + }, + }, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.RepoListWithNewestImage(ctx, &gql_generated.PageInput{}) + So(err, ShouldNotBeNil) + }) + + Convey("ImageList getImageList() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return nil, ErrTestError + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageList(ctx, "repo") + So(err, ShouldNotBeNil) + }) + + Convey("ImageList subpaths getImageList() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return []string{"sub1/repo"}, nil + }, + }, + SubStore: map[string]storage.ImageStore{ + "/sub1": mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return nil, ErrTestError + }, + }, + }, + }, + mocks.RepoDBMock{}, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.ImageList(ctx, "repo") + So(err, ShouldNotBeNil) + }) + + Convey("DerivedImageList ExpandedRepoInfo() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return []string{"sub1/repo"}, nil + }, + GetImageManifestFn: func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte("{}"), 
"digest", "str", nil + }, + }, + }, + mocks.RepoDBMock{ + GetRepoMetaFn: func(repo string) (repodb.RepoMetadata, error) { + return repodb.RepoMetadata{}, ErrTestError + }, + }, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.DerivedImageList(ctx, "repo:tag") + So(err, ShouldNotBeNil) + }) + + Convey("BaseImageList ExpandedRepoInfo() errors", func() { + resolverConfig := NewResolver( + log, + storage.StoreController{ + DefaultStore: mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return []string{"sub1/repo"}, nil + }, + GetImageManifestFn: func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte("{}"), "digest", "str", nil + }, + }, + }, + mocks.RepoDBMock{ + GetRepoMetaFn: func(repo string) (repodb.RepoMetadata, error) { + return repodb.RepoMetadata{}, ErrTestError + }, + }, + mocks.CveInfoMock{}, + ) + + qr := queryResolver{ + resolverConfig, + } + + _, err := qr.BaseImageList(ctx, "repo:tag") + So(err, ShouldNotBeNil) + }) + }) +} diff --git a/pkg/extensions/search/schema.graphql b/pkg/extensions/search/schema.graphql index ca78847e..eac9bf59 100644 --- a/pkg/extensions/search/schema.graphql +++ b/pkg/extensions/search/schema.graphql @@ -42,6 +42,7 @@ type RepoInfo { Search everything. Can search Images, Repos and Layers """ type GlobalSearchResult { + Page: PageInfo Images: [ImageSummary] Repos: [RepoSummary] Layers: [LayerSummary] @@ -66,7 +67,7 @@ type ImageSummary { DownloadCount: Int Layers: [LayerSummary] Description: String - Licenses: String + Licenses: String # The value of the annotation if present, 'unknown' otherwise). Labels: String Title: String Source: String @@ -92,10 +93,11 @@ type RepoSummary { Platforms: [OsArch] Vendors: [String] Score: Int - NewestImage: ImageSummary + NewestImage: ImageSummary # Newest based on created timestamp DownloadCount: Int StarCount: Int IsBookmarked: Boolean + IsStarred: Boolean } # Currently the same as LayerInfo, we can refactor later @@ -155,6 +157,35 @@ type OsArch { Arch: String } +enum SortCriteria { + RELEVANCE + UPDATE_TIME + ALPHABETIC_ASC + ALPHABETIC_DSC + STARS + DOWNLOADS +} + +type PageInfo { + ObjectCount: Int! + PreviousPage: Int + NextPage: Int + Pages: Int +} + +# Pagination parameters +input PageInput { + limit: Int + offset: Int + sortBy: SortCriteria +} + +input Filter { + Os: [String] + Arch: [String] + HasToBeSigned: Boolean +} + type Query { """ Returns a CVE list for the image specified in the arugment @@ -179,7 +210,7 @@ type Query { """ Returns a list of repos with the newest tag within """ - RepoListWithNewestImage: [RepoSummary!]! # Newest based on created timestamp + RepoListWithNewestImage(requestedPage: PageInput): [RepoSummary!]! # Newest based on created timestamp """ Returns all the images from the specified repo @@ -194,7 +225,7 @@ type Query { """ Searches within repos, images, and layers """ - GlobalSearch(query: String!): GlobalSearchResult! + GlobalSearch(query: String!, filter: Filter, requestedPage: PageInput): GlobalSearchResult! 
""" List of images which use the argument image diff --git a/pkg/extensions/search/schema.resolvers.go b/pkg/extensions/search/schema.resolvers.go index 66d81d3d..a933da7c 100644 --- a/pkg/extensions/search/schema.resolvers.go +++ b/pkg/extensions/search/schema.resolvers.go @@ -6,26 +6,26 @@ package search import ( "context" - "fmt" "github.com/vektah/gqlparser/v2/gqlerror" "zotregistry.io/zot/pkg/extensions/search/common" + "zotregistry.io/zot/pkg/extensions/search/convert" "zotregistry.io/zot/pkg/extensions/search/gql_generated" ) // CVEListForImage is the resolver for the CVEListForImage field. func (r *queryResolver) CVEListForImage(ctx context.Context, image string) (*gql_generated.CVEResultForImage, error) { - cveidMap, err := r.cveInfo.GetCVEListForImage(image) - if err != nil { - return &gql_generated.CVEResultForImage{}, err - } - _, copyImgTag := common.GetImageDirAndTag(image) if copyImgTag == "" { return &gql_generated.CVEResultForImage{}, gqlerror.Errorf("no reference provided") } + cveidMap, err := r.cveInfo.GetCVEListForImage(image) + if err != nil { + return &gql_generated.CVEResultForImage{}, err + } + cveids := []*gql_generated.Cve{} for id, cveDetail := range cveidMap { @@ -95,7 +95,13 @@ func (r *queryResolver) ImageListForCve(ctx context.Context, id string) ([]*gql_ } isSigned := olu.CheckManifestSignature(repo, imageByCVE.Digest) - imageInfo := BuildImageInfo(repo, imageByCVE.Tag, imageByCVE.Digest, imageByCVE.Manifest, imageConfig, isSigned) + imageInfo := convert.BuildImageInfo( + repo, imageByCVE.Tag, + imageByCVE.Digest, + imageByCVE.Manifest, + imageConfig, + isSigned, + ) affectedImages = append( affectedImages, @@ -135,7 +141,7 @@ func (r *queryResolver) ImageListWithCVEFixed(ctx context.Context, id string, im } isSigned := olu.CheckManifestSignature(image, digest) - imageInfo := BuildImageInfo(image, tag.Name, digest, manifest, imageConfig, isSigned) + imageInfo := convert.BuildImageInfo(image, tag.Name, digest, manifest, imageConfig, isSigned) unaffectedImages = append(unaffectedImages, imageInfo) } @@ -192,41 +198,12 @@ func (r *queryResolver) ImageListForDigest(ctx context.Context, id string) ([]*g } // RepoListWithNewestImage is the resolver for the RepoListWithNewestImage field. -func (r *queryResolver) RepoListWithNewestImage(ctx context.Context) ([]*gql_generated.RepoSummary, error) { +func (r *queryResolver) RepoListWithNewestImage(ctx context.Context, requestedPage *gql_generated.PageInput) ([]*gql_generated.RepoSummary, error) { r.log.Info().Msg("extension api: finding image list") - olu := common.NewBaseOciLayoutUtils(r.storeController, r.log) - - reposSummary := make([]*gql_generated.RepoSummary, 0) - - repoList := []string{} - - defaultRepoList, err := r.storeController.DefaultStore.GetRepositories() + reposSummary, err := repoListWithNewestImage(ctx, r.cveInfo, r.log, requestedPage, r.repoDB) if err != nil { - r.log.Error().Err(err).Msg("extension api: error extracting default store repo list") - - return reposSummary, err - } - - if len(defaultRepoList) > 0 { - repoList = append(repoList, defaultRepoList...) - } - - subStore := r.storeController.SubStore - for _, store := range subStore { - subRepoList, err := store.GetRepositories() - if err != nil { - r.log.Error().Err(err).Msg("extension api: error extracting substore repo list") - - return reposSummary, err - } - - repoList = append(repoList, subRepoList...) 
- } - - reposSummary, err = repoListWithNewestImage(ctx, repoList, olu, r.cveInfo, r.log) - if err != nil { - r.log.Error().Err(err).Msg("extension api: error extracting substore image list") + r.log.Error().Err(err).Msg("unable to retrieve repo list") return reposSummary, err } @@ -273,137 +250,27 @@ func (r *queryResolver) ImageList(ctx context.Context, repo string) ([]*gql_gene // ExpandedRepoInfo is the resolver for the ExpandedRepoInfo field. func (r *queryResolver) ExpandedRepoInfo(ctx context.Context, repo string) (*gql_generated.RepoInfo, error) { - olu := common.NewBaseOciLayoutUtils(r.storeController, r.log) + repoInfo, err := expandedRepoInfo(ctx, repo, r.repoDB, r.cveInfo, r.log) - origRepoInfo, err := olu.GetExpandedRepoInfo(repo) - if err != nil { - r.log.Error().Err(err).Msgf("error getting repo '%s'", repo) - - return &gql_generated.RepoInfo{}, err - } - - // repos type is of common deep copy this to search - repoInfo := &gql_generated.RepoInfo{} - - images := make([]*gql_generated.ImageSummary, 0) - - summary := &gql_generated.RepoSummary{} - - summary.LastUpdated = &origRepoInfo.Summary.LastUpdated - summary.Name = &origRepoInfo.Summary.Name - summary.Platforms = []*gql_generated.OsArch{} - summary.NewestImage = &gql_generated.ImageSummary{ - RepoName: &origRepoInfo.Summary.NewestImage.RepoName, - Tag: &origRepoInfo.Summary.NewestImage.Tag, - LastUpdated: &origRepoInfo.Summary.NewestImage.LastUpdated, - Digest: &origRepoInfo.Summary.NewestImage.Digest, - ConfigDigest: &origRepoInfo.Summary.NewestImage.ConfigDigest, - IsSigned: &origRepoInfo.Summary.NewestImage.IsSigned, - Size: &origRepoInfo.Summary.NewestImage.Size, - Platform: &gql_generated.OsArch{ - Os: &origRepoInfo.Summary.NewestImage.Platform.Os, - Arch: &origRepoInfo.Summary.NewestImage.Platform.Arch, - }, - Vendor: &origRepoInfo.Summary.NewestImage.Vendor, - Score: &origRepoInfo.Summary.NewestImage.Score, - Description: &origRepoInfo.Summary.NewestImage.Description, - Title: &origRepoInfo.Summary.NewestImage.Title, - Documentation: &origRepoInfo.Summary.NewestImage.Documentation, - Licenses: &origRepoInfo.Summary.NewestImage.Licenses, - Labels: &origRepoInfo.Summary.NewestImage.Labels, - Source: &origRepoInfo.Summary.NewestImage.Source, - } - - for _, platform := range origRepoInfo.Summary.Platforms { - platform := platform - - summary.Platforms = append(summary.Platforms, &gql_generated.OsArch{ - Os: &platform.Os, - Arch: &platform.Arch, - }) - } - - summary.Size = &origRepoInfo.Summary.Size - - for _, vendor := range origRepoInfo.Summary.Vendors { - vendor := vendor - summary.Vendors = append(summary.Vendors, &vendor) - } - - score := -1 // score not relevant for this query - summary.Score = &score - - for _, image := range origRepoInfo.ImageSummaries { - tag := image.Tag - digest := image.Digest - configDigest := image.ConfigDigest - isSigned := image.IsSigned - size := image.Size - - imageSummary := &gql_generated.ImageSummary{ - Tag: &tag, - Digest: &digest, - ConfigDigest: &configDigest, - IsSigned: &isSigned, - RepoName: &repo, - } - - layers := make([]*gql_generated.LayerSummary, 0) - - for _, l := range image.Layers { - size := l.Size - digest := l.Digest - - layerInfo := &gql_generated.LayerSummary{Digest: &digest, Size: &size} - - layers = append(layers, layerInfo) - } - - imageSummary.Layers = layers - imageSummary.Size = &size - images = append(images, imageSummary) - } - - repoInfo.Summary = summary - repoInfo.Images = images - - return repoInfo, nil + return repoInfo, err } // GlobalSearch is the 
resolver for the GlobalSearch field. -func (r *queryResolver) GlobalSearch(ctx context.Context, query string) (*gql_generated.GlobalSearchResult, error) { - query = cleanQuerry(query) - defaultStore := r.storeController.DefaultStore - olu := common.NewBaseOciLayoutUtils(r.storeController, r.log) - - var name, tag string - - _, err := fmt.Sscanf(query, "%s %s", &name, &tag) - if err != nil { - name = query - } - - repoList, err := defaultStore.GetRepositories() - if err != nil { - r.log.Error().Err(err).Msg("unable to search repositories") - +func (r *queryResolver) GlobalSearch(ctx context.Context, query string, filter *gql_generated.Filter, requestedPage *gql_generated.PageInput) (*gql_generated.GlobalSearchResult, error) { + if err := validateGlobalSearchInput(query, filter, requestedPage); err != nil { return &gql_generated.GlobalSearchResult{}, err } - availableRepos, err := userAvailableRepos(ctx, repoList) - if err != nil { - r.log.Error().Err(err).Msg("unable to filter user available repositories") + query = cleanQuery(query) + filter = cleanFilter(filter) - return &gql_generated.GlobalSearchResult{}, err - } - - repos, images, layers := globalSearch(availableRepos, name, tag, olu, r.cveInfo, r.log) + repos, images, layers, err := globalSearch(ctx, query, r.repoDB, filter, requestedPage, r.cveInfo, r.log) return &gql_generated.GlobalSearchResult{ Images: images, Repos: repos, Layers: layers, - }, nil + }, err } // DependencyListForImage is the resolver for the DependencyListForImage field. @@ -563,23 +430,12 @@ func (r *queryResolver) BaseImageList(ctx context.Context, image string) ([]*gql // Image is the resolver for the Image field. func (r *queryResolver) Image(ctx context.Context, image string) (*gql_generated.ImageSummary, error) { repo, tag := common.GetImageDirAndTag(image) - layoutUtils := common.NewBaseOciLayoutUtils(r.storeController, r.log) if tag == "" { return &gql_generated.ImageSummary{}, gqlerror.Errorf("no reference provided") } - digest, manifest, imageConfig, err := extractImageDetails(ctx, layoutUtils, repo, tag, r.log) - if err != nil { - r.log.Error().Err(err).Msg("unable to get image details") - - return nil, err - } - - isSigned := layoutUtils.CheckManifestSignature(repo, digest) - result := BuildImageInfo(repo, tag, digest, *manifest, *imageConfig, isSigned) - - return result, nil + return getImageSummary(ctx, repo, tag, r.repoDB, r.cveInfo, r.log) } // Referrers is the resolver for the Referrers field. 
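For reference, a minimal client-side sketch of the extended search API defined in schema.graphql above. Only the argument and result shapes (query, filter, requestedPage, Page/Repos/Images, RepoListWithNewestImage) come from the schema in this change; the endpoint path, host/port, and the sample repo/tag values are assumptions about a locally running zot instance, not part of this diff.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Argument and selection shapes follow schema.graphql; values are illustrative.
	const query = `{
	  GlobalSearch(
	    query: "repo1:1.0.1",
	    filter: {Os: ["linux"], HasToBeSigned: false},
	    requestedPage: {limit: 10, offset: 0, sortBy: UPDATE_TIME}
	  ) {
	    Page { ObjectCount NextPage }
	    Repos { Name NewestImage { Tag } }
	    Images { RepoName Tag }
	  }
	  RepoListWithNewestImage(requestedPage: {limit: 2, sortBy: UPDATE_TIME}) {
	    Name
	    LastUpdated
	  }
	}`

	body, err := json.Marshal(map[string]string{"query": query})
	if err != nil {
		panic(err)
	}

	// The search endpoint path and address below are assumptions, not taken from this diff.
	resp, err := http.Post("http://localhost:8080/v2/_zot/ext/search", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}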
diff --git a/pkg/meta/repodb/boltdb-wrapper/boltdb_wrapper.go b/pkg/meta/repodb/boltdb-wrapper/boltdb_wrapper.go new file mode 100644 index 00000000..00997119 --- /dev/null +++ b/pkg/meta/repodb/boltdb-wrapper/boltdb_wrapper.go @@ -0,0 +1,893 @@ +package bolt + +import ( + "context" + "encoding/json" + "os" + "path" + "strings" + "time" + + godigest "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/rs/zerolog" + bolt "go.etcd.io/bbolt" + + zerr "zotregistry.io/zot/errors" + "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" + "zotregistry.io/zot/pkg/meta/repodb/common" + "zotregistry.io/zot/pkg/meta/repodb/version" + localCtx "zotregistry.io/zot/pkg/requestcontext" +) + +type DBParameters struct { + RootDir string +} + +type DBWrapper struct { + DB *bolt.DB + Patches []func(DB *bolt.DB) error + Log log.Logger +} + +func NewBoltDBWrapper(params DBParameters) (*DBWrapper, error) { + const perms = 0o600 + + boltDB, err := bolt.Open(path.Join(params.RootDir, "repo.db"), perms, &bolt.Options{Timeout: time.Second * 10}) + if err != nil { + return nil, err + } + + err = boltDB.Update(func(transaction *bolt.Tx) error { + versionBuck, err := transaction.CreateBucketIfNotExists([]byte(repodb.VersionBucket)) + if err != nil { + return err + } + + err = versionBuck.Put([]byte(version.DBVersionKey), []byte(version.CurrentVersion)) + if err != nil { + return err + } + + _, err = transaction.CreateBucketIfNotExists([]byte(repodb.ManifestDataBucket)) + if err != nil { + return err + } + + _, err = transaction.CreateBucketIfNotExists([]byte(repodb.RepoMetadataBucket)) + if err != nil { + return err + } + + return nil + }) + if err != nil { + return nil, err + } + + return &DBWrapper{ + DB: boltDB, + Patches: version.GetBoltDBPatches(), + Log: log.Logger{Logger: zerolog.New(os.Stdout)}, + }, nil +} + +func (bdw DBWrapper) SetManifestData(manifestDigest godigest.Digest, manifestData repodb.ManifestData) error { + err := bdw.DB.Update(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.ManifestDataBucket)) + + mdBlob, err := json.Marshal(manifestData) + if err != nil { + return errors.Wrapf(err, "repodb: error while calculating blob for manifest with digest %s", manifestDigest) + } + + err = buck.Put([]byte(manifestDigest), mdBlob) + if err != nil { + return errors.Wrapf(err, "repodb: error while setting manifest data with for digest %s", manifestDigest) + } + + return nil + }) + + return err +} + +func (bdw DBWrapper) GetManifestData(manifestDigest godigest.Digest) (repodb.ManifestData, error) { + var manifestData repodb.ManifestData + + err := bdw.DB.View(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.ManifestDataBucket)) + + mdBlob := buck.Get([]byte(manifestDigest)) + + if len(mdBlob) == 0 { + return zerr.ErrManifestDataNotFound + } + + err := json.Unmarshal(mdBlob, &manifestData) + if err != nil { + return errors.Wrapf(err, "repodb: error while unmashaling manifest meta for digest %s", manifestDigest) + } + + return nil + }) + + return manifestData, err +} + +func (bdw DBWrapper) SetManifestMeta(repo string, manifestDigest godigest.Digest, manifestMeta repodb.ManifestMetadata, +) error { + err := bdw.DB.Update(func(tx *bolt.Tx) error { + dataBuck := tx.Bucket([]byte(repodb.ManifestDataBucket)) + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMeta := repodb.RepoMetadata{ + Name: repo, + Tags: map[string]repodb.Descriptor{}, + Statistics: 
map[string]repodb.DescriptorStatistics{}, + Signatures: map[string]repodb.ManifestSignatures{}, + } + + repoMetaBlob := repoBuck.Get([]byte(repo)) + if len(repoMetaBlob) > 0 { + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + } + + mdBlob, err := json.Marshal(repodb.ManifestData{ + ManifestBlob: manifestMeta.ManifestBlob, + ConfigBlob: manifestMeta.ConfigBlob, + }) + if err != nil { + return errors.Wrapf(err, "repodb: error while calculating blob for manifest with digest %s", manifestDigest) + } + + err = dataBuck.Put([]byte(manifestDigest), mdBlob) + if err != nil { + return errors.Wrapf(err, "repodb: error while setting manifest meta with for digest %s", manifestDigest) + } + + updatedRepoMeta := common.UpdateManifestMeta(repoMeta, manifestDigest, manifestMeta) + + updatedRepoMetaBlob, err := json.Marshal(updatedRepoMeta) + if err != nil { + return errors.Wrapf(err, "repodb: error while calculating blob for updated repo meta '%s'", repo) + } + + return repoBuck.Put([]byte(repo), updatedRepoMetaBlob) + }) + + return err +} + +func (bdw DBWrapper) GetManifestMeta(repo string, manifestDigest godigest.Digest) (repodb.ManifestMetadata, error) { + var manifestMetadata repodb.ManifestMetadata + + err := bdw.DB.View(func(tx *bolt.Tx) error { + dataBuck := tx.Bucket([]byte(repodb.ManifestDataBucket)) + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + mdBlob := dataBuck.Get([]byte(manifestDigest)) + + if len(mdBlob) == 0 { + return zerr.ErrManifestMetaNotFound + } + + var manifestData repodb.ManifestData + + err := json.Unmarshal(mdBlob, &manifestData) + if err != nil { + return errors.Wrapf(err, "repodb: error while unmashaling manifest meta for digest %s", manifestDigest) + } + + var repoMeta repodb.RepoMetadata + + repoMetaBlob := repoBuck.Get([]byte(repo)) + if len(repoMetaBlob) > 0 { + err = json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return errors.Wrapf(err, "repodb: error while unmashaling manifest meta for digest %s", manifestDigest) + } + } + + manifestMetadata.ManifestBlob = manifestData.ManifestBlob + manifestMetadata.ConfigBlob = manifestData.ConfigBlob + manifestMetadata.DownloadCount = repoMeta.Statistics[manifestDigest.String()].DownloadCount + + manifestMetadata.Signatures = repodb.ManifestSignatures{} + if repoMeta.Signatures[manifestDigest.String()] != nil { + manifestMetadata.Signatures = repoMeta.Signatures[manifestDigest.String()] + } + + return nil + }) + + return manifestMetadata, err +} + +func (bdw DBWrapper) SetRepoTag(repo string, tag string, manifestDigest godigest.Digest, + mediaType string, +) error { + if err := common.ValidateRepoTagInput(repo, tag, manifestDigest); err != nil { + return err + } + + err := bdw.DB.Update(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMetaBlob := buck.Get([]byte(repo)) + + // object not found + if len(repoMetaBlob) == 0 { + // create a new object + repoMeta := repodb.RepoMetadata{ + Name: repo, + Tags: map[string]repodb.Descriptor{ + tag: { + Digest: manifestDigest.String(), + MediaType: mediaType, + }, + }, + Statistics: map[string]repodb.DescriptorStatistics{ + manifestDigest.String(): {DownloadCount: 0}, + }, + Signatures: map[string]repodb.ManifestSignatures{ + manifestDigest.String(): {}, + }, + } + + repoMetaBlob, err := json.Marshal(repoMeta) + if err != nil { + return err + } + + return buck.Put([]byte(repo), repoMetaBlob) + } + + // object found + var repoMeta repodb.RepoMetadata + + err := json.Unmarshal(repoMetaBlob, 
&repoMeta) + if err != nil { + return err + } + + repoMeta.Tags[tag] = repodb.Descriptor{ + Digest: manifestDigest.String(), + MediaType: mediaType, + } + + repoMetaBlob, err = json.Marshal(repoMeta) + if err != nil { + return err + } + + return buck.Put([]byte(repo), repoMetaBlob) + }) + + return err +} + +func (bdw DBWrapper) GetRepoMeta(repo string) (repodb.RepoMetadata, error) { + var repoMeta repodb.RepoMetadata + + err := bdw.DB.Update(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMetaBlob := buck.Get([]byte(repo)) + + // object not found + if repoMetaBlob == nil { + return zerr.ErrRepoMetaNotFound + } + + // object found + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + return nil + }) + + return repoMeta, err +} + +func (bdw DBWrapper) DeleteRepoTag(repo string, tag string) error { + err := bdw.DB.Update(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMetaBlob := buck.Get([]byte(repo)) + + // object not found + if repoMetaBlob == nil { + return nil + } + + // object found + var repoMeta repodb.RepoMetadata + + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + delete(repoMeta.Tags, tag) + + if len(repoMeta.Tags) == 0 { + return buck.Delete([]byte(repo)) + } + + repoMetaBlob, err = json.Marshal(repoMeta) + if err != nil { + return err + } + + return buck.Put([]byte(repo), repoMetaBlob) + }) + + return err +} + +func (bdw DBWrapper) IncrementRepoStars(repo string) error { + err := bdw.DB.Update(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMetaBlob := buck.Get([]byte(repo)) + if repoMetaBlob == nil { + return zerr.ErrRepoMetaNotFound + } + + var repoMeta repodb.RepoMetadata + + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + repoMeta.Stars++ + + repoMetaBlob, err = json.Marshal(repoMeta) + if err != nil { + return err + } + + return buck.Put([]byte(repo), repoMetaBlob) + }) + + return err +} + +func (bdw DBWrapper) DecrementRepoStars(repo string) error { + err := bdw.DB.Update(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMetaBlob := buck.Get([]byte(repo)) + if repoMetaBlob == nil { + return zerr.ErrRepoMetaNotFound + } + + var repoMeta repodb.RepoMetadata + + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + if repoMeta.Stars > 0 { + repoMeta.Stars-- + } + + repoMetaBlob, err = json.Marshal(repoMeta) + if err != nil { + return err + } + + return buck.Put([]byte(repo), repoMetaBlob) + }) + + return err +} + +func (bdw DBWrapper) GetRepoStars(repo string) (int, error) { + stars := 0 + + err := bdw.DB.View(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + buck.Get([]byte(repo)) + repoMetaBlob := buck.Get([]byte(repo)) + if repoMetaBlob == nil { + return zerr.ErrRepoMetaNotFound + } + + var repoMeta repodb.RepoMetadata + + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + stars = repoMeta.Stars + + return nil + }) + + return stars, err +} + +func (bdw DBWrapper) GetMultipleRepoMeta(ctx context.Context, filter func(repoMeta repodb.RepoMetadata) bool, + requestedPage repodb.PageInput, +) ([]repodb.RepoMetadata, error) { + var ( + foundRepos = make([]repodb.RepoMetadata, 0) + pageFinder repodb.PageFinder + ) + + pageFinder, err := repodb.NewBaseRepoPageFinder(requestedPage.Limit, requestedPage.Offset, 
requestedPage.SortBy) + if err != nil { + return nil, err + } + + err = bdw.DB.View(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + cursor := buck.Cursor() + + for repoName, repoMetaBlob := cursor.First(); repoName != nil; repoName, repoMetaBlob = cursor.Next() { + if ok, err := localCtx.RepoIsUserAvailable(ctx, string(repoName)); !ok || err != nil { + continue + } + + repoMeta := repodb.RepoMetadata{} + + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + if filter(repoMeta) { + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repoMeta, + }) + } + } + + foundRepos = pageFinder.Page() + + return nil + }) + + return foundRepos, err +} + +func (bdw DBWrapper) IncrementImageDownloads(repo string, reference string) error { + err := bdw.DB.Update(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMetaBlob := buck.Get([]byte(repo)) + if repoMetaBlob == nil { + return zerr.ErrManifestMetaNotFound + } + + var repoMeta repodb.RepoMetadata + + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + manifestDigest := reference + + if !common.ReferenceIsDigest(reference) { + // search digest for tag + descriptor, found := repoMeta.Tags[reference] + + if !found { + return zerr.ErrManifestMetaNotFound + } + + manifestDigest = descriptor.Digest + } + + manifestStatistics := repoMeta.Statistics[manifestDigest] + manifestStatistics.DownloadCount++ + repoMeta.Statistics[manifestDigest] = manifestStatistics + + repoMetaBlob, err = json.Marshal(repoMeta) + if err != nil { + return err + } + + return buck.Put([]byte(repo), repoMetaBlob) + }) + + return err +} + +func (bdw DBWrapper) AddManifestSignature(repo string, signedManifestDigest godigest.Digest, + sygMeta repodb.SignatureMetadata, +) error { + err := bdw.DB.Update(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMetaBlob := buck.Get([]byte(repo)) + if repoMetaBlob == nil { + return zerr.ErrManifestMetaNotFound + } + + var repoMeta repodb.RepoMetadata + + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + var ( + manifestSignatures repodb.ManifestSignatures + found bool + ) + + if manifestSignatures, found = repoMeta.Signatures[signedManifestDigest.String()]; !found { + manifestSignatures = repodb.ManifestSignatures{} + } + + signatureSlice := manifestSignatures[sygMeta.SignatureType] + if !common.SignatureAlreadyExists(signatureSlice, sygMeta) { + if sygMeta.SignatureType == repodb.NotationType { + signatureSlice = append(signatureSlice, repodb.SignatureInfo{ + SignatureManifestDigest: sygMeta.SignatureDigest, + LayersInfo: sygMeta.LayersInfo, + }) + } else if sygMeta.SignatureType == repodb.CosignType { + signatureSlice = []repodb.SignatureInfo{{ + SignatureManifestDigest: sygMeta.SignatureDigest, + LayersInfo: sygMeta.LayersInfo, + }} + } + } + + manifestSignatures[sygMeta.SignatureType] = signatureSlice + + repoMeta.Signatures[signedManifestDigest.String()] = manifestSignatures + + repoMetaBlob, err = json.Marshal(repoMeta) + if err != nil { + return err + } + + return buck.Put([]byte(repo), repoMetaBlob) + }) + + return err +} + +func (bdw DBWrapper) DeleteSignature(repo string, signedManifestDigest godigest.Digest, + sigMeta repodb.SignatureMetadata, +) error { + err := bdw.DB.Update(func(tx *bolt.Tx) error { + buck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMetaBlob := buck.Get([]byte(repo)) + if repoMetaBlob == nil { + 
return zerr.ErrManifestMetaNotFound + } + + var repoMeta repodb.RepoMetadata + + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + sigType := sigMeta.SignatureType + + var ( + manifestSignatures repodb.ManifestSignatures + found bool + ) + + if manifestSignatures, found = repoMeta.Signatures[signedManifestDigest.String()]; !found { + return zerr.ErrManifestMetaNotFound + } + + signatureSlice := manifestSignatures[sigType] + + newSignatureSlice := make([]repodb.SignatureInfo, 0, len(signatureSlice)-1) + + for _, sigDigest := range signatureSlice { + if sigDigest.SignatureManifestDigest != sigMeta.SignatureDigest { + newSignatureSlice = append(newSignatureSlice, sigDigest) + } + } + + manifestSignatures[sigType] = newSignatureSlice + + repoMeta.Signatures[signedManifestDigest.String()] = manifestSignatures + + repoMetaBlob, err = json.Marshal(repoMeta) + if err != nil { + return err + } + + return buck.Put([]byte(repo), repoMetaBlob) + }) + + return err +} + +func (bdw DBWrapper) SearchRepos(ctx context.Context, searchText string, filter repodb.Filter, + requestedPage repodb.PageInput, +) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + var ( + foundRepos = make([]repodb.RepoMetadata, 0) + foundManifestMetadataMap = make(map[string]repodb.ManifestMetadata) + pageFinder repodb.PageFinder + ) + + pageFinder, err := repodb.NewBaseRepoPageFinder(requestedPage.Limit, requestedPage.Offset, requestedPage.SortBy) + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, err + } + + err = bdw.DB.View(func(tx *bolt.Tx) error { + var ( + manifestMetadataMap = make(map[string]repodb.ManifestMetadata) + repoBuck = tx.Bucket([]byte(repodb.RepoMetadataBucket)) + dataBuck = tx.Bucket([]byte(repodb.ManifestDataBucket)) + ) + + cursor := repoBuck.Cursor() + + for repoName, repoMetaBlob := cursor.First(); repoName != nil; repoName, repoMetaBlob = cursor.Next() { + if ok, err := localCtx.RepoIsUserAvailable(ctx, string(repoName)); !ok || err != nil { + continue + } + + var repoMeta repodb.RepoMetadata + + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + if score := common.ScoreRepoName(searchText, string(repoName)); score != -1 { + var ( + // specific values used for sorting that need to be calculated based on all manifests from the repo + repoDownloads = 0 + repoLastUpdated time.Time + firstImageChecked = true + osSet = map[string]bool{} + archSet = map[string]bool{} + isSigned = false + ) + + for _, descriptor := range repoMeta.Tags { + var manifestMeta repodb.ManifestMetadata + + manifestMeta, manifestDownloaded := manifestMetadataMap[descriptor.Digest] + + if !manifestDownloaded { + manifestMetaBlob := dataBuck.Get([]byte(descriptor.Digest)) + if manifestMetaBlob == nil { + return zerr.ErrManifestMetaNotFound + } + + err := json.Unmarshal(manifestMetaBlob, &manifestMeta) + if err != nil { + return errors.Wrapf(err, "repodb: error while unmarshaling manifest metadata for digest %s", descriptor.Digest) + } + } + + // get fields related to filtering + var configContent ispec.Image + + err = json.Unmarshal(manifestMeta.ConfigBlob, &configContent) + if err != nil { + return errors.Wrapf(err, "repodb: error while unmarshaling config content for digest %s", descriptor.Digest) + } + + osSet[configContent.OS] = true + archSet[configContent.Architecture] = true + + // get fields related to sorting + repoDownloads += repoMeta.Statistics[descriptor.Digest].DownloadCount + + imageLastUpdated := 
common.GetImageLastUpdatedTimestamp(configContent) + + if firstImageChecked || repoLastUpdated.Before(imageLastUpdated) { + repoLastUpdated = imageLastUpdated + firstImageChecked = false + + isSigned = common.CheckIsSigned(repoMeta.Signatures[descriptor.Digest]) + } + + manifestMetadataMap[descriptor.Digest] = manifestMeta + } + + repoFilterData := repodb.FilterData{ + OsList: common.GetMapKeys(osSet), + ArchList: common.GetMapKeys(archSet), + IsSigned: isSigned, + } + + if !common.AcceptedByFilter(filter, repoFilterData) { + continue + } + + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repoMeta, + Score: score, + Downloads: repoDownloads, + UpdateTime: repoLastUpdated, + }) + } + } + + foundRepos = pageFinder.Page() + + // keep just the manifestMeta we need + for _, repoMeta := range foundRepos { + for _, manifestDigest := range repoMeta.Tags { + foundManifestMetadataMap[manifestDigest.Digest] = manifestMetadataMap[manifestDigest.Digest] + } + } + + return nil + }) + + return foundRepos, foundManifestMetadataMap, err +} + +func (bdw DBWrapper) SearchTags(ctx context.Context, searchText string, filter repodb.Filter, + requestedPage repodb.PageInput, +) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + var ( + foundRepos = make([]repodb.RepoMetadata, 0) + foundManifestMetadataMap = make(map[string]repodb.ManifestMetadata) + + pageFinder repodb.PageFinder + ) + + pageFinder, err := repodb.NewBaseImagePageFinder(requestedPage.Limit, requestedPage.Offset, requestedPage.SortBy) + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, err + } + + searchedRepo, searchedTag, err := common.GetRepoTag(searchText) + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, + errors.Wrap(err, "repodb: error while parsing search text, invalid format") + } + + err = bdw.DB.View(func(tx *bolt.Tx) error { + var ( + manifestMetadataMap = make(map[string]repodb.ManifestMetadata) + repoBuck = tx.Bucket([]byte(repodb.RepoMetadataBucket)) + dataBuck = tx.Bucket([]byte(repodb.ManifestDataBucket)) + cursor = repoBuck.Cursor() + ) + + repoName, repoMetaBlob := cursor.Seek([]byte(searchedRepo)) + + for ; repoName != nil; repoName, repoMetaBlob = cursor.Next() { + if ok, err := localCtx.RepoIsUserAvailable(ctx, string(repoName)); !ok || err != nil { + continue + } + + repoMeta := repodb.RepoMetadata{} + + err := json.Unmarshal(repoMetaBlob, &repoMeta) + if err != nil { + return err + } + + if string(repoName) == searchedRepo { + matchedTags := make(map[string]repodb.Descriptor) + // take all manifestMetas + for tag, descriptor := range repoMeta.Tags { + if !strings.HasPrefix(tag, searchedTag) { + continue + } + + matchedTags[tag] = descriptor + + // in case tags reference the same manifest we don't download from DB multiple times + if manifestMeta, manifestExists := manifestMetadataMap[descriptor.Digest]; manifestExists { + manifestMetadataMap[descriptor.Digest] = manifestMeta + + continue + } + + manifestMetaBlob := dataBuck.Get([]byte(descriptor.Digest)) + if manifestMetaBlob == nil { + return zerr.ErrManifestMetaNotFound + } + + var manifestMeta repodb.ManifestMetadata + + err := json.Unmarshal(manifestMetaBlob, &manifestMeta) + if err != nil { + return errors.Wrapf(err, "repodb: error while unmashaling manifest metadata for digest %s", descriptor.Digest) + } + + var configContent ispec.Image + + err = json.Unmarshal(manifestMeta.ConfigBlob, &configContent) + if err != nil { + return errors.Wrapf(err, "repodb: error while 
unmashaling manifest metadata for digest %s", descriptor.Digest) + } + + imageFilterData := repodb.FilterData{ + OsList: []string{configContent.OS}, + ArchList: []string{configContent.Architecture}, + IsSigned: false, + } + + if !common.AcceptedByFilter(filter, imageFilterData) { + delete(matchedTags, tag) + delete(manifestMetadataMap, descriptor.Digest) + + continue + } + + manifestMetadataMap[descriptor.Digest] = manifestMeta + } + + repoMeta.Tags = matchedTags + + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repoMeta, + }) + } + } + + foundRepos = pageFinder.Page() + + // keep just the manifestMeta we need + for _, repoMeta := range foundRepos { + for _, descriptor := range repoMeta.Tags { + foundManifestMetadataMap[descriptor.Digest] = manifestMetadataMap[descriptor.Digest] + } + } + + return nil + }) + + return foundRepos, foundManifestMetadataMap, err +} + +func (bdw *DBWrapper) PatchDB() error { + var DBVersion string + + err := bdw.DB.View(func(tx *bolt.Tx) error { + versionBuck := tx.Bucket([]byte(repodb.VersionBucket)) + DBVersion = string(versionBuck.Get([]byte(version.DBVersionKey))) + + return nil + }) + if err != nil { + return errors.Wrapf(err, "patching the database failed, can't read db version") + } + + if version.GetVersionIndex(DBVersion) == -1 { + return errors.New("DB has broken format, no version found") + } + + for patchIndex, patch := range bdw.Patches { + if patchIndex < version.GetVersionIndex(DBVersion) { + continue + } + + err := patch(bdw.DB) + if err != nil { + return err + } + } + + return nil +} diff --git a/pkg/meta/repodb/boltdb-wrapper/boltdb_wrapper_test.go b/pkg/meta/repodb/boltdb-wrapper/boltdb_wrapper_test.go new file mode 100644 index 00000000..bb1d55c6 --- /dev/null +++ b/pkg/meta/repodb/boltdb-wrapper/boltdb_wrapper_test.go @@ -0,0 +1,479 @@ +package bolt_test + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + . 
"github.com/smartystreets/goconvey/convey" + "go.etcd.io/bbolt" + + "zotregistry.io/zot/pkg/meta/repodb" + bolt "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" +) + +func TestWrapperErrors(t *testing.T) { + Convey("Errors", t, func() { + tmpDir := t.TempDir() + boltDBParams := bolt.DBParameters{RootDir: tmpDir} + boltdbWrapper, err := bolt.NewBoltDBWrapper(boltDBParams) + defer os.Remove("repo.db") + So(boltdbWrapper, ShouldNotBeNil) + So(err, ShouldBeNil) + + repoMeta := repodb.RepoMetadata{ + Tags: map[string]repodb.Descriptor{}, + Signatures: map[string]repodb.ManifestSignatures{}, + } + + repoMetaBlob, err := json.Marshal(repoMeta) + So(err, ShouldBeNil) + + Convey("GetManifestData", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + dataBuck := tx.Bucket([]byte(repodb.ManifestDataBucket)) + + return dataBuck.Put([]byte("digest1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + _, err = boltdbWrapper.GetManifestData("digest1") + So(err, ShouldNotBeNil) + + _, err = boltdbWrapper.GetManifestMeta("repo1", "digest1") + So(err, ShouldNotBeNil) + }) + + Convey("SetManifestMeta", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + dataBuck := tx.Bucket([]byte(repodb.ManifestDataBucket)) + + err := dataBuck.Put([]byte("digest1"), repoMetaBlob) + if err != nil { + return err + } + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.SetManifestMeta("repo1", "digest1", repodb.ManifestMetadata{}) + So(err, ShouldNotBeNil) + + _, err = boltdbWrapper.GetManifestMeta("repo1", "digest1") + So(err, ShouldNotBeNil) + }) + + Convey("SetRepoTag", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.SetRepoTag("repo1", "tag", "digest", ispec.MediaTypeImageManifest) + So(err, ShouldNotBeNil) + }) + + Convey("DeleteRepoTag", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.DeleteRepoTag("repo1", "tag") + So(err, ShouldNotBeNil) + }) + + Convey("IncrementRepoStars", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.IncrementRepoStars("repo2") + So(err, ShouldNotBeNil) + + err = boltdbWrapper.IncrementRepoStars("repo1") + So(err, ShouldNotBeNil) + }) + + Convey("DecrementRepoStars", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.DecrementRepoStars("repo2") + So(err, ShouldNotBeNil) + + err = boltdbWrapper.DecrementRepoStars("repo1") + So(err, ShouldNotBeNil) + }) + + Convey("GetRepoStars", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + _, err = boltdbWrapper.GetRepoStars("repo1") + So(err, ShouldNotBeNil) + }) + 
+ Convey("GetMultipleRepoMeta", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + _, err = boltdbWrapper.GetMultipleRepoMeta(context.TODO(), func(repoMeta repodb.RepoMetadata) bool { + return true + }, repodb.PageInput{}) + So(err, ShouldNotBeNil) + }) + + Convey("IncrementImageDownloads", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.IncrementImageDownloads("repo2", "tag") + So(err, ShouldNotBeNil) + + err = boltdbWrapper.IncrementImageDownloads("repo1", "tag") + So(err, ShouldNotBeNil) + + err = boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), repoMetaBlob) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.IncrementImageDownloads("repo1", "tag") + So(err, ShouldNotBeNil) + }) + + Convey("AddManifestSignature", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.AddManifestSignature("repo2", digest.FromString("dig"), + repodb.SignatureMetadata{}) + So(err, ShouldNotBeNil) + + err = boltdbWrapper.AddManifestSignature("repo1", digest.FromString("dig"), + repodb.SignatureMetadata{}) + So(err, ShouldNotBeNil) + + err = boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), repoMetaBlob) + }) + So(err, ShouldBeNil) + + // signatures not found + err = boltdbWrapper.AddManifestSignature("repo1", digest.FromString("dig"), + repodb.SignatureMetadata{}) + So(err, ShouldBeNil) + + // + err = boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMeta := repodb.RepoMetadata{ + Tags: map[string]repodb.Descriptor{}, + Signatures: map[string]repodb.ManifestSignatures{ + "digest1": { + "cosgin": {{}}, + }, + "digest2": { + "notation": {{}}, + }, + }, + } + + repoMetaBlob, err := json.Marshal(repoMeta) + So(err, ShouldBeNil) + + return repoBuck.Put([]byte("repo1"), repoMetaBlob) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.AddManifestSignature("repo1", digest.FromString("dig"), + repodb.SignatureMetadata{ + SignatureType: "cosign", + SignatureDigest: "digest1", + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.AddManifestSignature("repo1", digest.FromString("dig"), + repodb.SignatureMetadata{ + SignatureType: "notation", + SignatureDigest: "digest2", + }) + So(err, ShouldBeNil) + }) + + Convey("DeleteSignature", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.DeleteSignature("repo2", digest.FromString("dig"), + repodb.SignatureMetadata{}) + So(err, ShouldNotBeNil) + + err = boltdbWrapper.DeleteSignature("repo1", digest.FromString("dig"), + repodb.SignatureMetadata{}) + So(err, ShouldNotBeNil) + + err = boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := 
tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + repoMeta := repodb.RepoMetadata{ + Tags: map[string]repodb.Descriptor{}, + Signatures: map[string]repodb.ManifestSignatures{ + "digest1": { + "cosign": []repodb.SignatureInfo{ + { + SignatureManifestDigest: "sigDigest1", + }, + { + SignatureManifestDigest: "sigDigest2", + }, + }, + }, + "digest2": { + "notation": {{}}, + }, + }, + } + + repoMetaBlob, err := json.Marshal(repoMeta) + So(err, ShouldBeNil) + + return repoBuck.Put([]byte("repo1"), repoMetaBlob) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.DeleteSignature("repo1", "digest1", + repodb.SignatureMetadata{ + SignatureType: "cosign", + SignatureDigest: "sigDigest2", + }) + So(err, ShouldBeNil) + }) + + Convey("SearchRepos", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + _, _, err = boltdbWrapper.SearchRepos(context.Background(), "", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldNotBeNil) + + err = boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + dataBuck := tx.Bucket([]byte(repodb.ManifestDataBucket)) + + err := dataBuck.Put([]byte("dig1"), []byte("wrong json")) + if err != nil { + return err + } + + repoMeta := repodb.RepoMetadata{ + Tags: map[string]repodb.Descriptor{ + "tag1": {Digest: "dig1", MediaType: ispec.MediaTypeImageManifest}, + }, + Signatures: map[string]repodb.ManifestSignatures{}, + } + repoMetaBlob, err := json.Marshal(repoMeta) + So(err, ShouldBeNil) + + err = repoBuck.Put([]byte("repo1"), repoMetaBlob) + if err != nil { + return err + } + + repoMeta = repodb.RepoMetadata{ + Tags: map[string]repodb.Descriptor{ + "tag2": {Digest: "dig2", MediaType: ispec.MediaTypeImageManifest}, + }, + Signatures: map[string]repodb.ManifestSignatures{}, + } + repoMetaBlob, err = json.Marshal(repoMeta) + So(err, ShouldBeNil) + + return repoBuck.Put([]byte("repo2"), repoMetaBlob) + }) + So(err, ShouldBeNil) + + _, _, err = boltdbWrapper.SearchRepos(context.Background(), "repo1", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldNotBeNil) + + _, _, err = boltdbWrapper.SearchRepos(context.Background(), "repo2", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldNotBeNil) + + err = boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + dataBuck := tx.Bucket([]byte(repodb.ManifestDataBucket)) + + manifestMeta := repodb.ManifestMetadata{ + ManifestBlob: []byte("{}"), + ConfigBlob: []byte("wrong json"), + Signatures: repodb.ManifestSignatures{}, + } + + manifestMetaBlob, err := json.Marshal(manifestMeta) + if err != nil { + return err + } + + err = dataBuck.Put([]byte("dig1"), manifestMetaBlob) + if err != nil { + return err + } + + repoMeta = repodb.RepoMetadata{ + Tags: map[string]repodb.Descriptor{ + "tag1": {Digest: "dig1", MediaType: ispec.MediaTypeImageManifest}, + }, + Signatures: map[string]repodb.ManifestSignatures{}, + } + repoMetaBlob, err = json.Marshal(repoMeta) + So(err, ShouldBeNil) + + return repoBuck.Put([]byte("repo1"), repoMetaBlob) + }) + So(err, ShouldBeNil) + + _, _, err = boltdbWrapper.SearchRepos(context.Background(), "repo1", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldNotBeNil) + }) + + Convey("SearchTags", func() { + ctx := context.Background() + + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck :=
tx.Bucket([]byte(repodb.RepoMetadataBucket)) + + return repoBuck.Put([]byte("repo1"), []byte("wrong json")) + }) + So(err, ShouldBeNil) + + _, _, err = boltdbWrapper.SearchTags(ctx, "", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldNotBeNil) + + _, _, err = boltdbWrapper.SearchTags(ctx, "repo1:", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldNotBeNil) + + err = boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + repoBuck := tx.Bucket([]byte(repodb.RepoMetadataBucket)) + dataBuck := tx.Bucket([]byte(repodb.ManifestDataBucket)) + + manifestMeta := repodb.ManifestMetadata{ + ManifestBlob: []byte("{}"), + ConfigBlob: []byte("wrong json"), + Signatures: repodb.ManifestSignatures{}, + } + + manifestMetaBlob, err := json.Marshal(manifestMeta) + if err != nil { + return err + } + + err = dataBuck.Put([]byte("dig1"), manifestMetaBlob) + if err != nil { + return err + } + + err = dataBuck.Put([]byte("wrongManifestData"), []byte("wrong json")) + if err != nil { + return err + } + + // manifest data doesn't exist + repoMeta = repodb.RepoMetadata{ + Tags: map[string]repodb.Descriptor{ + "tag2": {Digest: "dig2", MediaType: ispec.MediaTypeImageManifest}, + }, + Signatures: map[string]repodb.ManifestSignatures{}, + } + repoMetaBlob, err = json.Marshal(repoMeta) + So(err, ShouldBeNil) + + err = repoBuck.Put([]byte("repo1"), repoMetaBlob) + if err != nil { + return err + } + + // manifest data is wrong + repoMeta = repodb.RepoMetadata{ + Tags: map[string]repodb.Descriptor{ + "tag2": {Digest: "wrongManifestData", MediaType: ispec.MediaTypeImageManifest}, + }, + Signatures: map[string]repodb.ManifestSignatures{}, + } + repoMetaBlob, err = json.Marshal(repoMeta) + So(err, ShouldBeNil) + + err = repoBuck.Put([]byte("repo2"), repoMetaBlob) + if err != nil { + return err + } + + repoMeta = repodb.RepoMetadata{ + Tags: map[string]repodb.Descriptor{ + "tag1": {Digest: "dig1", MediaType: ispec.MediaTypeImageManifest}, + }, + Signatures: map[string]repodb.ManifestSignatures{}, + } + repoMetaBlob, err = json.Marshal(repoMeta) + So(err, ShouldBeNil) + + return repoBuck.Put([]byte("repo3"), repoMetaBlob) + }) + So(err, ShouldBeNil) + + _, _, err = boltdbWrapper.SearchTags(ctx, "repo1:", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldNotBeNil) + + _, _, err = boltdbWrapper.SearchTags(ctx, "repo2:", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldNotBeNil) + + _, _, err = boltdbWrapper.SearchTags(ctx, "repo3:", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldNotBeNil) + }) + }) +} diff --git a/pkg/meta/repodb/common.go b/pkg/meta/repodb/common.go new file mode 100644 index 00000000..43748208 --- /dev/null +++ b/pkg/meta/repodb/common.go @@ -0,0 +1,57 @@ +package repodb + +import ( + "time" +) + +// DetailedRepoMeta is an auxiliary structure used for sorting RepoMeta arrays by information +// that's not directly available in the RepoMetadata structure (e.g. values that need to be calculated +// by iterating over the manifests).
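+// For example, SearchRepos aggregates each repo's download count and most recent image update time across its manifests and stores them here so the page finder can sort results without re-reading manifest data.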
+type DetailedRepoMeta struct { + RepoMeta RepoMetadata + Score int + Downloads int + UpdateTime time.Time +} + +func SortFunctions() map[SortCriteria]func(pageBuffer []DetailedRepoMeta) func(i, j int) bool { + return map[SortCriteria]func(pageBuffer []DetailedRepoMeta) func(i, j int) bool{ + AlphabeticAsc: SortByAlphabeticAsc, + AlphabeticDsc: SortByAlphabeticDsc, + Relevance: SortByRelevance, + UpdateTime: SortByUpdateTime, + Downloads: SortByDownloads, + } +} + +func SortByAlphabeticAsc(pageBuffer []DetailedRepoMeta) func(i, j int) bool { + return func(i, j int) bool { + return pageBuffer[i].RepoMeta.Name < pageBuffer[j].RepoMeta.Name + } +} + +func SortByAlphabeticDsc(pageBuffer []DetailedRepoMeta) func(i, j int) bool { + return func(i, j int) bool { + return pageBuffer[i].RepoMeta.Name > pageBuffer[j].RepoMeta.Name + } +} + +func SortByRelevance(pageBuffer []DetailedRepoMeta) func(i, j int) bool { + return func(i, j int) bool { + return pageBuffer[i].Score < pageBuffer[j].Score + } +} + +// SortByUpdateTime returns a comparison function for sorting descending by update time. +func SortByUpdateTime(pageBuffer []DetailedRepoMeta) func(i, j int) bool { + return func(i, j int) bool { + return pageBuffer[i].UpdateTime.After(pageBuffer[j].UpdateTime) + } +} + +// SortByDownloads returns a comparison function for descending sorting by downloads. +func SortByDownloads(pageBuffer []DetailedRepoMeta) func(i, j int) bool { + return func(i, j int) bool { + return pageBuffer[i].Downloads > pageBuffer[j].Downloads + } +} diff --git a/pkg/meta/repodb/common/common.go b/pkg/meta/repodb/common/common.go new file mode 100644 index 00000000..d76492cd --- /dev/null +++ b/pkg/meta/repodb/common/common.go @@ -0,0 +1,199 @@ +package common + +import ( + "strings" + "time" + + godigest "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + + zerr "zotregistry.io/zot/errors" + "zotregistry.io/zot/pkg/meta/repodb" +) + +func UpdateManifestMeta(repoMeta repodb.RepoMetadata, manifestDigest godigest.Digest, + manifestMeta repodb.ManifestMetadata, +) repodb.RepoMetadata { + updatedRepoMeta := repoMeta + + updatedStatistics := repoMeta.Statistics[manifestDigest.String()] + updatedStatistics.DownloadCount = manifestMeta.DownloadCount + updatedRepoMeta.Statistics[manifestDigest.String()] = updatedStatistics + + if manifestMeta.Signatures == nil { + manifestMeta.Signatures = repodb.ManifestSignatures{} + } + + updatedRepoMeta.Signatures[manifestDigest.String()] = manifestMeta.Signatures + + return updatedRepoMeta +} + +func SignatureAlreadyExists(signatureSlice []repodb.SignatureInfo, sm repodb.SignatureMetadata) bool { + for _, sigInfo := range signatureSlice { + if sm.SignatureDigest == sigInfo.SignatureManifestDigest { + return true + } + } + + return false +} + +func ReferenceIsDigest(reference string) bool { + _, err := godigest.Parse(reference) + + return err == nil +} + +func ValidateRepoTagInput(repo, tag string, manifestDigest godigest.Digest) error { + if repo == "" { + return zerr.ErrEmptyRepoName + } + + if tag == "" { + return zerr.ErrEmptyTag + } + + if manifestDigest == "" { + return zerr.ErrEmptyDigest + } + + return nil +} + +func ScoreRepoName(searchText string, repoName string) int { + searchTextSlice := strings.Split(searchText, "/") + repoNameSlice := strings.Split(repoName, "/") + + if len(searchTextSlice) > len(repoNameSlice) { + return -1 + } + + if len(searchTextSlice) == 1 { + // check if it matches the first or last name in the path + if index := strings.Index(repoNameSlice[len(repoNameSlice)-1],
searchTextSlice[0]); index != -1 { + return index + 1 + } + + // we'll make repos that match the first name in path less important than matching the last name in path + if index := strings.Index(repoNameSlice[0], searchTextSlice[0]); index != -1 { + return (index + 1) * 10 + } + + return -1 + } + + if len(searchTextSlice) < len(repoNameSlice) && + strings.HasPrefix(repoName, searchText) { + return 1 + } + + // searchText and repoName match perfectly up until the last name in path + for i := 0; i < len(searchTextSlice)-1; i++ { + if searchTextSlice[i] != repoNameSlice[i] { + return -1 + } + } + + // check the last name in the path + if index := strings.Index(repoNameSlice[len(repoNameSlice)-1], searchTextSlice[len(searchTextSlice)-1]); index != -1 { + return (index + 1) + } + + return -1 +} + +func GetImageLastUpdatedTimestamp(configContent ispec.Image) time.Time { + var timeStamp *time.Time + + if configContent.Created != nil && !configContent.Created.IsZero() { + return *configContent.Created + } + + if len(configContent.History) != 0 { + timeStamp = configContent.History[len(configContent.History)-1].Created + } + + if timeStamp == nil { + timeStamp = &time.Time{} + } + + return *timeStamp +} + +func CheckIsSigned(signatures repodb.ManifestSignatures) bool { + for _, signatures := range signatures { + if len(signatures) > 0 { + return true + } + } + + return false +} + +func GetRepoTag(searchText string) (string, string, error) { + const repoTagCount = 2 + + splitSlice := strings.Split(searchText, ":") + + if len(splitSlice) != repoTagCount { + return "", "", zerr.ErrInvalidRepoTagFormat + } + + repo := strings.TrimSpace(splitSlice[0]) + tag := strings.TrimSpace(splitSlice[1]) + + return repo, tag, nil +} + +func GetMapKeys[K comparable, V any](genericMap map[K]V) []K { + keys := make([]K, 0, len(genericMap)) + + for k := range genericMap { + keys = append(keys, k) + } + + return keys +} + +// AcceptedByFilter checks that data contains at least one element of each filter +// criterion (os, arch) present in filter. +func AcceptedByFilter(filter repodb.Filter, data repodb.FilterData) bool { + if filter.Arch != nil { + foundArch := false + for _, arch := range filter.Arch { + foundArch = foundArch || containsString(data.ArchList, *arch) + } + + if !foundArch { + return false + } + } + + if filter.Os != nil { + foundOs := false + for _, os := range filter.Os { + foundOs = foundOs || containsString(data.OsList, *os) + } + + if !foundOs { + return false + } + } + + if filter.HasToBeSigned != nil && *filter.HasToBeSigned != data.IsSigned { + return false + } + + return true +} + +func containsString(strSlice []string, str string) bool { + for _, val := range strSlice { + if strings.EqualFold(val, str) { + return true + } + } + + return false +} diff --git a/pkg/meta/repodb/dynamodb-wrapper/dynamo_test.go b/pkg/meta/repodb/dynamodb-wrapper/dynamo_test.go new file mode 100644 index 00000000..2f7fe636 --- /dev/null +++ b/pkg/meta/repodb/dynamodb-wrapper/dynamo_test.go @@ -0,0 +1,453 @@ +package dynamo_test + +import ( + "context" + "os" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/rs/zerolog" + .
"github.com/smartystreets/goconvey/convey" + + "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" + dynamo "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper" + "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper/iterator" + dynamoParams "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper/params" +) + +func TestIterator(t *testing.T) { + const ( + endpoint = "http://localhost:4566" + region = "us-east-2" + ) + + Convey("TestIterator", t, func() { + dynamoWrapper, err := dynamo.NewDynamoDBWrapper(dynamoParams.DBDriverParameters{ + Endpoint: endpoint, + Region: region, + RepoMetaTablename: "RepoMetadataTable", + ManifestDataTablename: "ManifestDataTable", + VersionTablename: "Version", + }) + So(err, ShouldBeNil) + + So(dynamoWrapper.ResetManifestDataTable(), ShouldBeNil) + So(dynamoWrapper.ResetRepoMetaTable(), ShouldBeNil) + + err = dynamoWrapper.SetRepoTag("repo1", "tag1", "manifestType", "manifestDigest1") + So(err, ShouldBeNil) + + err = dynamoWrapper.SetRepoTag("repo2", "tag2", "manifestType", "manifestDigest2") + So(err, ShouldBeNil) + + err = dynamoWrapper.SetRepoTag("repo3", "tag3", "manifestType", "manifestDigest3") + So(err, ShouldBeNil) + + repoMetaAttributeIterator := iterator.NewBaseDynamoAttributesIterator( + dynamoWrapper.Client, + "RepoMetadataTable", + "RepoMetadata", + 1, + log.Logger{Logger: zerolog.New(os.Stdout)}, + ) + + attribute, err := repoMetaAttributeIterator.First(context.Background()) + So(err, ShouldBeNil) + So(attribute, ShouldNotBeNil) + + attribute, err = repoMetaAttributeIterator.Next(context.Background()) + So(err, ShouldBeNil) + So(attribute, ShouldNotBeNil) + + attribute, err = repoMetaAttributeIterator.Next(context.Background()) + So(err, ShouldBeNil) + So(attribute, ShouldNotBeNil) + + attribute, err = repoMetaAttributeIterator.Next(context.Background()) + So(err, ShouldBeNil) + So(attribute, ShouldBeNil) + }) +} + +func TestIteratorErrors(t *testing.T) { + Convey("errors", t, func() { + customResolver := aws.EndpointResolverWithOptionsFunc( + func(service, region string, options ...interface{}) (aws.Endpoint, error) { + return aws.Endpoint{ + PartitionID: "aws", + URL: "endpoint", + SigningRegion: region, + }, nil + }) + + cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion("region"), + config.WithEndpointResolverWithOptions(customResolver)) + So(err, ShouldBeNil) + + repoMetaAttributeIterator := iterator.NewBaseDynamoAttributesIterator( + dynamodb.NewFromConfig(cfg), + "RepoMetadataTable", + "RepoMetadata", + 1, + log.Logger{Logger: zerolog.New(os.Stdout)}, + ) + + _, err = repoMetaAttributeIterator.First(context.Background()) + So(err, ShouldNotBeNil) + }) +} + +func TestWrapperErrors(t *testing.T) { + const ( + endpoint = "http://localhost:4566" + region = "us-east-2" + ) + + ctx := context.Background() + + Convey("Errors", t, func() { + dynamoWrapper, err := dynamo.NewDynamoDBWrapper(dynamoParams.DBDriverParameters{ //nolint:contextcheck + Endpoint: endpoint, + Region: region, + RepoMetaTablename: "RepoMetadataTable", + ManifestDataTablename: "ManifestDataTable", + VersionTablename: "Version", + }) + So(err, ShouldBeNil) + + So(dynamoWrapper.ResetManifestDataTable(), ShouldBeNil) //nolint:contextcheck + So(dynamoWrapper.ResetRepoMetaTable(), ShouldBeNil) //nolint:contextcheck + + Convey("SetManifestData", func() { + dynamoWrapper.ManifestDataTablename = "WRONG table" + + err := dynamoWrapper.SetManifestData("dig", repodb.ManifestData{}) + So(err, ShouldNotBeNil) + }) + + Convey("GetManifestData", 
func() { + dynamoWrapper.ManifestDataTablename = "WRONG table" + + _, err := dynamoWrapper.GetManifestData("dig") + So(err, ShouldNotBeNil) + }) + + Convey("GetManifestData unmarshal error", func() { + err := setBadManifestData(dynamoWrapper.Client, "dig") + So(err, ShouldBeNil) + + _, err = dynamoWrapper.GetManifestData("dig") + So(err, ShouldNotBeNil) + }) + + Convey("SetManifestMeta GetRepoMeta error", func() { + err := setBadRepoMeta(dynamoWrapper.Client, "repo1") + So(err, ShouldBeNil) + + err = dynamoWrapper.SetManifestMeta("repo1", "dig", repodb.ManifestMetadata{}) + So(err, ShouldNotBeNil) + }) + + Convey("GetManifestMeta GetManifestData not found error", func() { + err := dynamoWrapper.SetRepoTag("repo", "tag", "dig", "") + So(err, ShouldBeNil) + + _, err = dynamoWrapper.GetManifestMeta("repo", "dig") + So(err, ShouldNotBeNil) + }) + + Convey("GetManifestMeta GetRepoMeta Not Found error", func() { + err := dynamoWrapper.SetManifestData("dig", repodb.ManifestData{}) + So(err, ShouldBeNil) + + _, err = dynamoWrapper.GetManifestMeta("repoNotFound", "dig") + So(err, ShouldNotBeNil) + }) + + Convey("GetManifestMeta GetRepoMeta error", func() { + err := dynamoWrapper.SetManifestData("dig", repodb.ManifestData{}) + So(err, ShouldBeNil) + + err = setBadRepoMeta(dynamoWrapper.Client, "repo") + So(err, ShouldBeNil) + + _, err = dynamoWrapper.GetManifestMeta("repo", "dig") + So(err, ShouldNotBeNil) + }) + + Convey("IncrementRepoStars GetRepoMeta error", func() { + err = dynamoWrapper.IncrementRepoStars("repo") + So(err, ShouldNotBeNil) + }) + + Convey("DecrementRepoStars GetRepoMeta error", func() { + err = dynamoWrapper.DecrementRepoStars("repo") + So(err, ShouldNotBeNil) + }) + + Convey("DeleteRepoTag Client.GetItem error", func() { + strSlice := make([]string, 10000) + repoName := strings.Join(strSlice, ".") + + err = dynamoWrapper.DeleteRepoTag(repoName, "tag") + So(err, ShouldNotBeNil) + }) + + Convey("DeleteRepoTag unmarshal error", func() { + err = setBadRepoMeta(dynamoWrapper.Client, "repo") + So(err, ShouldBeNil) + + err = dynamoWrapper.DeleteRepoTag("repo", "tag") + So(err, ShouldNotBeNil) + }) + + Convey("GetRepoMeta Client.GetItem error", func() { + strSlice := make([]string, 10000) + repoName := strings.Join(strSlice, ".") + + _, err = dynamoWrapper.GetRepoMeta(repoName) + So(err, ShouldNotBeNil) + }) + + Convey("GetRepoMeta unmarshal error", func() { + err = setBadRepoMeta(dynamoWrapper.Client, "repo") + So(err, ShouldBeNil) + + _, err = dynamoWrapper.GetRepoMeta("repo") + So(err, ShouldNotBeNil) + }) + + Convey("IncrementImageDownloads GetRepoMeta error", func() { + err = dynamoWrapper.IncrementImageDownloads("repoNotFound", "") + So(err, ShouldNotBeNil) + }) + + Convey("IncrementImageDownloads tag not found error", func() { + err := dynamoWrapper.SetRepoTag("repo", "tag", "dig", "") + So(err, ShouldBeNil) + + err = dynamoWrapper.IncrementImageDownloads("repo", "notFoundTag") + So(err, ShouldNotBeNil) + }) + + Convey("IncrementImageDownloads GetManifestMeta error", func() { + err := dynamoWrapper.SetRepoTag("repo", "tag", "dig", "") + So(err, ShouldBeNil) + + err = dynamoWrapper.IncrementImageDownloads("repo", "tag") + So(err, ShouldNotBeNil) + }) + + Convey("AddManifestSignature GetRepoMeta error", func() { + err := dynamoWrapper.SetRepoTag("repo", "tag", "dig", "") + So(err, ShouldBeNil) + + err = dynamoWrapper.AddManifestSignature("repoNotFound", "tag", repodb.SignatureMetadata{}) + So(err, ShouldNotBeNil) + }) + + Convey("AddManifestSignature ManifestSignatures 
signedManifestDigest not found error", func() { + err := dynamoWrapper.SetRepoTag("repo", "tag", "dig", "") + So(err, ShouldBeNil) + + err = dynamoWrapper.AddManifestSignature("repo", "tagNotFound", repodb.SignatureMetadata{}) + So(err, ShouldNotBeNil) + }) + + Convey("AddManifestSignature SignatureType repodb.NotationType", func() { + err := dynamoWrapper.SetRepoTag("repo", "tag", "dig", "") + So(err, ShouldBeNil) + + err = dynamoWrapper.AddManifestSignature("repo", "tagNotFound", repodb.SignatureMetadata{ + SignatureType: "notation", + }) + So(err, ShouldBeNil) + }) + + Convey("DeleteSignature GetRepoMeta error", func() { + err = dynamoWrapper.DeleteSignature("repoNotFound", "tagNotFound", repodb.SignatureMetadata{}) + So(err, ShouldNotBeNil) + }) + + Convey("DeleteSignature sigDigest.SignatureManifestDigest != sigMeta.SignatureDigest true", func() { + err := setRepoMeta(dynamoWrapper.Client, repodb.RepoMetadata{ + Name: "repo", + Signatures: map[string]repodb.ManifestSignatures{ + "tag1": { + "cosign": []repodb.SignatureInfo{ + {SignatureManifestDigest: "dig1"}, + {SignatureManifestDigest: "dig2"}, + }, + }, + }, + }) + So(err, ShouldBeNil) + + err = dynamoWrapper.DeleteSignature("repo", "tag1", repodb.SignatureMetadata{ + SignatureDigest: "dig2", + SignatureType: "cosign", + }) + So(err, ShouldBeNil) + }) + + Convey("GetMultipleRepoMeta unmarshal error", func() { + err = setBadRepoMeta(dynamoWrapper.Client, "repo") //nolint:contextcheck + So(err, ShouldBeNil) + + _, err = dynamoWrapper.GetMultipleRepoMeta(ctx, func(repoMeta repodb.RepoMetadata) bool { return true }, + repodb.PageInput{}) + + So(err, ShouldNotBeNil) + }) + + Convey("SearchRepos repoMeta unmarshal error", func() { + err = setBadRepoMeta(dynamoWrapper.Client, "repo") //nolint:contextcheck + So(err, ShouldBeNil) + + _, _, err = dynamoWrapper.SearchRepos(ctx, "", repodb.Filter{}, repodb.PageInput{}) + + So(err, ShouldNotBeNil) + }) + + Convey("SearchRepos GetManifestMeta error", func() { + err := dynamoWrapper.SetRepoTag("repo", "tag1", "notFoundDigest", "") //nolint:contextcheck + So(err, ShouldBeNil) + + _, _, err = dynamoWrapper.SearchRepos(ctx, "", repodb.Filter{}, repodb.PageInput{}) + + So(err, ShouldNotBeNil) + }) + + Convey("SearchRepos config unmarshal error", func() { + err := dynamoWrapper.SetRepoTag("repo", "tag1", "dig1", "") //nolint:contextcheck + So(err, ShouldBeNil) + + err = dynamoWrapper.SetManifestData("dig1", repodb.ManifestData{ //nolint:contextcheck + ManifestBlob: []byte("{}"), + ConfigBlob: []byte("bad json"), + }) + So(err, ShouldBeNil) + + _, _, err = dynamoWrapper.SearchRepos(ctx, "", repodb.Filter{}, repodb.PageInput{}) + + So(err, ShouldNotBeNil) + }) + + Convey("SearchTags repoMeta unmarshal error", func() { + err = setBadRepoMeta(dynamoWrapper.Client, "repo") //nolint:contextcheck + So(err, ShouldBeNil) + + _, _, err = dynamoWrapper.SearchTags(ctx, "repo:", repodb.Filter{}, repodb.PageInput{}) + + So(err, ShouldNotBeNil) + }) + + Convey("SearchTags GetManifestMeta error", func() { + err := dynamoWrapper.SetRepoTag("repo", "tag1", "manifestNotFound", "") //nolint:contextcheck + So(err, ShouldBeNil) + + _, _, err = dynamoWrapper.SearchTags(ctx, "repo:", repodb.Filter{}, repodb.PageInput{}) + + So(err, ShouldNotBeNil) + }) + + Convey("SearchTags config unmarshal error", func() { + err := dynamoWrapper.SetRepoTag("repo", "tag1", "dig1", "") //nolint:contextcheck + So(err, ShouldBeNil) + + err = dynamoWrapper.SetManifestData( //nolint:contextcheck + "dig1", + repodb.ManifestData{ + ManifestBlob: 
[]byte("{}"), + ConfigBlob: []byte("bad json"), + }, + ) + So(err, ShouldBeNil) + + _, _, err = dynamoWrapper.SearchTags(ctx, "repo:", repodb.Filter{}, repodb.PageInput{}) + + So(err, ShouldNotBeNil) + }) + }) +} + +func setBadManifestData(client *dynamodb.Client, digest string) error { + mdAttributeValue, err := attributevalue.Marshal("string") + if err != nil { + return err + } + + _, err = client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]string{ + "#MD": "ManifestData", + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":ManifestData": mdAttributeValue, + }, + Key: map[string]types.AttributeValue{ + "Digest": &types.AttributeValueMemberS{ + Value: digest, + }, + }, + TableName: aws.String("ManifestDataTable"), + UpdateExpression: aws.String("SET #MD = :ManifestData"), + }) + + return err +} + +func setBadRepoMeta(client *dynamodb.Client, repoName string) error { + repoAttributeValue, err := attributevalue.Marshal("string") + if err != nil { + return err + } + + _, err = client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]string{ + "#RM": "RepoMetadata", + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":RepoMetadata": repoAttributeValue, + }, + Key: map[string]types.AttributeValue{ + "RepoName": &types.AttributeValueMemberS{ + Value: repoName, + }, + }, + TableName: aws.String("RepoMetadataTable"), + UpdateExpression: aws.String("SET #RM = :RepoMetadata"), + }) + + return err +} + +func setRepoMeta(client *dynamodb.Client, repoMeta repodb.RepoMetadata) error { + repoAttributeValue, err := attributevalue.Marshal(repoMeta) + if err != nil { + return err + } + + _, err = client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]string{ + "#RM": "RepoMetadata", + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":RepoMetadata": repoAttributeValue, + }, + Key: map[string]types.AttributeValue{ + "RepoName": &types.AttributeValueMemberS{ + Value: repoMeta.Name, + }, + }, + TableName: aws.String("RepoMetadataTable"), + UpdateExpression: aws.String("SET #RM = :RepoMetadata"), + }) + + return err +} diff --git a/pkg/meta/repodb/dynamodb-wrapper/dynamo_wrapper.go b/pkg/meta/repodb/dynamodb-wrapper/dynamo_wrapper.go new file mode 100644 index 00000000..f529c4eb --- /dev/null +++ b/pkg/meta/repodb/dynamodb-wrapper/dynamo_wrapper.go @@ -0,0 +1,977 @@ +package dynamo + +import ( + "context" + "encoding/json" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + godigest "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/rs/zerolog" + + zerr "zotregistry.io/zot/errors" + "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" //nolint:go-staticcheck + "zotregistry.io/zot/pkg/meta/repodb/common" + "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper/iterator" + dynamoParams "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper/params" + "zotregistry.io/zot/pkg/meta/repodb/version" + localCtx "zotregistry.io/zot/pkg/requestcontext" +) + +type DBWrapper struct { + Client *dynamodb.Client + RepoMetaTablename string + ManifestDataTablename string + VersionTablename string + Patches []func(client 
*dynamodb.Client, tableNames map[string]string) error + Log log.Logger +} + +func NewDynamoDBWrapper(params dynamoParams.DBDriverParameters) (*DBWrapper, error) { + // custom endpoint resolver to point to localhost + customResolver := aws.EndpointResolverWithOptionsFunc( + func(service, region string, options ...interface{}) (aws.Endpoint, error) { + return aws.Endpoint{ + PartitionID: "aws", + URL: params.Endpoint, + SigningRegion: region, + }, nil + }) + + // Using the SDK's default configuration, loading additional config + // and credentials values from the environment variables, shared + // credentials, and shared configuration files + cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(params.Region), + config.WithEndpointResolverWithOptions(customResolver)) + if err != nil { + return nil, err + } + + dynamoWrapper := DBWrapper{ + Client: dynamodb.NewFromConfig(cfg), + RepoMetaTablename: params.RepoMetaTablename, + ManifestDataTablename: params.ManifestDataTablename, + VersionTablename: params.VersionTablename, + Patches: version.GetDynamoDBPatches(), + Log: log.Logger{Logger: zerolog.New(os.Stdout)}, + } + + err = dynamoWrapper.createVersionTable() + if err != nil { + return nil, err + } + + err = dynamoWrapper.createRepoMetaTable() + if err != nil { + return nil, err + } + + err = dynamoWrapper.createManifestDataTable() + if err != nil { + return nil, err + } + + // Using the Config value, create the DynamoDB client + return &dynamoWrapper, nil +} + +func (dwr DBWrapper) SetManifestData(manifestDigest godigest.Digest, manifestData repodb.ManifestData) error { + mdAttributeValue, err := attributevalue.Marshal(manifestData) + if err != nil { + return err + } + + _, err = dwr.Client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]string{ + "#MD": "ManifestData", + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":ManifestData": mdAttributeValue, + }, + Key: map[string]types.AttributeValue{ + "Digest": &types.AttributeValueMemberS{ + Value: manifestDigest.String(), + }, + }, + TableName: aws.String(dwr.ManifestDataTablename), + UpdateExpression: aws.String("SET #MD = :ManifestData"), + }) + + return err +} + +func (dwr DBWrapper) GetManifestData(manifestDigest godigest.Digest) (repodb.ManifestData, error) { + resp, err := dwr.Client.GetItem(context.Background(), &dynamodb.GetItemInput{ + TableName: aws.String(dwr.ManifestDataTablename), + Key: map[string]types.AttributeValue{ + "Digest": &types.AttributeValueMemberS{Value: manifestDigest.String()}, + }, + }) + if err != nil { + return repodb.ManifestData{}, err + } + + if resp.Item == nil { + return repodb.ManifestData{}, zerr.ErrManifestDataNotFound + } + + var manifestData repodb.ManifestData + + err = attributevalue.Unmarshal(resp.Item["ManifestData"], &manifestData) + if err != nil { + return repodb.ManifestData{}, err + } + + return manifestData, nil +} + +func (dwr DBWrapper) SetManifestMeta(repo string, manifestDigest godigest.Digest, manifestMeta repodb.ManifestMetadata, +) error { + if manifestMeta.Signatures == nil { + manifestMeta.Signatures = repodb.ManifestSignatures{} + } + + repoMeta, err := dwr.GetRepoMeta(repo) + if err != nil { + if !errors.Is(err, zerr.ErrRepoMetaNotFound) { + return err + } + + repoMeta = repodb.RepoMetadata{ + Name: repo, + Tags: map[string]repodb.Descriptor{}, + Statistics: map[string]repodb.DescriptorStatistics{}, + Signatures: map[string]repodb.ManifestSignatures{}, + } + } + + err = 
dwr.SetManifestData(manifestDigest, repodb.ManifestData{ + ManifestBlob: manifestMeta.ManifestBlob, + ConfigBlob: manifestMeta.ConfigBlob, + }) + if err != nil { + return err + } + + updatedRepoMeta := common.UpdateManifestMeta(repoMeta, manifestDigest, manifestMeta) + + err = dwr.setRepoMeta(repo, updatedRepoMeta) + if err != nil { + return err + } + + return err +} + +func (dwr DBWrapper) GetManifestMeta(repo string, manifestDigest godigest.Digest, +) (repodb.ManifestMetadata, error) { //nolint:contextcheck + manifestData, err := dwr.GetManifestData(manifestDigest) + if err != nil { + if errors.Is(err, zerr.ErrManifestDataNotFound) { + return repodb.ManifestMetadata{}, zerr.ErrManifestMetaNotFound + } + + return repodb.ManifestMetadata{}, + errors.Wrapf(err, "error while constructing manifest meta for manifest '%s' from repo '%s'", + manifestDigest, repo) + } + + repoMeta, err := dwr.GetRepoMeta(repo) + if err != nil { + if errors.Is(err, zerr.ErrRepoMetaNotFound) { + return repodb.ManifestMetadata{}, zerr.ErrManifestMetaNotFound + } + + return repodb.ManifestMetadata{}, + errors.Wrapf(err, "error while constructing manifest meta for manifest '%s' from repo '%s'", + manifestDigest, repo) + } + + manifestMetadata := repodb.ManifestMetadata{} + + manifestMetadata.ManifestBlob = manifestData.ManifestBlob + manifestMetadata.ConfigBlob = manifestData.ConfigBlob + manifestMetadata.DownloadCount = repoMeta.Statistics[manifestDigest.String()].DownloadCount + + manifestMetadata.Signatures = repodb.ManifestSignatures{} + + if repoMeta.Signatures[manifestDigest.String()] != nil { + manifestMetadata.Signatures = repoMeta.Signatures[manifestDigest.String()] + } + + return manifestMetadata, nil +} + +func (dwr DBWrapper) IncrementRepoStars(repo string) error { + repoMeta, err := dwr.GetRepoMeta(repo) + if err != nil { + return err + } + + repoMeta.Stars++ + + err = dwr.setRepoMeta(repo, repoMeta) + + return err +} + +func (dwr DBWrapper) DecrementRepoStars(repo string) error { + repoMeta, err := dwr.GetRepoMeta(repo) + if err != nil { + return err + } + + if repoMeta.Stars > 0 { + repoMeta.Stars-- + } + + err = dwr.setRepoMeta(repo, repoMeta) + + return err +} + +func (dwr DBWrapper) GetRepoStars(repo string) (int, error) { + repoMeta, err := dwr.GetRepoMeta(repo) + if err != nil { + return 0, err + } + + return repoMeta.Stars, nil +} + +func (dwr DBWrapper) SetRepoTag(repo string, tag string, manifestDigest godigest.Digest, mediaType string) error { + if err := common.ValidateRepoTagInput(repo, tag, manifestDigest); err != nil { + return err + } + + resp, err := dwr.Client.GetItem(context.TODO(), &dynamodb.GetItemInput{ + TableName: aws.String(dwr.RepoMetaTablename), + Key: map[string]types.AttributeValue{ + "RepoName": &types.AttributeValueMemberS{Value: repo}, + }, + }) + if err != nil { + return err + } + + repoMeta := repodb.RepoMetadata{ + Name: repo, + Tags: map[string]repodb.Descriptor{}, + Statistics: map[string]repodb.DescriptorStatistics{}, + Signatures: map[string]repodb.ManifestSignatures{}, + } + + if resp.Item != nil { + err := attributevalue.Unmarshal(resp.Item["RepoMetadata"], &repoMeta) + if err != nil { + return err + } + } + + repoMeta.Tags[tag] = repodb.Descriptor{ + Digest: manifestDigest.String(), + MediaType: mediaType, + } + + err = dwr.setRepoMeta(repo, repoMeta) + + return err +} + +func (dwr DBWrapper) DeleteRepoTag(repo string, tag string) error { + resp, err := dwr.Client.GetItem(context.TODO(), &dynamodb.GetItemInput{ + TableName: aws.String(dwr.RepoMetaTablename), + 
Key: map[string]types.AttributeValue{ + "RepoName": &types.AttributeValueMemberS{Value: repo}, + }, + }) + if err != nil { + return err + } + + if resp.Item == nil { + return nil + } + + var repoMeta repodb.RepoMetadata + + err = attributevalue.Unmarshal(resp.Item["RepoMetadata"], &repoMeta) + if err != nil { + return err + } + + delete(repoMeta.Tags, tag) + + if len(repoMeta.Tags) == 0 { + _, err := dwr.Client.DeleteItem(context.Background(), &dynamodb.DeleteItemInput{ + TableName: aws.String(dwr.RepoMetaTablename), + Key: map[string]types.AttributeValue{ + "RepoName": &types.AttributeValueMemberS{Value: repo}, + }, + }) + + return err + } + + repoAttributeValue, err := attributevalue.Marshal(repoMeta) + if err != nil { + return err + } + + _, err = dwr.Client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]string{ + "#RM": "RepoMetadata", + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":RepoMetadata": repoAttributeValue, + }, + Key: map[string]types.AttributeValue{ + "RepoName": &types.AttributeValueMemberS{ + Value: repo, + }, + }, + TableName: aws.String(dwr.RepoMetaTablename), + UpdateExpression: aws.String("SET #RM = :RepoMetadata"), + }) + + return err +} + +func (dwr DBWrapper) GetRepoMeta(repo string) (repodb.RepoMetadata, error) { + resp, err := dwr.Client.GetItem(context.TODO(), &dynamodb.GetItemInput{ + TableName: aws.String(dwr.RepoMetaTablename), + Key: map[string]types.AttributeValue{ + "RepoName": &types.AttributeValueMemberS{Value: repo}, + }, + }) + if err != nil { + return repodb.RepoMetadata{}, err + } + + if resp.Item == nil { + return repodb.RepoMetadata{}, zerr.ErrRepoMetaNotFound + } + + var repoMeta repodb.RepoMetadata + + err = attributevalue.Unmarshal(resp.Item["RepoMetadata"], &repoMeta) + if err != nil { + return repodb.RepoMetadata{}, err + } + + return repoMeta, nil +} + +func (dwr DBWrapper) IncrementImageDownloads(repo string, reference string) error { + repoMeta, err := dwr.GetRepoMeta(repo) + if err != nil { + return err + } + + manifestDigest := reference + + if !common.ReferenceIsDigest(reference) { + // search digest for tag + descriptor, found := repoMeta.Tags[reference] + + if !found { + return zerr.ErrManifestMetaNotFound + } + + manifestDigest = descriptor.Digest + } + + manifestMeta, err := dwr.GetManifestMeta(repo, godigest.Digest(manifestDigest)) + if err != nil { + return err + } + + manifestMeta.DownloadCount++ + + err = dwr.SetManifestMeta(repo, godigest.Digest(manifestDigest), manifestMeta) + + return err +} + +func (dwr DBWrapper) AddManifestSignature(repo string, signedManifestDigest godigest.Digest, + sygMeta repodb.SignatureMetadata, +) error { + repoMeta, err := dwr.GetRepoMeta(repo) + if err != nil { + return err + } + + var ( + manifestSignatures repodb.ManifestSignatures + found bool + ) + + if manifestSignatures, found = repoMeta.Signatures[signedManifestDigest.String()]; !found { + manifestSignatures = repodb.ManifestSignatures{} + } + + signatureSlice := manifestSignatures[sygMeta.SignatureType] + if !common.SignatureAlreadyExists(signatureSlice, sygMeta) { + if sygMeta.SignatureType == repodb.NotationType { + signatureSlice = append(signatureSlice, repodb.SignatureInfo{ + SignatureManifestDigest: sygMeta.SignatureDigest, + LayersInfo: sygMeta.LayersInfo, + }) + } else if sygMeta.SignatureType == repodb.CosignType { + signatureSlice = []repodb.SignatureInfo{{ + SignatureManifestDigest: sygMeta.SignatureDigest, + LayersInfo: sygMeta.LayersInfo, + }} + } + } + + 
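+ // notation signatures accumulate in the slice, while a new cosign signature replaces any previous one for this manifest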
manifestSignatures[sygMeta.SignatureType] = signatureSlice + + repoMeta.Signatures[signedManifestDigest.String()] = manifestSignatures + + err = dwr.setRepoMeta(repoMeta.Name, repoMeta) + + return err +} + +func (dwr DBWrapper) DeleteSignature(repo string, signedManifestDigest godigest.Digest, + sigMeta repodb.SignatureMetadata, +) error { + repoMeta, err := dwr.GetRepoMeta(repo) + if err != nil { + return err + } + + sigType := sigMeta.SignatureType + + var ( + manifestSignatures repodb.ManifestSignatures + found bool + ) + + if manifestSignatures, found = repoMeta.Signatures[signedManifestDigest.String()]; !found { + return zerr.ErrManifestMetaNotFound + } + + signatureSlice := manifestSignatures[sigType] + + newSignatureSlice := make([]repodb.SignatureInfo, 0, len(signatureSlice)-1) + + for _, sigDigest := range signatureSlice { + if sigDigest.SignatureManifestDigest != sigMeta.SignatureDigest { + newSignatureSlice = append(newSignatureSlice, sigDigest) + } + } + + manifestSignatures[sigType] = newSignatureSlice + + repoMeta.Signatures[signedManifestDigest.String()] = manifestSignatures + + err = dwr.setRepoMeta(repoMeta.Name, repoMeta) + + return err +} + +func (dwr DBWrapper) GetMultipleRepoMeta(ctx context.Context, + filter func(repoMeta repodb.RepoMetadata) bool, requestedPage repodb.PageInput, +) ([]repodb.RepoMetadata, error) { + var ( + repoMetaAttributeIterator iterator.AttributesIterator + pageFinder repodb.PageFinder + ) + + repoMetaAttributeIterator = iterator.NewBaseDynamoAttributesIterator( + dwr.Client, dwr.RepoMetaTablename, "RepoMetadata", 0, dwr.Log, + ) + + pageFinder, err := repodb.NewBaseRepoPageFinder(requestedPage.Limit, requestedPage.Offset, requestedPage.SortBy) + if err != nil { + return nil, err + } + + repoMetaAttribute, err := repoMetaAttributeIterator.First(ctx) + + for ; repoMetaAttribute != nil; repoMetaAttribute, err = repoMetaAttributeIterator.Next(ctx) { + if err != nil { + // log + return []repodb.RepoMetadata{}, err + } + + var repoMeta repodb.RepoMetadata + + err := attributevalue.Unmarshal(repoMetaAttribute, &repoMeta) + if err != nil { + return []repodb.RepoMetadata{}, err + } + + if ok, err := localCtx.RepoIsUserAvailable(ctx, repoMeta.Name); !ok || err != nil { + continue + } + + if filter(repoMeta) { + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repoMeta, + }) + } + } + + foundRepos := pageFinder.Page() + + return foundRepos, err +} + +func (dwr DBWrapper) SearchRepos(ctx context.Context, searchText string, filter repodb.Filter, + requestedPage repodb.PageInput, +) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + var ( + foundManifestMetadataMap = make(map[string]repodb.ManifestMetadata) + manifestMetadataMap = make(map[string]repodb.ManifestMetadata) + + repoMetaAttributeIterator iterator.AttributesIterator + pageFinder repodb.PageFinder + ) + + repoMetaAttributeIterator = iterator.NewBaseDynamoAttributesIterator( + dwr.Client, dwr.RepoMetaTablename, "RepoMetadata", 0, dwr.Log, + ) + + pageFinder, err := repodb.NewBaseRepoPageFinder(requestedPage.Limit, requestedPage.Offset, requestedPage.SortBy) + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, err + } + + repoMetaAttribute, err := repoMetaAttributeIterator.First(ctx) + + for ; repoMetaAttribute != nil; repoMetaAttribute, err = repoMetaAttributeIterator.Next(ctx) { + if err != nil { + // log + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, err + } + + var repoMeta repodb.RepoMetadata + + err := 
attributevalue.Unmarshal(repoMetaAttribute, &repoMeta) + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, err + } + + if ok, err := localCtx.RepoIsUserAvailable(ctx, repoMeta.Name); !ok || err != nil { + continue + } + + if score := common.ScoreRepoName(searchText, repoMeta.Name); score != -1 { + var ( + // specific values used for sorting that need to be calculated based on all manifests from the repo + repoDownloads = 0 + repoLastUpdated time.Time + firstImageChecked = true + osSet = map[string]bool{} + archSet = map[string]bool{} + isSigned = false + ) + + for _, descriptor := range repoMeta.Tags { + var manifestMeta repodb.ManifestMetadata + + manifestMeta, manifestDownloaded := manifestMetadataMap[descriptor.Digest] + + if !manifestDownloaded { + manifestMeta, err = dwr.GetManifestMeta(repoMeta.Name, godigest.Digest(descriptor.Digest)) //nolint:contextcheck + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, + errors.Wrapf(err, "repodb: error while unmarshaling manifest metadata for digest %s", descriptor.Digest) + } + } + + // get fields related to filtering + var configContent ispec.Image + + err = json.Unmarshal(manifestMeta.ConfigBlob, &configContent) + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, + errors.Wrapf(err, "repodb: error while unmarshaling config content for digest %s", descriptor.Digest) + } + + osSet[configContent.OS] = true + archSet[configContent.Architecture] = true + + // get fields related to sorting + repoDownloads += repoMeta.Statistics[descriptor.Digest].DownloadCount + + imageLastUpdated := common.GetImageLastUpdatedTimestamp(configContent) + + if firstImageChecked || repoLastUpdated.Before(imageLastUpdated) { + repoLastUpdated = imageLastUpdated + firstImageChecked = false + + isSigned = common.CheckIsSigned(manifestMeta.Signatures) + } + + manifestMetadataMap[descriptor.Digest] = manifestMeta + } + + repoFilterData := repodb.FilterData{ + OsList: common.GetMapKeys(osSet), + ArchList: common.GetMapKeys(archSet), + IsSigned: isSigned, + } + + if !common.AcceptedByFilter(filter, repoFilterData) { + continue + } + + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repoMeta, + Score: score, + Downloads: repoDownloads, + UpdateTime: repoLastUpdated, + }) + } + } + + foundRepos := pageFinder.Page() + + // keep just the manifestMeta we need + for _, repoMeta := range foundRepos { + for _, descriptor := range repoMeta.Tags { + foundManifestMetadataMap[descriptor.Digest] = manifestMetadataMap[descriptor.Digest] + } + } + + return foundRepos, foundManifestMetadataMap, err +} + +func (dwr DBWrapper) SearchTags(ctx context.Context, searchText string, filter repodb.Filter, + requestedPage repodb.PageInput, +) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + var ( + foundManifestMetadataMap = make(map[string]repodb.ManifestMetadata) + manifestMetadataMap = make(map[string]repodb.ManifestMetadata) + repoMetaAttributeIterator = iterator.NewBaseDynamoAttributesIterator( + dwr.Client, dwr.RepoMetaTablename, "RepoMetadata", 0, dwr.Log, + ) + + pageFinder repodb.PageFinder + ) + + pageFinder, err := repodb.NewBaseImagePageFinder(requestedPage.Limit, requestedPage.Offset, requestedPage.SortBy) + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, err + } + + searchedRepo, searchedTag, err := common.GetRepoTag(searchText) + if err != nil { + return []repodb.RepoMetadata{}, 
map[string]repodb.ManifestMetadata{}, + errors.Wrap(err, "repodb: error while parsing search text, invalid format") + } + + repoMetaAttribute, err := repoMetaAttributeIterator.First(ctx) + + for ; repoMetaAttribute != nil; repoMetaAttribute, err = repoMetaAttributeIterator.Next(ctx) { + if err != nil { + // log + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, err + } + + var repoMeta repodb.RepoMetadata + + err := attributevalue.Unmarshal(repoMetaAttribute, &repoMeta) + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, err + } + + if ok, err := localCtx.RepoIsUserAvailable(ctx, repoMeta.Name); !ok || err != nil { + continue + } + + if repoMeta.Name == searchedRepo { + matchedTags := make(map[string]repodb.Descriptor) + // take all manifestMetas + for tag, descriptor := range repoMeta.Tags { + if !strings.HasPrefix(tag, searchedTag) { + continue + } + + matchedTags[tag] = descriptor + + // in case tags reference the same manifest we don't download from DB multiple times + if manifestMeta, manifestExists := manifestMetadataMap[descriptor.Digest]; manifestExists { + manifestMetadataMap[descriptor.Digest] = manifestMeta + + continue + } + + manifestMeta, err := dwr.GetManifestMeta(repoMeta.Name, godigest.Digest(descriptor.Digest)) //nolint:contextcheck + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, + errors.Wrapf(err, "repodb: error while unmarshaling manifest metadata for digest %s", descriptor.Digest) + } + + var configContent ispec.Image + + err = json.Unmarshal(manifestMeta.ConfigBlob, &configContent) + if err != nil { + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, + errors.Wrapf(err, "repodb: error while unmarshaling config content for digest %s", descriptor.Digest) + } + + imageFilterData := repodb.FilterData{ + OsList: []string{configContent.OS}, + ArchList: []string{configContent.Architecture}, + IsSigned: false, + } + + if !common.AcceptedByFilter(filter, imageFilterData) { + delete(matchedTags, tag) + delete(manifestMetadataMap, descriptor.Digest) + + continue + } + + manifestMetadataMap[descriptor.Digest] = manifestMeta + } + + repoMeta.Tags = matchedTags + + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repoMeta, + }) + } + } + + foundRepos := pageFinder.Page() + + // keep just the manifestMeta we need + for _, repoMeta := range foundRepos { + for _, descriptor := range repoMeta.Tags { + foundManifestMetadataMap[descriptor.Digest] = manifestMetadataMap[descriptor.Digest] + } + } + + return foundRepos, foundManifestMetadataMap, err +} + +func (dwr *DBWrapper) PatchDB() error { + DBVersion, err := dwr.getDBVersion() + if err != nil { + return errors.Wrapf(err, "patching dynamo failed, error retrieving database version") + } + + if version.GetVersionIndex(DBVersion) == -1 { + return errors.New("DB has broken format, no version found") + } + + for patchIndex, patch := range dwr.Patches { + if patchIndex < version.GetVersionIndex(DBVersion) { + continue + } + + tableNames := map[string]string{ + "RepoMetaTablename": dwr.RepoMetaTablename, + "ManifestDataTablename": dwr.ManifestDataTablename, + "VersionTablename": dwr.VersionTablename, + } + + err := patch(dwr.Client, tableNames) + if err != nil { + return err + } + } + + return nil +} + +func (dwr DBWrapper) setRepoMeta(repo string, repoMeta repodb.RepoMetadata) error { + repoAttributeValue, err := attributevalue.Marshal(repoMeta) + if err != nil { + return err + } + + _, err =
dwr.Client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]string{ + "#RM": "RepoMetadata", + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":RepoMetadata": repoAttributeValue, + }, + Key: map[string]types.AttributeValue{ + "RepoName": &types.AttributeValueMemberS{ + Value: repo, + }, + }, + TableName: aws.String(dwr.RepoMetaTablename), + UpdateExpression: aws.String("SET #RM = :RepoMetadata"), + }) + + return err +} + +func (dwr DBWrapper) createRepoMetaTable() error { + _, err := dwr.Client.CreateTable(context.Background(), &dynamodb.CreateTableInput{ + TableName: aws.String(dwr.RepoMetaTablename), + AttributeDefinitions: []types.AttributeDefinition{ + { + AttributeName: aws.String("RepoName"), + AttributeType: types.ScalarAttributeTypeS, + }, + }, + KeySchema: []types.KeySchemaElement{ + { + AttributeName: aws.String("RepoName"), + KeyType: types.KeyTypeHash, + }, + }, + BillingMode: types.BillingModePayPerRequest, + }) + + if err != nil && strings.Contains(err.Error(), "Table already exists") { + return nil + } + + return err +} + +func (dwr DBWrapper) deleteRepoMetaTable() error { + _, err := dwr.Client.DeleteTable(context.Background(), &dynamodb.DeleteTableInput{ + TableName: aws.String(dwr.RepoMetaTablename), + }) + + return err +} + +func (dwr DBWrapper) ResetRepoMetaTable() error { + err := dwr.deleteRepoMetaTable() + if err != nil { + return err + } + + return dwr.createRepoMetaTable() +} + +func (dwr DBWrapper) createManifestDataTable() error { + _, err := dwr.Client.CreateTable(context.Background(), &dynamodb.CreateTableInput{ + TableName: aws.String(dwr.ManifestDataTablename), + AttributeDefinitions: []types.AttributeDefinition{ + { + AttributeName: aws.String("Digest"), + AttributeType: types.ScalarAttributeTypeS, + }, + }, + KeySchema: []types.KeySchemaElement{ + { + AttributeName: aws.String("Digest"), + KeyType: types.KeyTypeHash, + }, + }, + BillingMode: types.BillingModePayPerRequest, + }) + + if err != nil && strings.Contains(err.Error(), "Table already exists") { + return nil + } + + return err +} + +func (dwr *DBWrapper) createVersionTable() error { + _, err := dwr.Client.CreateTable(context.Background(), &dynamodb.CreateTableInput{ + TableName: aws.String(dwr.VersionTablename), + AttributeDefinitions: []types.AttributeDefinition{ + { + AttributeName: aws.String("VersionKey"), + AttributeType: types.ScalarAttributeTypeS, + }, + }, + KeySchema: []types.KeySchemaElement{ + { + AttributeName: aws.String("VersionKey"), + KeyType: types.KeyTypeHash, + }, + }, + BillingMode: types.BillingModePayPerRequest, + }) + + if err != nil && strings.Contains(err.Error(), "Table already exists") { + return nil + } + + if err == nil { + mdAttributeValue, err := attributevalue.Marshal(version.CurrentVersion) + if err != nil { + return err + } + + _, err = dwr.Client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]string{ + "#V": "Version", + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":Version": mdAttributeValue, + }, + Key: map[string]types.AttributeValue{ + "VersionKey": &types.AttributeValueMemberS{ + Value: version.DBVersionKey, + }, + }, + TableName: aws.String(dwr.VersionTablename), + UpdateExpression: aws.String("SET #V = :Version"), + }) + + if err != nil { + return err + } + } + + return err +} + +func (dwr *DBWrapper) getDBVersion() (string, error) { + resp, err := dwr.Client.GetItem(context.TODO(), &dynamodb.GetItemInput{ + TableName: 
aws.String(dwr.VersionTablename), + Key: map[string]types.AttributeValue{ + "VersionKey": &types.AttributeValueMemberS{Value: version.DBVersionKey}, + }, + }) + if err != nil { + return "", err + } + + if resp.Item == nil { + return "", nil + } + + var version string + + err = attributevalue.Unmarshal(resp.Item["Version"], &version) + if err != nil { + return "", err + } + + return version, nil +} + +func (dwr DBWrapper) deleteManifestDataTable() error { + _, err := dwr.Client.DeleteTable(context.Background(), &dynamodb.DeleteTableInput{ + TableName: aws.String(dwr.ManifestDataTablename), + }) + + return err +} + +func (dwr DBWrapper) ResetManifestDataTable() error { + err := dwr.deleteManifestDataTable() + if err != nil { + return err + } + + return dwr.createManifestDataTable() +} diff --git a/pkg/meta/repodb/dynamodb-wrapper/iterator/dynamo_iterator.go b/pkg/meta/repodb/dynamodb-wrapper/iterator/dynamo_iterator.go new file mode 100644 index 00000000..94b7770f --- /dev/null +++ b/pkg/meta/repodb/dynamodb-wrapper/iterator/dynamo_iterator.go @@ -0,0 +1,99 @@ +package iterator + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + + "zotregistry.io/zot/pkg/log" +) + +type AttributesIterator interface { + First(ctx context.Context) (types.AttributeValue, error) + Next(ctx context.Context) (types.AttributeValue, error) +} + +type BaseAttributesIterator struct { + Client *dynamodb.Client + Table string + Attribute string + + itemBuffer []map[string]types.AttributeValue + currentItemIndex int + lastEvaluatedKey map[string]types.AttributeValue + readLimit *int32 + + log log.Logger +} + +func NewBaseDynamoAttributesIterator(client *dynamodb.Client, table, attribute string, maxReadLimit int32, + log log.Logger, +) *BaseAttributesIterator { + var readLimit *int32 + + if maxReadLimit > 0 { + readLimit = &maxReadLimit + } + + return &BaseAttributesIterator{ + Client: client, + Table: table, + Attribute: attribute, + itemBuffer: []map[string]types.AttributeValue{}, + currentItemIndex: 0, + readLimit: readLimit, + log: log, + } +} + +func (dii *BaseAttributesIterator) First(ctx context.Context) (types.AttributeValue, error) { + scanOutput, err := dii.Client.Scan(ctx, &dynamodb.ScanInput{ + TableName: aws.String(dii.Table), + Limit: dii.readLimit, + }) + if err != nil { + return nil, err + } + + if len(scanOutput.Items) == 0 { + return nil, nil + } + + dii.itemBuffer = scanOutput.Items + dii.lastEvaluatedKey = scanOutput.LastEvaluatedKey + dii.currentItemIndex = 1 + + return dii.itemBuffer[0][dii.Attribute], nil +} + +func (dii *BaseAttributesIterator) Next(ctx context.Context) (types.AttributeValue, error) { + if len(dii.itemBuffer) <= dii.currentItemIndex { + if dii.lastEvaluatedKey == nil { + return nil, nil + } + + scanOutput, err := dii.Client.Scan(ctx, &dynamodb.ScanInput{ + TableName: aws.String(dii.Table), + ExclusiveStartKey: dii.lastEvaluatedKey, + }) + if err != nil { + return nil, err + } + + // all items have been scanned + if len(scanOutput.Items) == 0 { + return nil, nil + } + + dii.itemBuffer = scanOutput.Items + dii.lastEvaluatedKey = scanOutput.LastEvaluatedKey + dii.currentItemIndex = 0 + } + + nextItem := dii.itemBuffer[dii.currentItemIndex][dii.Attribute] + dii.currentItemIndex++ + + return nextItem, nil +} diff --git a/pkg/meta/repodb/dynamodb-wrapper/params/parameters.go b/pkg/meta/repodb/dynamodb-wrapper/params/parameters.go new file mode 100644 index 
00000000..b1d62266 --- /dev/null +++ b/pkg/meta/repodb/dynamodb-wrapper/params/parameters.go @@ -0,0 +1,5 @@ +package params + +type DBDriverParameters struct { + Endpoint, Region, RepoMetaTablename, ManifestDataTablename, VersionTablename string +} diff --git a/pkg/meta/repodb/pagination.go b/pkg/meta/repodb/pagination.go new file mode 100644 index 00000000..90a1da2b --- /dev/null +++ b/pkg/meta/repodb/pagination.go @@ -0,0 +1,241 @@ +package repodb + +import ( + "sort" + + "github.com/pkg/errors" + + zerr "zotregistry.io/zot/errors" +) + +// PageFinder permits keeping a pool of objects using Add +// and returning a specific page. +type PageFinder interface { + // Add + Add(detailedRepoMeta DetailedRepoMeta) + Page() []RepoMetadata + Reset() +} + +// RepoPageFinder implements PageFinder. It manages RepoMeta objects and calculates the page +// using the given limit, offset and sortBy option. +type RepoPageFinder struct { + limit int + offset int + sortBy SortCriteria + pageBuffer []DetailedRepoMeta +} + +func NewBaseRepoPageFinder(limit, offset int, sortBy SortCriteria) (*RepoPageFinder, error) { + if sortBy == "" { + sortBy = AlphabeticAsc + } + + if limit < 0 { + return nil, zerr.ErrLimitIsNegative + } + + if offset < 0 { + return nil, zerr.ErrOffsetIsNegative + } + + if _, found := SortFunctions()[sortBy]; !found { + return nil, errors.Wrapf(zerr.ErrSortCriteriaNotSupported, "sorting repos by '%s' is not supported", sortBy) + } + + return &RepoPageFinder{ + limit: limit, + offset: offset, + sortBy: sortBy, + pageBuffer: make([]DetailedRepoMeta, 0, limit), + }, nil +} + +func (bpt *RepoPageFinder) Reset() { + bpt.pageBuffer = []DetailedRepoMeta{} +} + +func (bpt *RepoPageFinder) Add(namedRepoMeta DetailedRepoMeta) { + bpt.pageBuffer = append(bpt.pageBuffer, namedRepoMeta) +} + +func (bpt *RepoPageFinder) Page() []RepoMetadata { + if len(bpt.pageBuffer) == 0 { + return []RepoMetadata{} + } + + sort.Slice(bpt.pageBuffer, SortFunctions()[bpt.sortBy](bpt.pageBuffer)) + + // the offset and limit are calculatd in terms of repos counted + start := bpt.offset + end := bpt.offset + bpt.limit + + // we'll return an empty array when the offset is greater than the number of elements + if start >= len(bpt.pageBuffer) { + start = len(bpt.pageBuffer) + end = start + } + + if end >= len(bpt.pageBuffer) { + end = len(bpt.pageBuffer) + } + + detailedReposPage := bpt.pageBuffer[start:end] + + if start == 0 && end == 0 { + detailedReposPage = bpt.pageBuffer + } + + repos := make([]RepoMetadata, 0, len(detailedReposPage)) + + for _, drm := range detailedReposPage { + repos = append(repos, drm.RepoMeta) + } + + return repos +} + +type ImagePageFinder struct { + limit int + offset int + sortBy SortCriteria + pageBuffer []DetailedRepoMeta +} + +func NewBaseImagePageFinder(limit, offset int, sortBy SortCriteria) (*ImagePageFinder, error) { + if sortBy == "" { + sortBy = AlphabeticAsc + } + + if limit < 0 { + return nil, zerr.ErrLimitIsNegative + } + + if offset < 0 { + return nil, zerr.ErrOffsetIsNegative + } + + if _, found := SortFunctions()[sortBy]; !found { + return nil, errors.Wrapf(zerr.ErrSortCriteriaNotSupported, "sorting repos by '%s' is not supported", sortBy) + } + + return &ImagePageFinder{ + limit: limit, + offset: offset, + sortBy: sortBy, + pageBuffer: make([]DetailedRepoMeta, 0, limit), + }, nil +} + +func (bpt *ImagePageFinder) Reset() { + bpt.pageBuffer = []DetailedRepoMeta{} +} + +func (bpt *ImagePageFinder) Add(namedRepoMeta DetailedRepoMeta) { + bpt.pageBuffer = append(bpt.pageBuffer, 
namedRepoMeta) +} + +func (bpt *ImagePageFinder) Page() []RepoMetadata { + if len(bpt.pageBuffer) == 0 { + return []RepoMetadata{} + } + + sort.Slice(bpt.pageBuffer, SortFunctions()[bpt.sortBy](bpt.pageBuffer)) + + repoStartIndex := 0 + tagStartIndex := 0 + + // the offset and limit are calculatd in terms of tags counted + remainingOffset := bpt.offset + remainingLimit := bpt.limit + + // bring cursor to position in RepoMeta array + for _, drm := range bpt.pageBuffer { + if remainingOffset < len(drm.RepoMeta.Tags) { + tagStartIndex = remainingOffset + + break + } + + remainingOffset -= len(drm.RepoMeta.Tags) + repoStartIndex++ + } + + // offset is larger than the number of tags + if repoStartIndex >= len(bpt.pageBuffer) { + return []RepoMetadata{} + } + + repos := make([]RepoMetadata, 0) + + // finish counting remaining tags inside the first repo meta + partialTags := map[string]Descriptor{} + firstRepoMeta := bpt.pageBuffer[repoStartIndex].RepoMeta + + tags := make([]string, 0, len(firstRepoMeta.Tags)) + for k := range firstRepoMeta.Tags { + tags = append(tags, k) + } + + sort.Strings(tags) + + for i := tagStartIndex; i < len(tags); i++ { + tag := tags[i] + + partialTags[tag] = firstRepoMeta.Tags[tag] + remainingLimit-- + + if remainingLimit == 0 { + firstRepoMeta.Tags = partialTags + repos = append(repos, firstRepoMeta) + + return repos + } + } + + firstRepoMeta.Tags = partialTags + repos = append(repos, firstRepoMeta) + repoStartIndex++ + + // continue with the remaining repos + for i := repoStartIndex; i < len(bpt.pageBuffer); i++ { + repoMeta := bpt.pageBuffer[i].RepoMeta + + if len(repoMeta.Tags) > remainingLimit { + partialTags := map[string]Descriptor{} + + tags := make([]string, 0, len(repoMeta.Tags)) + for k := range repoMeta.Tags { + tags = append(tags, k) + } + + sort.Strings(tags) + + for _, tag := range tags { + partialTags[tag] = repoMeta.Tags[tag] + remainingLimit-- + + if remainingLimit == 0 { + repoMeta.Tags = partialTags + repos = append(repos, repoMeta) + + break + } + } + + return repos + } + + // add the whole repo + repos = append(repos, repoMeta) + remainingLimit -= len(repoMeta.Tags) + + if remainingLimit == 0 { + return repos + } + } + + // we arrive here when the limit is bigger than the number of tags + + return repos +} diff --git a/pkg/meta/repodb/pagination_test.go b/pkg/meta/repodb/pagination_test.go new file mode 100644 index 00000000..be18d935 --- /dev/null +++ b/pkg/meta/repodb/pagination_test.go @@ -0,0 +1,178 @@ +package repodb_test + +import ( + "testing" + + ispec "github.com/opencontainers/image-spec/specs-go/v1" + . 
"github.com/smartystreets/goconvey/convey" + + "zotregistry.io/zot/pkg/meta/repodb" +) + +func TestPagination(t *testing.T) { + Convey("Repo Pagination", t, func() { + Convey("reset", func() { + pageFinder, err := repodb.NewBaseRepoPageFinder(1, 0, repodb.AlphabeticAsc) + So(err, ShouldBeNil) + So(pageFinder, ShouldNotBeNil) + + pageFinder.Add(repodb.DetailedRepoMeta{}) + pageFinder.Add(repodb.DetailedRepoMeta{}) + pageFinder.Add(repodb.DetailedRepoMeta{}) + + pageFinder.Reset() + + So(pageFinder.Page(), ShouldBeEmpty) + }) + }) + + Convey("Image Pagination", t, func() { + Convey("create new pageFinder errors", func() { + pageFinder, err := repodb.NewBaseImagePageFinder(-1, 10, repodb.AlphabeticAsc) + So(pageFinder, ShouldBeNil) + So(err, ShouldNotBeNil) + + pageFinder, err = repodb.NewBaseImagePageFinder(2, -1, repodb.AlphabeticAsc) + So(pageFinder, ShouldBeNil) + So(err, ShouldNotBeNil) + + pageFinder, err = repodb.NewBaseImagePageFinder(2, 1, "wrong sorting criteria") + So(pageFinder, ShouldBeNil) + So(err, ShouldNotBeNil) + }) + + Convey("Reset", func() { + pageFinder, err := repodb.NewBaseImagePageFinder(1, 0, repodb.AlphabeticAsc) + So(err, ShouldBeNil) + So(pageFinder, ShouldNotBeNil) + + pageFinder.Add(repodb.DetailedRepoMeta{}) + pageFinder.Add(repodb.DetailedRepoMeta{}) + pageFinder.Add(repodb.DetailedRepoMeta{}) + + pageFinder.Reset() + + So(pageFinder.Page(), ShouldBeEmpty) + }) + + Convey("Page", func() { + Convey("limit < len(tags)", func() { + pageFinder, err := repodb.NewBaseImagePageFinder(5, 2, repodb.AlphabeticAsc) + So(err, ShouldBeNil) + So(pageFinder, ShouldNotBeNil) + + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repodb.RepoMetadata{ + Name: "repo1", + Tags: map[string]repodb.Descriptor{ + "tag1": { + Digest: "dig1", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + }, + }) + + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repodb.RepoMetadata{ + Name: "repo2", + Tags: map[string]repodb.Descriptor{ + "Tag1": { + Digest: "dig1", + MediaType: ispec.MediaTypeImageManifest, + }, + "Tag2": { + Digest: "dig2", + MediaType: ispec.MediaTypeImageManifest, + }, + "Tag3": { + Digest: "dig3", + MediaType: ispec.MediaTypeImageManifest, + }, + "Tag4": { + Digest: "dig4", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + }, + }) + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repodb.RepoMetadata{ + Name: "repo3", + Tags: map[string]repodb.Descriptor{ + "Tag11": { + Digest: "dig11", + MediaType: ispec.MediaTypeImageManifest, + }, + "Tag12": { + Digest: "dig12", + MediaType: ispec.MediaTypeImageManifest, + }, + "Tag13": { + Digest: "dig13", + MediaType: ispec.MediaTypeImageManifest, + }, + "Tag14": { + Digest: "dig14", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + }, + }) + + result := pageFinder.Page() + So(result[0].Tags, ShouldContainKey, "Tag2") + So(result[0].Tags, ShouldContainKey, "Tag3") + So(result[0].Tags, ShouldContainKey, "Tag4") + So(result[1].Tags, ShouldContainKey, "Tag11") + So(result[1].Tags, ShouldContainKey, "Tag12") + }) + + Convey("limit > len(tags)", func() { + pageFinder, err := repodb.NewBaseImagePageFinder(3, 0, repodb.AlphabeticAsc) + So(err, ShouldBeNil) + So(pageFinder, ShouldNotBeNil) + + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repodb.RepoMetadata{ + Name: "repo1", + Tags: map[string]repodb.Descriptor{ + "tag1": { + Digest: "dig1", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + }, + }) + + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repodb.RepoMetadata{ + Name: "repo2", + Tags: 
map[string]repodb.Descriptor{ + "Tag1": { + Digest: "dig1", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + }, + }) + pageFinder.Add(repodb.DetailedRepoMeta{ + RepoMeta: repodb.RepoMetadata{ + Name: "repo3", + Tags: map[string]repodb.Descriptor{ + "Tag11": { + Digest: "dig11", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + }, + }) + + result := pageFinder.Page() + So(result[0].Tags, ShouldContainKey, "tag1") + So(result[1].Tags, ShouldContainKey, "Tag1") + So(result[2].Tags, ShouldContainKey, "Tag11") + }) + }) + }) +} diff --git a/pkg/meta/repodb/repodb.go b/pkg/meta/repodb/repodb.go new file mode 100644 index 00000000..9f4341cb --- /dev/null +++ b/pkg/meta/repodb/repodb.go @@ -0,0 +1,158 @@ +package repodb + +import ( + "context" + + godigest "github.com/opencontainers/go-digest" +) + +// MetadataDB. +const ( + ManifestDataBucket = "ManifestData" + UserMetadataBucket = "UserMeta" + RepoMetadataBucket = "RepoMetadata" + VersionBucket = "Version" +) + +const ( + SignaturesDirPath = "/tmp/zot/signatures" + SigKey = "dev.cosignproject.cosign/signature" + NotationType = "notation" + CosignType = "cosign" +) + +type RepoDB interface { //nolint:interfacebloat + // IncrementRepoStars adds 1 to the star count of an image + IncrementRepoStars(repo string) error + + // IncrementRepoStars subtracts 1 from the star count of an image + DecrementRepoStars(repo string) error + + // GetRepoStars returns the total number of stars a repo has + GetRepoStars(repo string) (int, error) + + // SetRepoTag sets the tag of a manifest in the tag list of a repo + SetRepoTag(repo string, tag string, manifestDigest godigest.Digest, mediaType string) error + + // DeleteRepoTag delets the tag from the tag list of a repo + DeleteRepoTag(repo string, tag string) error + + // GetRepoMeta returns RepoMetadata of a repo from the database + GetRepoMeta(repo string) (RepoMetadata, error) + + // GetMultipleRepoMeta returns information about all repositories as map[string]RepoMetadata filtered by the filter + // function + GetMultipleRepoMeta(ctx context.Context, filter func(repoMeta RepoMetadata) bool, requestedPage PageInput) ( + []RepoMetadata, error) + + // SetManifestData sets ManifestData for a given manifest in the database + SetManifestData(manifestDigest godigest.Digest, md ManifestData) error + + // GetManifestData return the manifest and it's related config + GetManifestData(manifestDigest godigest.Digest) (ManifestData, error) + + // GetManifestMeta returns ManifestMetadata for a given manifest from the database + GetManifestMeta(repo string, manifestDigest godigest.Digest) (ManifestMetadata, error) + + // GetManifestMeta sets ManifestMetadata for a given manifest in the database + SetManifestMeta(repo string, manifestDigest godigest.Digest, mm ManifestMetadata) error + + // IncrementManifestDownloads adds 1 to the download count of a manifest + IncrementImageDownloads(repo string, reference string) error + + // AddManifestSignature adds signature metadata to a given manifest in the database + AddManifestSignature(repo string, signedManifestDigest godigest.Digest, sm SignatureMetadata) error + + // DeleteSignature delets signature metadata to a given manifest from the database + DeleteSignature(repo string, signedManifestDigest godigest.Digest, sm SignatureMetadata) error + + // SearchRepos searches for repos given a search string + SearchRepos(ctx context.Context, searchText string, filter Filter, requestedPage PageInput) ( + []RepoMetadata, map[string]ManifestMetadata, error) + + // SearchTags 
searches for images(repo:tag) given a search string + SearchTags(ctx context.Context, searchText string, filter Filter, requestedPage PageInput) ( + []RepoMetadata, map[string]ManifestMetadata, error) + + PatchDB() error +} + +type ManifestMetadata struct { + ManifestBlob []byte + ConfigBlob []byte + DownloadCount int + Signatures ManifestSignatures +} + +type ManifestData struct { + ManifestBlob []byte + ConfigBlob []byte +} + +// Descriptor represents an image. Multiple images might have the same digests but different tags. +type Descriptor struct { + Digest string + MediaType string +} + +type DescriptorStatistics struct { + DownloadCount int +} + +type ManifestSignatures map[string][]SignatureInfo + +type RepoMetadata struct { + Name string + Tags map[string]Descriptor + + Statistics map[string]DescriptorStatistics + Signatures map[string]ManifestSignatures + Stars int +} + +type LayerInfo struct { + LayerDigest string + LayerContent []byte + SignatureKey string + Signer string +} + +type SignatureInfo struct { + SignatureManifestDigest string + LayersInfo []LayerInfo +} + +type SignatureMetadata struct { + SignatureType string + SignatureDigest string + LayersInfo []LayerInfo +} + +type SortCriteria string + +const ( + Relevance = SortCriteria("RELEVANCE") + UpdateTime = SortCriteria("UPDATE_TIME") + AlphabeticAsc = SortCriteria("ALPHABETIC_ASC") + AlphabeticDsc = SortCriteria("ALPHABETIC_DSC") + Stars = SortCriteria("STARS") + Downloads = SortCriteria("DOWNLOADS") +) + +type PageInput struct { + Limit int + Offset int + SortBy SortCriteria +} + +type Filter struct { + Os []*string + Arch []*string + HasToBeSigned *bool +} + +type FilterData struct { + OsList []string + ArchList []string + IsSigned bool +} diff --git a/pkg/meta/repodb/repodb_test.go b/pkg/meta/repodb/repodb_test.go new file mode 100644 index 00000000..461e69f5 --- /dev/null +++ b/pkg/meta/repodb/repodb_test.go @@ -0,0 +1,1430 @@ +package repodb_test + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + "os" + "path" + "strconv" + "strings" + "testing" + "time" + + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + . 
"github.com/smartystreets/goconvey/convey" + + "zotregistry.io/zot/pkg/meta/repodb" + bolt "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" + "zotregistry.io/zot/pkg/meta/repodb/common" + dynamo "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper" + dynamoParams "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper/params" + localCtx "zotregistry.io/zot/pkg/requestcontext" +) + +const ( + LINUX = "linux" + WINDOWS = "windows" + AMD = "amd" +) + +func TestBoltDBWrapper(t *testing.T) { + Convey("BoltDB Wrapper creation", t, func() { + boltDBParams := bolt.DBParameters{} + searchDB, err := bolt.NewBoltDBWrapper(boltDBParams) + So(searchDB, ShouldNotBeNil) + So(err, ShouldBeNil) + + err = os.Chmod("repo.db", 0o200) + So(err, ShouldBeNil) + + searchDB, err = bolt.NewBoltDBWrapper(boltDBParams) + So(searchDB, ShouldBeNil) + So(err, ShouldNotBeNil) + + err = os.Chmod("repo.db", 0o600) + So(err, ShouldBeNil) + + defer os.Remove("repo.db") + }) + + Convey("BoltDB Wrapper", t, func() { + boltDBParams := bolt.DBParameters{} + boltdbWrapper, err := bolt.NewBoltDBWrapper(boltDBParams) + defer os.Remove("repo.db") + So(boltdbWrapper, ShouldNotBeNil) + So(err, ShouldBeNil) + + RunRepoDBTests(boltdbWrapper) + }) +} + +func TestDynamoDBWrapper(t *testing.T) { + skipIt(t) + + Convey("DynamoDB Wrapper", t, func() { + dynamoDBDriverParams := dynamoParams.DBDriverParameters{ + Endpoint: os.Getenv("DYNAMODBMOCK_ENDPOINT"), + RepoMetaTablename: "RepoMetadataTable", + ManifestDataTablename: "ManifestDataTable", + VersionTablename: "Version", + Region: "us-east-2", + } + + dynamoDriver, err := dynamo.NewDynamoDBWrapper(dynamoDBDriverParams) + So(dynamoDriver, ShouldNotBeNil) + So(err, ShouldBeNil) + + resetDynamoDBTables := func() error { + err := dynamoDriver.ResetRepoMetaTable() + if err != nil { + return err + } + + err = dynamoDriver.ResetManifestDataTable() + + return err + } + + RunRepoDBTests(dynamoDriver, resetDynamoDBTables) + }) +} + +func RunRepoDBTests(repoDB repodb.RepoDB, preparationFuncs ...func() error) { + Convey("Test RepoDB Interface implementation", func() { + for _, prepFunc := range preparationFuncs { + err := prepFunc() + So(err, ShouldBeNil) + } + + Convey("Test SetManifestData and GetManifestData", func() { + configBlob, manifestBlob, err := generateTestImage() + So(err, ShouldBeNil) + + manifestDigest := digest.FromBytes(manifestBlob) + + err = repoDB.SetManifestData(manifestDigest, repodb.ManifestData{ + ManifestBlob: manifestBlob, + ConfigBlob: configBlob, + }) + So(err, ShouldBeNil) + + mm, err := repoDB.GetManifestData(manifestDigest) + So(err, ShouldBeNil) + So(mm.ManifestBlob, ShouldResemble, manifestBlob) + So(mm.ConfigBlob, ShouldResemble, configBlob) + }) + + Convey("Test GetManifestMeta fails", func() { + _, err := repoDB.GetManifestMeta("repo", "bad digest") + So(err, ShouldNotBeNil) + }) + + Convey("Test SetRepoTag", func() { + // test behaviours + var ( + repo1 = "repo1" + repo2 = "repo2" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + + tag2 = "0.0.2" + manifestDigest2 = digest.FromString("fake-manifes2") + ) + + Convey("Setting a good repo", func() { + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Tags[tag1].Digest, ShouldEqual, manifestDigest1) + }) + + Convey("Set multiple tags for repo", func() { + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, 
ShouldBeNil) + err = repoDB.SetRepoTag(repo1, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Tags[tag1].Digest, ShouldEqual, manifestDigest1) + So(repoMeta.Tags[tag2].Digest, ShouldEqual, manifestDigest2) + }) + + Convey("Set multiple repos", func() { + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo2, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + repoMeta1, err := repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + repoMeta2, err := repoDB.GetRepoMeta(repo2) + So(err, ShouldBeNil) + + So(repoMeta1.Tags[tag1].Digest, ShouldResemble, manifestDigest1.String()) + So(repoMeta2.Tags[tag2].Digest, ShouldResemble, manifestDigest2.String()) + }) + + Convey("Setting a repo with invalid fields", func() { + Convey("Repo name is not valid", func() { + err := repoDB.SetRepoTag("", tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldNotBeNil) + }) + + Convey("Tag is not valid", func() { + err := repoDB.SetRepoTag(repo1, "", manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldNotBeNil) + }) + + Convey("Manifest Digest is not valid", func() { + err := repoDB.SetRepoTag(repo1, tag1, "", ispec.MediaTypeImageManifest) + So(err, ShouldNotBeNil) + }) + }) + }) + + Convey("Test GetRepoMeta", func() { + var ( + repo1 = "repo1" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + + repo2 = "repo2" + tag2 = "0.0.2" + manifestDigest2 = digest.FromString("fake-manifest2") + + InexistentRepo = "InexistentRepo" + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetRepoTag(repo2, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + Convey("Get a existent repo", func() { + repoMeta1, err := repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta1.Tags[tag1].Digest, ShouldResemble, manifestDigest1.String()) + + repoMeta2, err := repoDB.GetRepoMeta(repo2) + So(err, ShouldBeNil) + So(repoMeta2.Tags[tag2].Digest, ShouldResemble, manifestDigest2.String()) + }) + + Convey("Get a repo that doesn't exist", func() { + repoMeta, err := repoDB.GetRepoMeta(InexistentRepo) + So(err, ShouldNotBeNil) + So(repoMeta, ShouldBeZeroValue) + }) + }) + + Convey("Test DeleteRepoTag", func() { + var ( + repo = "repo1" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + tag2 = "0.0.2" + manifestDigest2 = digest.FromString("fake-manifest2") + ) + + err := repoDB.SetRepoTag(repo, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetRepoTag(repo, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + Convey("Delete from repo a tag", func() { + _, err := repoDB.GetRepoMeta(repo) + So(err, ShouldBeNil) + + err = repoDB.DeleteRepoTag(repo, tag1) + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo) + So(err, ShouldBeNil) + + _, ok := repoMeta.Tags[tag1] + So(ok, ShouldBeFalse) + So(repoMeta.Tags[tag2].Digest, ShouldResemble, manifestDigest2.String()) + }) + + Convey("Delete all tags from repo", func() { + err := repoDB.DeleteRepoTag(repo, tag1) + So(err, ShouldBeNil) + err = repoDB.DeleteRepoTag(repo, tag2) + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo) + So(err, ShouldNotBeNil) + So(repoMeta, ShouldBeZeroValue) + }) + + 
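// Deleting every tag of a repo is expected to remove its RepoMetadata entry entirely, which is why GetRepoMeta fails above. +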
Convey("Delete inexistent tag from repo", func() { + err := repoDB.DeleteRepoTag(repo, "InexistentTag") + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo) + So(err, ShouldBeNil) + + So(repoMeta.Tags[tag1].Digest, ShouldResemble, manifestDigest1.String()) + So(repoMeta.Tags[tag2].Digest, ShouldResemble, manifestDigest2.String()) + }) + + Convey("Delete from inexistent repo", func() { + err := repoDB.DeleteRepoTag("InexistentRepo", "InexistentTag") + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo) + So(err, ShouldBeNil) + + So(repoMeta.Tags[tag1].Digest, ShouldResemble, manifestDigest1.String()) + So(repoMeta.Tags[tag2].Digest, ShouldResemble, manifestDigest2.String()) + }) + }) + + Convey("Test GetMultipleRepoMeta", func() { + var ( + repo1 = "repo1" + repo2 = "repo2" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + tag2 = "0.0.2" + manifestDigest2 = digest.FromString("fake-manifest2") + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetRepoTag(repo1, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetRepoTag(repo2, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + Convey("Get all Repometa", func() { + repoMetaSlice, err := repoDB.GetMultipleRepoMeta(context.TODO(), func(repoMeta repodb.RepoMetadata) bool { + return true + }, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repoMetaSlice), ShouldEqual, 2) + }) + + Convey("Get repo with a tag", func() { + repoMetaSlice, err := repoDB.GetMultipleRepoMeta(context.TODO(), func(repoMeta repodb.RepoMetadata) bool { + for tag := range repoMeta.Tags { + if tag == tag1 { + return true + } + } + + return false + }, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repoMetaSlice), ShouldEqual, 1) + So(repoMetaSlice[0].Tags[tag1].Digest == manifestDigest1.String(), ShouldBeTrue) + }) + + Convey("Wrong page input", func() { + repoMetaSlice, err := repoDB.GetMultipleRepoMeta(context.TODO(), func(repoMeta repodb.RepoMetadata) bool { + for tag := range repoMeta.Tags { + if tag == tag1 { + return true + } + } + + return false + }, repodb.PageInput{Limit: -1, Offset: -1}) + + So(err, ShouldNotBeNil) + So(len(repoMetaSlice), ShouldEqual, 0) + }) + }) + + Convey("Test IncrementRepoStars", func() { + var ( + repo1 = "repo1" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.IncrementRepoStars(repo1) + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Stars, ShouldEqual, 1) + + err = repoDB.IncrementRepoStars(repo1) + So(err, ShouldBeNil) + + repoMeta, err = repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Stars, ShouldEqual, 2) + + err = repoDB.IncrementRepoStars(repo1) + So(err, ShouldBeNil) + + repoMeta, err = repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Stars, ShouldEqual, 3) + }) + + Convey("Test DecrementRepoStars", func() { + var ( + repo1 = "repo1" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.IncrementRepoStars(repo1) + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Stars, 
ShouldEqual, 1) + + err = repoDB.DecrementRepoStars(repo1) + So(err, ShouldBeNil) + + repoMeta, err = repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Stars, ShouldEqual, 0) + + err = repoDB.DecrementRepoStars(repo1) + So(err, ShouldBeNil) + + repoMeta, err = repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Stars, ShouldEqual, 0) + + repoMeta, err = repoDB.GetRepoMeta("badRepo") + So(err, ShouldNotBeNil) + }) + + Convey("Test GetRepoStars", func() { + var ( + repo1 = "repo1" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.IncrementRepoStars(repo1) + So(err, ShouldBeNil) + + stars, err := repoDB.GetRepoStars(repo1) + So(err, ShouldBeNil) + So(stars, ShouldEqual, 1) + + err = repoDB.IncrementRepoStars(repo1) + So(err, ShouldBeNil) + err = repoDB.IncrementRepoStars(repo1) + So(err, ShouldBeNil) + + stars, err = repoDB.GetRepoStars(repo1) + So(err, ShouldBeNil) + So(stars, ShouldEqual, 3) + + _, err = repoDB.GetRepoStars("badRepo") + So(err, ShouldNotBeNil) + }) + + Convey("Test IncrementImageDownloads", func() { + var ( + repo1 = "repo1" + tag1 = "0.0.1" + ) + + configBlob, manifestBlob, err := generateTestImage() + So(err, ShouldBeNil) + + manifestDigest := digest.FromBytes(manifestBlob) + + err = repoDB.SetRepoTag(repo1, tag1, manifestDigest, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest, repodb.ManifestMetadata{ + ManifestBlob: manifestBlob, + ConfigBlob: configBlob, + }) + So(err, ShouldBeNil) + + err = repoDB.IncrementImageDownloads(repo1, tag1) + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + + So(repoMeta.Statistics[manifestDigest.String()].DownloadCount, ShouldEqual, 1) + + err = repoDB.IncrementImageDownloads(repo1, tag1) + So(err, ShouldBeNil) + + repoMeta, err = repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + + So(repoMeta.Statistics[manifestDigest.String()].DownloadCount, ShouldEqual, 2) + + _, err = repoDB.GetManifestMeta(repo1, "badManiestDigest") + So(err, ShouldNotBeNil) + }) + + Convey("Test AddImageSignature", func() { + var ( + repo1 = "repo1" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, repodb.ManifestMetadata{}) + So(err, ShouldBeNil) + + err = repoDB.AddManifestSignature(repo1, manifestDigest1, repodb.SignatureMetadata{ + SignatureType: "cosign", + SignatureDigest: "digest", + }) + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Signatures[manifestDigest1.String()]["cosign"][0].SignatureManifestDigest, + ShouldResemble, "digest") + + _, err = repoDB.GetManifestMeta(repo1, "badDigest") + So(err, ShouldNotBeNil) + }) + + Convey("Test DeleteSignature", func() { + var ( + repo1 = "repo1" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, repodb.ManifestMetadata{}) + So(err, ShouldBeNil) + + err = repoDB.AddManifestSignature(repo1, manifestDigest1, repodb.SignatureMetadata{ + SignatureType: "cosign", + SignatureDigest: "digest", + }) + 
So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Signatures[manifestDigest1.String()]["cosign"][0].SignatureManifestDigest, + ShouldResemble, "digest") + + err = repoDB.DeleteSignature(repo1, manifestDigest1, repodb.SignatureMetadata{ + SignatureType: "cosign", + SignatureDigest: "digest", + }) + So(err, ShouldBeNil) + + repoMeta, err = repoDB.GetRepoMeta(repo1) + So(err, ShouldBeNil) + So(repoMeta.Signatures[manifestDigest1.String()]["cosign"], ShouldBeEmpty) + + err = repoDB.DeleteSignature(repo1, "badDigest", repodb.SignatureMetadata{ + SignatureType: "cosign", + SignatureDigest: "digest", + }) + So(err, ShouldNotBeNil) + }) + + Convey("Test SearchRepos", func() { + var ( + repo1 = "repo1" + repo2 = "repo2" + repo3 = "repo3" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + tag2 = "0.0.2" + manifestDigest2 = digest.FromString("fake-manifest2") + tag3 = "0.0.3" + manifestDigest3 = digest.FromString("fake-manifest3") + ctx = context.Background() + emptyManifest ispec.Manifest + emptyConfig ispec.Manifest + ) + emptyManifestBlob, err := json.Marshal(emptyManifest) + So(err, ShouldBeNil) + + emptyConfigBlob, err := json.Marshal(emptyConfig) + So(err, ShouldBeNil) + + emptyRepoMeta := repodb.ManifestMetadata{ + ManifestBlob: emptyManifestBlob, + ConfigBlob: emptyConfigBlob, + } + + Convey("Search all repos", func() { + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo2, tag3, manifestDigest3, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo1, manifestDigest2, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo1, manifestDigest3, emptyRepoMeta) + So(err, ShouldBeNil) + + repos, manifesMetaMap, err := repoDB.SearchRepos(ctx, "", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 2) + So(len(manifesMetaMap), ShouldEqual, 3) + So(manifesMetaMap, ShouldContainKey, manifestDigest1.String()) + So(manifesMetaMap, ShouldContainKey, manifestDigest2.String()) + So(manifesMetaMap, ShouldContainKey, manifestDigest3.String()) + }) + + Convey("Search a repo by name", func() { + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + + repos, manifesMetaMap, err := repoDB.SearchRepos(ctx, repo1, repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(len(manifesMetaMap), ShouldEqual, 1) + So(manifesMetaMap, ShouldContainKey, manifestDigest1.String()) + }) + + Convey("Search non-existing repo by name", func() { + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetRepoTag(repo1, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + repos, manifesMetaMap, err := repoDB.SearchRepos(ctx, "RepoThatDoesntExist", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 0) + So(len(manifesMetaMap), ShouldEqual, 0) + }) + + Convey("Search with partial match", func() { + err := repoDB.SetRepoTag("alpine", tag1, manifestDigest1, 
ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("pine", tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag("golang", tag3, manifestDigest3, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta("alpine", manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta("pine", manifestDigest2, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta("golang", manifestDigest3, emptyRepoMeta) + So(err, ShouldBeNil) + + repos, manifesMetaMap, err := repoDB.SearchRepos(ctx, "pine", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 2) + So(manifesMetaMap, ShouldContainKey, manifestDigest1.String()) + So(manifesMetaMap, ShouldContainKey, manifestDigest2.String()) + So(manifesMetaMap, ShouldNotContainKey, manifestDigest3.String()) + }) + + Convey("Search multiple repos that share manifests", func() { + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo2, tag2, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo3, tag3, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo2, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo3, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + + repos, manifesMetaMap, err := repoDB.SearchRepos(ctx, "", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 3) + So(len(manifesMetaMap), ShouldEqual, 1) + }) + + Convey("Search repos with access control", func() { + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo2, tag2, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo3, tag3, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo2, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo3, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + + acCtx := localCtx.AccessControlContext{ + ReadGlobPatterns: map[string]bool{ + repo1: true, + repo2: true, + }, + Username: "username", + } + authzCtxKey := localCtx.GetContextKey() + ctx := context.WithValue(context.Background(), authzCtxKey, acCtx) + + repos, _, err := repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 2) + for _, k := range repos { + So(k.Name, ShouldBeIn, []string{repo1, repo2}) + } + }) + + Convey("Search paginated repos", func() { + reposCount := 50 + repoNameBuilder := strings.Builder{} + + for _, i := range rand.Perm(reposCount) { + manifestDigest := digest.FromString("fakeManifest" + strconv.Itoa(i)) + timeString := fmt.Sprintf("1%02d0-01-01 04:35", i) + createdTime, err := time.Parse("2006-01-02 15:04", timeString) + So(err, ShouldBeNil) + + configContent := ispec.Image{ + History: []ispec.History{ + { + Created: &createdTime, + }, + }, + } + + configBlob, err := json.Marshal(configContent) + So(err, ShouldBeNil) + + manifestMeta := repodb.ManifestMetadata{ + 
ManifestBlob: emptyManifestBlob, + ConfigBlob: configBlob, + DownloadCount: i, + } + repoName := "repo" + strconv.Itoa(i) + + err = repoDB.SetRepoTag(repoName, tag1, manifestDigest, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repoName, manifestDigest, manifestMeta) + So(err, ShouldBeNil) + + repoNameBuilder.Reset() + } + + repos, _, err := repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, reposCount) + + repos, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 20, + SortBy: repodb.AlphabeticAsc, + }) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 20) + + repos, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 0, + SortBy: repodb.AlphabeticAsc, + }) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(repos[0].Name, ShouldResemble, "repo0") + + repos, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 1, + SortBy: repodb.AlphabeticAsc, + }) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(repos[0].Name, ShouldResemble, "repo1") + + repos, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 49, + SortBy: repodb.AlphabeticAsc, + }) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(repos[0].Name, ShouldResemble, "repo9") + + repos, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 49, + SortBy: repodb.AlphabeticDsc, + }) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(repos[0].Name, ShouldResemble, "repo0") + + repos, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 0, + SortBy: repodb.AlphabeticDsc, + }) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(repos[0].Name, ShouldResemble, "repo9") + + // sort by downloads + repos, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 0, + SortBy: repodb.Downloads, + }) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(repos[0].Name, ShouldResemble, "repo49") + + // sort by last update + repos, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 0, + SortBy: repodb.UpdateTime, + }) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(repos[0].Name, ShouldResemble, "repo49") + + repos, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 100, + SortBy: repodb.UpdateTime, + }) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 0) + So(repos, ShouldBeEmpty) + }) + + Convey("Search with wrong pagination input", func() { + _, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 100, + SortBy: repodb.UpdateTime, + }) + So(err, ShouldBeNil) + + _, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: -1, + Offset: 100, + SortBy: repodb.UpdateTime, + }) + So(err, ShouldNotBeNil) + + _, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: -1, + SortBy: repodb.UpdateTime, + }) + So(err, ShouldNotBeNil) + + _, _, err = repoDB.SearchRepos(ctx, "repo", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 1, + SortBy: repodb.SortCriteria("InvalidSortingCriteria"), + }) + So(err, ShouldNotBeNil) + }) + }) + + Convey("Test SearchTags", func() { + 
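// SearchTags takes a query of the form "<repo>:<tag prefix>": the repository part must match exactly, while the tag part is treated as a prefix. + // As an illustration, a call such as repoDB.SearchTags(ctx, "repo1:0.0", repodb.Filter{}, repodb.PageInput{}) is expected to return repo1 with only the "0.0.1" and "0.0.2" tags, as exercised below. +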
var ( + repo1 = "repo1" + repo2 = "repo2" + manifestDigest1 = digest.FromString("fake-manifest1") + manifestDigest2 = digest.FromString("fake-manifest2") + manifestDigest3 = digest.FromString("fake-manifest3") + ctx = context.Background() + emptyManifest ispec.Manifest + emptyConfig ispec.Manifest + ) + + emptyManifestBlob, err := json.Marshal(emptyManifest) + So(err, ShouldBeNil) + + emptyConfigBlob, err := json.Marshal(emptyConfig) + So(err, ShouldBeNil) + + emptyRepoMeta := repodb.ManifestMetadata{ + ManifestBlob: emptyManifestBlob, + ConfigBlob: emptyConfigBlob, + } + + err = repoDB.SetRepoTag(repo1, "0.0.1", manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, "0.0.2", manifestDigest3, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, "0.1.0", manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, "1.0.0", manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, "1.0.1", manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo2, "0.0.1", manifestDigest3, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo1, manifestDigest2, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo1, manifestDigest3, emptyRepoMeta) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo2, manifestDigest3, emptyRepoMeta) + So(err, ShouldBeNil) + + Convey("With exact match", func() { + repos, manifesMetaMap, err := repoDB.SearchTags(ctx, "repo1:0.0.1", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(len(repos[0].Tags), ShouldEqual, 1) + So(repos[0].Tags, ShouldContainKey, "0.0.1") + So(manifesMetaMap, ShouldContainKey, manifestDigest1.String()) + }) + + Convey("With partial repo path", func() { + repos, manifesMetaMap, err := repoDB.SearchTags(ctx, "repo:0.0.1", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 0) + So(len(manifesMetaMap), ShouldEqual, 0) + }) + + Convey("With partial tag", func() { + repos, manifesMetaMap, err := repoDB.SearchTags(ctx, "repo1:0.0", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(len(repos[0].Tags), ShouldEqual, 2) + So(repos[0].Tags, ShouldContainKey, "0.0.2") + So(repos[0].Tags, ShouldContainKey, "0.0.1") + So(manifesMetaMap, ShouldContainKey, manifestDigest1.String()) + So(manifesMetaMap, ShouldContainKey, manifestDigest3.String()) + + repos, manifesMetaMap, err = repoDB.SearchTags(ctx, "repo1:0.", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(len(repos[0].Tags), ShouldEqual, 3) + So(repos[0].Tags, ShouldContainKey, "0.0.1") + So(repos[0].Tags, ShouldContainKey, "0.0.2") + So(repos[0].Tags, ShouldContainKey, "0.1.0") + So(manifesMetaMap, ShouldContainKey, manifestDigest1.String()) + So(manifesMetaMap, ShouldContainKey, manifestDigest2.String()) + So(manifesMetaMap, ShouldContainKey, manifestDigest3.String()) + }) + + Convey("With bad query", func() { + repos, manifesMetaMap, err := repoDB.SearchTags(ctx, "repo:0.0.1:test", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldNotBeNil) + So(len(repos), ShouldEqual, 0) + So(len(manifesMetaMap), ShouldEqual, 0) + }) + + Convey("Search with access control", func() { 
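+ // Results honour the AccessControlContext attached to the request context: only repos whose ReadGlobPatterns entry is true (repo1 here) are expected to be returned for the user.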
+ var ( + repo1 = "repo1" + repo2 = "repo2" + repo3 = "repo3" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + tag2 = "0.0.2" + tag3 = "0.0.3" + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo2, tag2, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo3, tag3, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + config := ispec.Image{} + configBlob, err := json.Marshal(config) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, repodb.ManifestMetadata{ConfigBlob: configBlob}) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo2, manifestDigest1, repodb.ManifestMetadata{ConfigBlob: configBlob}) + So(err, ShouldBeNil) + err = repoDB.SetManifestMeta(repo3, manifestDigest1, repodb.ManifestMetadata{ConfigBlob: configBlob}) + So(err, ShouldBeNil) + + acCtx := localCtx.AccessControlContext{ + ReadGlobPatterns: map[string]bool{ + repo1: true, + repo2: false, + }, + Username: "username", + } + authzCtxKey := localCtx.GetContextKey() + ctx := context.WithValue(context.Background(), authzCtxKey, acCtx) + + repos, _, err := repoDB.SearchTags(ctx, "repo1:", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(repos[0].Name, ShouldResemble, repo1) + + repos, _, err = repoDB.SearchTags(ctx, "repo2:", repodb.Filter{}, repodb.PageInput{}) + So(err, ShouldBeNil) + So(repos, ShouldBeEmpty) + }) + + Convey("With wrong pagination input", func() { + repos, _, err := repoDB.SearchTags(ctx, "repo2:", repodb.Filter{}, repodb.PageInput{ + Limit: -1, + }) + So(err, ShouldNotBeNil) + So(repos, ShouldBeEmpty) + }) + }) + + Convey("Paginated tag search", func() { + var ( + repo1 = "repo1" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + tag2 = "0.0.2" + tag3 = "0.0.3" + tag4 = "0.0.4" + tag5 = "0.0.5" + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, tag2, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, tag3, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, tag4, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, tag5, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + config := ispec.Image{} + configBlob, err := json.Marshal(config) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, repodb.ManifestMetadata{ConfigBlob: configBlob}) + So(err, ShouldBeNil) + + repos, _, err := repoDB.SearchTags(context.TODO(), "repo1:", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 0, + SortBy: repodb.AlphabeticAsc, + }) + + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + keys := make([]string, 0, len(repos[0].Tags)) + for k := range repos[0].Tags { + keys = append(keys, k) + } + + repos, _, err = repoDB.SearchTags(context.TODO(), "repo1:", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 1, + SortBy: repodb.AlphabeticAsc, + }) + + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + for k := range repos[0].Tags { + keys = append(keys, k) + } + + repos, _, err = repoDB.SearchTags(context.TODO(), "repo1:", repodb.Filter{}, repodb.PageInput{ + Limit: 1, + Offset: 2, + SortBy: repodb.AlphabeticAsc, + }) + + 
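// After three limit-1 pages (offsets 0, 1 and 2, sorted alphabetically) the collected keys should cover the first three tags, as asserted below. +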
So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + for k := range repos[0].Tags { + keys = append(keys, k) + } + + So(keys, ShouldContain, tag1) + So(keys, ShouldContain, tag2) + So(keys, ShouldContain, tag3) + }) + + Convey("Test repo search with filtering", func() { + var ( + repo1 = "repo1" + repo2 = "repo2" + repo3 = "repo3" + repo4 = "repo4" + tag1 = "0.0.1" + tag2 = "0.0.2" + manifestDigest1 = digest.FromString("fake-manifest1") + manifestDigest2 = digest.FromString("fake-manifest2") + manifestDigest3 = digest.FromString("fake-manifest3") + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo2, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo3, tag1, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo4, tag1, manifestDigest3, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + config1 := ispec.Image{ + Platform: ispec.Platform{ + Architecture: AMD, + OS: LINUX, + }, + } + configBlob1, err := json.Marshal(config1) + So(err, ShouldBeNil) + + config2 := ispec.Image{ + Platform: ispec.Platform{ + Architecture: "arch", + OS: WINDOWS, + }, + } + configBlob2, err := json.Marshal(config2) + So(err, ShouldBeNil) + + config3 := ispec.Image{} + configBlob3, err := json.Marshal(config3) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, repodb.ManifestMetadata{ConfigBlob: configBlob1}) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest2, repodb.ManifestMetadata{ConfigBlob: configBlob2}) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo2, manifestDigest1, repodb.ManifestMetadata{ConfigBlob: configBlob1}) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo3, manifestDigest2, repodb.ManifestMetadata{ConfigBlob: configBlob2}) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo4, manifestDigest3, repodb.ManifestMetadata{ConfigBlob: configBlob3}) + So(err, ShouldBeNil) + + opSys := LINUX + arch := "" + filter := repodb.Filter{ + Os: []*string{&opSys}, + } + + repos, _, err := repoDB.SearchRepos(context.TODO(), "", filter, repodb.PageInput{SortBy: repodb.AlphabeticAsc}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 2) + So(repos[0].Name, ShouldResemble, "repo1") + So(repos[1].Name, ShouldResemble, "repo2") + + opSys = WINDOWS + filter = repodb.Filter{ + Os: []*string{&opSys}, + } + repos, _, err = repoDB.SearchRepos(context.TODO(), "repo", filter, repodb.PageInput{SortBy: repodb.AlphabeticAsc}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 2) + So(repos[0].Name, ShouldResemble, "repo1") + So(repos[1].Name, ShouldResemble, "repo3") + + opSys = "wrong" + filter = repodb.Filter{ + Os: []*string{&opSys}, + } + repos, _, err = repoDB.SearchRepos(context.TODO(), "repo", filter, repodb.PageInput{SortBy: repodb.AlphabeticAsc}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 0) + + opSys = LINUX + arch = AMD + filter = repodb.Filter{ + Os: []*string{&opSys}, + Arch: []*string{&arch}, + } + repos, _, err = repoDB.SearchRepos(context.TODO(), "repo", filter, repodb.PageInput{SortBy: repodb.AlphabeticAsc}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 2) + So(repos[0].Name, ShouldResemble, "repo1") + So(repos[1].Name, ShouldResemble, "repo2") + + opSys = WINDOWS + arch = AMD + filter = 
repodb.Filter{ + Os: []*string{&opSys}, + Arch: []*string{&arch}, + } + repos, _, err = repoDB.SearchRepos(context.TODO(), "repo", filter, repodb.PageInput{SortBy: repodb.AlphabeticAsc}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + }) + + Convey("Test tags search with filtering", func() { + var ( + repo1 = "repo1" + repo2 = "repo2" + repo3 = "repo3" + repo4 = "repo4" + tag1 = "0.0.1" + tag2 = "0.0.2" + manifestDigest1 = digest.FromString("fake-manifest1") + manifestDigest2 = digest.FromString("fake-manifest2") + manifestDigest3 = digest.FromString("fake-manifest3") + ) + + err := repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo2, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo3, tag1, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo4, tag1, manifestDigest3, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + config1 := ispec.Image{ + Platform: ispec.Platform{ + Architecture: AMD, + OS: LINUX, + }, + } + configBlob1, err := json.Marshal(config1) + So(err, ShouldBeNil) + + config2 := ispec.Image{ + Platform: ispec.Platform{ + Architecture: "arch", + OS: WINDOWS, + }, + } + configBlob2, err := json.Marshal(config2) + So(err, ShouldBeNil) + + config3 := ispec.Image{} + configBlob3, err := json.Marshal(config3) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, repodb.ManifestMetadata{ConfigBlob: configBlob1}) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest2, repodb.ManifestMetadata{ConfigBlob: configBlob2}) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo2, manifestDigest1, repodb.ManifestMetadata{ConfigBlob: configBlob1}) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo3, manifestDigest2, repodb.ManifestMetadata{ConfigBlob: configBlob2}) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo4, manifestDigest3, repodb.ManifestMetadata{ConfigBlob: configBlob3}) + So(err, ShouldBeNil) + + opSys := LINUX + arch := AMD + filter := repodb.Filter{ + Os: []*string{&opSys}, + Arch: []*string{&arch}, + } + repos, _, err := repoDB.SearchTags(context.TODO(), "repo1:", filter, repodb.PageInput{SortBy: repodb.AlphabeticAsc}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 1) + So(repos[0].Tags, ShouldContainKey, tag1) + + opSys = LINUX + arch = "badArch" + filter = repodb.Filter{ + Os: []*string{&opSys}, + Arch: []*string{&arch}, + } + repos, _, err = repoDB.SearchTags(context.TODO(), "repo1:", filter, repodb.PageInput{SortBy: repodb.AlphabeticAsc}) + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 0) + }) + }) +} + +func TestRelevanceSorting(t *testing.T) { + Convey("Test Relevance Sorting", t, func() { + So(common.ScoreRepoName("alpine", "alpine"), ShouldEqual, 1) + So(common.ScoreRepoName("test/alpine", "alpine"), ShouldEqual, -1) + So(common.ScoreRepoName("alpine", "test/alpine"), ShouldEqual, 1) + So(common.ScoreRepoName("test", "test/alpine"), ShouldEqual, 10) + So(common.ScoreRepoName("pine", "test/alpine"), ShouldEqual, 3) + So(common.ScoreRepoName("pine", "alpine/alpine"), ShouldEqual, 3) + So(common.ScoreRepoName("pine", "alpine/test"), ShouldEqual, 30) + So(common.ScoreRepoName("test/pine", "alpine"), ShouldEqual, -1) + So(common.ScoreRepoName("repo/test", "repo/test/alpine"), 
ShouldEqual, 1) + So(common.ScoreRepoName("repo/test/golang", "repo/test2/alpine"), ShouldEqual, -1) + So(common.ScoreRepoName("repo/test/pine", "repo/test/alpine"), ShouldEqual, 3) + + Convey("Integration", func() { + filePath := path.Join(t.TempDir(), "repo.db") + boltDBParams := bolt.DBParameters{ + RootDir: t.TempDir(), + } + + repoDB, err := bolt.NewBoltDBWrapper(boltDBParams) + So(repoDB, ShouldNotBeNil) + So(err, ShouldBeNil) + + defer os.Remove(filePath) + + var ( + repo1 = "alpine" + repo2 = "alpine/test" + repo3 = "notalpine" + repo4 = "unmached/repo" + tag1 = "0.0.1" + manifestDigest1 = digest.FromString("fake-manifest1") + tag2 = "0.0.2" + manifestDigest2 = digest.FromString("fake-manifest2") + tag3 = "0.0.3" + manifestDigest3 = digest.FromString("fake-manifest3") + ctx = context.Background() + emptyManifest ispec.Manifest + emptyConfig ispec.Manifest + ) + emptyManifestBlob, err := json.Marshal(emptyManifest) + So(err, ShouldBeNil) + + emptyConfigBlob, err := json.Marshal(emptyConfig) + So(err, ShouldBeNil) + + emptyRepoMeta := repodb.ManifestMetadata{ + ManifestBlob: emptyManifestBlob, + ConfigBlob: emptyConfigBlob, + } + + err = repoDB.SetRepoTag(repo1, tag1, manifestDigest1, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo1, tag2, manifestDigest2, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo2, tag3, manifestDigest3, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo3, tag3, manifestDigest3, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + err = repoDB.SetRepoTag(repo4, tag1, manifestDigest3, ispec.MediaTypeImageManifest) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo1, manifestDigest2, emptyRepoMeta) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo2, manifestDigest1, emptyRepoMeta) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo3, manifestDigest2, emptyRepoMeta) + So(err, ShouldBeNil) + + err = repoDB.SetManifestMeta(repo4, manifestDigest3, emptyRepoMeta) + So(err, ShouldBeNil) + + repos, _, err := repoDB.SearchRepos(ctx, "pine", repodb.Filter{}, + repodb.PageInput{SortBy: repodb.Relevance}, + ) + + So(err, ShouldBeNil) + So(len(repos), ShouldEqual, 3) + So(repos[0].Name, ShouldEqual, repo1) + So(repos[1].Name, ShouldEqual, repo3) + So(repos[2].Name, ShouldEqual, repo2) + }) + }) +} + +func generateTestImage() ([]byte, []byte, error) { + config := ispec.Image{ + Platform: ispec.Platform{ + Architecture: "amd64", + OS: LINUX, + }, + RootFS: ispec.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{}, + }, + Author: "ZotUser", + } + + configBlob, err := json.Marshal(config) + if err != nil { + return []byte{}, []byte{}, err + } + + configDigest := digest.FromBytes(configBlob) + + layers := [][]byte{ + make([]byte, 100), + } + + // init layers with random values + for i := range layers { + //nolint:gosec + _, err := rand.Read(layers[i]) + if err != nil { + return []byte{}, []byte{}, err + } + } + + manifest := ispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ispec.Descriptor{ + MediaType: "application/vnd.oci.image.config.v1+json", + Digest: configDigest, + Size: int64(len(configBlob)), + }, + Layers: []ispec.Descriptor{ + { + MediaType: "application/vnd.oci.image.layer.v1.tar", + Digest: digest.FromBytes(layers[0]), + Size: int64(len(layers[0])), + }, + }, + } + + manifestBlob, err := 
json.Marshal(manifest) + if err != nil { + return []byte{}, []byte{}, err + } + + return configBlob, manifestBlob, nil +} diff --git a/pkg/meta/repodb/repodbfactory/repodb_factory.go b/pkg/meta/repodb/repodbfactory/repodb_factory.go new file mode 100644 index 00000000..747e2870 --- /dev/null +++ b/pkg/meta/repodb/repodbfactory/repodb_factory.go @@ -0,0 +1,36 @@ +package repodbfactory + +import ( + "zotregistry.io/zot/errors" + "zotregistry.io/zot/pkg/meta/repodb" + boltdb_wrapper "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" + dynamodb_wrapper "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper" + dynamoParams "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper/params" +) + +func Create(dbtype string, parameters interface{}) (repodb.RepoDB, error) { //nolint:contextcheck + switch dbtype { + case "boltdb": + { + properParameters, ok := parameters.(boltdb_wrapper.DBParameters) + if !ok { + panic("failed type assertion") + } + + return boltdb_wrapper.NewBoltDBWrapper(properParameters) + } + case "dynamodb": + { + properParameters, ok := parameters.(dynamoParams.DBDriverParameters) + if !ok { + panic("failed type assertion") + } + + return dynamodb_wrapper.NewDynamoDBWrapper(properParameters) + } + default: + { + return nil, errors.ErrBadConfig + } + } +} diff --git a/pkg/meta/repodb/repodbfactory/repodb_factory_test.go b/pkg/meta/repodb/repodbfactory/repodb_factory_test.go new file mode 100644 index 00000000..e270527d --- /dev/null +++ b/pkg/meta/repodb/repodbfactory/repodb_factory_test.go @@ -0,0 +1,62 @@ +package repodbfactory_test + +import ( + "os" + "testing" + + . "github.com/smartystreets/goconvey/convey" + + bolt "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" + dynamoParams "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper/params" + "zotregistry.io/zot/pkg/meta/repodb/repodbfactory" +) + +func TestCreateDynamo(t *testing.T) { + skipDynamo(t) + + Convey("Create", t, func() { + dynamoDBDriverParams := dynamoParams.DBDriverParameters{ + Endpoint: os.Getenv("DYNAMODBMOCK_ENDPOINT"), + RepoMetaTablename: "RepoMetadataTable", + ManifestDataTablename: "ManifestDataTable", + VersionTablename: "Version", + Region: "us-east-2", + } + + repoDB, err := repodbfactory.Create("dynamodb", dynamoDBDriverParams) + So(repoDB, ShouldNotBeNil) + So(err, ShouldBeNil) + }) + + Convey("Fails", t, func() { + So(func() { _, _ = repodbfactory.Create("dynamodb", bolt.DBParameters{RootDir: "root"}) }, ShouldPanic) + + repoDB, err := repodbfactory.Create("random", bolt.DBParameters{RootDir: "root"}) + So(repoDB, ShouldBeNil) + So(err, ShouldNotBeNil) + }) +} + +func TestCreateBoltDB(t *testing.T) { + Convey("Create", t, func() { + rootDir := t.TempDir() + + repoDB, err := repodbfactory.Create("boltdb", bolt.DBParameters{ + RootDir: rootDir, + }) + So(repoDB, ShouldNotBeNil) + So(err, ShouldBeNil) + }) + + Convey("fails", t, func() { + So(func() { _, _ = repodbfactory.Create("boltdb", dynamoParams.DBDriverParameters{}) }, ShouldPanic) + }) +} + +func skipDynamo(t *testing.T) { + t.Helper() + + if os.Getenv("DYNAMODBMOCK_ENDPOINT") == "" { + t.Skip("Skipping testing without AWS DynamoDB mock server") + } +} diff --git a/pkg/meta/repodb/sync_repodb.go b/pkg/meta/repodb/sync_repodb.go new file mode 100644 index 00000000..e02f4576 --- /dev/null +++ b/pkg/meta/repodb/sync_repodb.go @@ -0,0 +1,273 @@ +package repodb + +import ( + "encoding/json" + "errors" + + godigest "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + + zerr "zotregistry.io/zot/errors" + 
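A minimal wiring sketch for the new factory and sync entry points (the startup code, variable names, and rootDir shown here are assumed for illustration and are not part of this change): a caller would pick a RepoDB backend via the factory, then backfill it from existing storage before serving requests.

    // choose the metadata backend, then load what is already on disk into it
    repoDB, err := repodbfactory.Create("boltdb", bolt.DBParameters{RootDir: rootDir})
    if err != nil {
        return err
    }
    if err := repodb.SyncRepoDB(repoDB, storeController, logger); err != nil {
        return err // e.g. an unreadable index.json in one of the repos
    }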
"zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/storage" +) + +// SyncRepoDB will sync all repos found in the rootdirectory of the oci layout that zot was deployed on. +func SyncRepoDB(repoDB RepoDB, storeController storage.StoreController, log log.Logger) error { + allRepos, err := getAllRepos(storeController) + if err != nil { + rootDir := storeController.DefaultStore.RootDir() + log.Error().Err(err).Msgf("sync-repodb: failed to get all repo names present under %s", rootDir) + + return err + } + + for _, repo := range allRepos { + err := SyncRepo(repo, repoDB, storeController, log) + if err != nil { + log.Error().Err(err).Msgf("sync-repodb: failed to sync repo %s", repo) + + return err + } + } + + return nil +} + +// SyncRepo reads the contents of a repo and syncs all images signatures found. +func SyncRepo(repo string, repoDB RepoDB, storeController storage.StoreController, log log.Logger) error { + imageStore := storeController.GetImageStore(repo) + + indexBlob, err := imageStore.GetIndexContent(repo) + if err != nil { + log.Error().Err(err).Msgf("sync-repo: failed to read index.json for repo %s", repo) + + return err + } + + var indexContent ispec.Index + + err = json.Unmarshal(indexBlob, &indexContent) + if err != nil { + log.Error().Err(err).Msgf("sync-repo: failed to unmarshal index.json for repo %s", repo) + + return err + } + + err = resetRepoMetaTags(repo, repoDB, log) + if err != nil && !errors.Is(err, zerr.ErrRepoMetaNotFound) { + log.Error().Err(err).Msgf("sync-repo: failed to reset tag field in RepoMetadata for repo %s", repo) + + return err + } + + type foundSignatureData struct { + repo string + tag string + signatureType string + signedManifestDigest string + signatureDigest string + } + + var signaturesFound []foundSignatureData + + for _, manifest := range indexContent.Manifests { + tag, hasTag := manifest.Annotations[ispec.AnnotationRefName] + + if !hasTag { + log.Warn().Msgf("sync-repo: image without tag found, will not be synced into RepoDB") + + continue + } + + manifestMetaIsPresent, err := isManifestMetaPresent(repo, manifest, repoDB) + if err != nil { + log.Error().Err(err).Msgf("sync-repo: error checking manifestMeta in RepoDB") + + return err + } + + if manifestMetaIsPresent { + err = repoDB.SetRepoTag(repo, tag, manifest.Digest, manifest.MediaType) + if err != nil { + log.Error().Err(err).Msgf("sync-repo: failed to set repo tag for %s:%s", repo, tag) + + return err + } + + continue + } + + manifestBlob, digest, _, err := imageStore.GetImageManifest(repo, manifest.Digest.String()) + if err != nil { + log.Error().Err(err).Msgf("sync-repo: failed to set repo tag for %s:%s", repo, tag) + + return err + } + + isSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, + manifestBlob, tag, storeController) + if err != nil { + if errors.Is(err, zerr.ErrOrphanSignature) { + continue + } else { + log.Error().Err(err).Msgf("sync-repo: failed checking if image is signature for %s:%s", repo, tag) + + return err + } + } + + if isSignature { + // We'll ignore signatures now because the order in which the signed image and signature are added into + // the DB matters. 
First we add the normal images, then the signatures + signaturesFound = append(signaturesFound, foundSignatureData{ + repo: repo, + tag: tag, + signatureType: signatureType, + signedManifestDigest: signedManifestDigest.String(), + signatureDigest: digest.String(), + }) + + continue + } + + manifestData, err := NewManifestData(repo, manifestBlob, storeController) + if err != nil { + log.Error().Err(err).Msgf("sync-repo: failed to create manifest data for image %s:%s manifest digest %s ", + repo, tag, manifest.Digest.String()) + + return err + } + + err = repoDB.SetManifestMeta(repo, manifest.Digest, ManifestMetadata{ + ManifestBlob: manifestData.ManifestBlob, + ConfigBlob: manifestData.ConfigBlob, + DownloadCount: 0, + Signatures: ManifestSignatures{}, + }) + if err != nil { + log.Error().Err(err).Msgf("sync-repo: failed to set manifest meta for image %s:%s manifest digest %s ", + repo, tag, manifest.Digest.String()) + + return err + } + + err = repoDB.SetRepoTag(repo, tag, manifest.Digest, manifest.MediaType) + if err != nil { + log.Error().Err(err).Msgf("sync-repo: failed to set repo tag for repo %s and tag %s", + repo, tag) + + return err + } + } + + // manage the signatures found + for _, sigData := range signaturesFound { + err := repoDB.AddManifestSignature(repo, godigest.Digest(sigData.signedManifestDigest), SignatureMetadata{ + SignatureType: sigData.signatureType, + SignatureDigest: sigData.signatureDigest, + }) + if err != nil { + log.Error().Err(err).Msgf("sync-repo: failed to set signature meta for signed image %s:%s manifest digest %s ", + sigData.repo, sigData.tag, sigData.signedManifestDigest) + + return err + } + } + + return nil +} + +// resetRepoMetaTags will delete all tags from a RepoMetadata. +func resetRepoMetaTags(repo string, repoDB RepoDB, log log.Logger) error { + repoMeta, err := repoDB.GetRepoMeta(repo) + if err != nil && !errors.Is(err, zerr.ErrRepoMetaNotFound) { + log.Error().Err(err).Msgf("sync-repo: failed to get RepoMeta for repo %s", repo) + + return err + } + + if errors.Is(err, zerr.ErrRepoMetaNotFound) { + log.Info().Msgf("sync-repo: RepoMeta not found for repo %s, new RepoMeta will be created", repo) + + return nil + } + + for tag := range repoMeta.Tags { + // We should have a way to delete all tags at once + err := repoDB.DeleteRepoTag(repo, tag) + if err != nil { + log.Error().Err(err).Msgf("sync-repo: failed to delete tag %s from RepoMeta for repo %s", tag, repo) + + return err + } + } + + return nil +} + +func getAllRepos(storeController storage.StoreController) ([]string, error) { + allRepos, err := storeController.DefaultStore.GetRepositories() + if err != nil { + return nil, err + } + + if storeController.SubStore != nil { + for _, store := range storeController.SubStore { + substoreRepos, err := store.GetRepositories() + if err != nil { + return nil, err + } + + allRepos = append(allRepos, substoreRepos...) + } + } + + return allRepos, nil +} + +// isManifestMetaPresent checks if the manifest with a certain digest is present in a certain repo. +func isManifestMetaPresent(repo string, manifest ispec.Descriptor, repoDB RepoDB) (bool, error) { + _, err := repoDB.GetManifestMeta(repo, manifest.Digest) + if err != nil && !errors.Is(err, zerr.ErrManifestMetaNotFound) { + return false, err + } + + if errors.Is(err, zerr.ErrManifestMetaNotFound) { + return false, nil + } + + return true, nil +} + +// NewManifestData takes raw data about an image and creates a new ManifestData object. 
+func NewManifestData(repoName string, manifestBlob []byte, storeController storage.StoreController, +) (ManifestData, error) { + var ( + manifestContent ispec.Manifest + configContent ispec.Image + manifestData ManifestData + ) + + imgStore := storeController.GetImageStore(repoName) + + err := json.Unmarshal(manifestBlob, &manifestContent) + if err != nil { + return ManifestData{}, err + } + + configBlob, err := imgStore.GetBlobContent(repoName, manifestContent.Config.Digest) + if err != nil { + return ManifestData{}, err + } + + err = json.Unmarshal(configBlob, &configContent) + if err != nil { + return ManifestData{}, err + } + + manifestData.ManifestBlob = manifestBlob + manifestData.ConfigBlob = configBlob + + return manifestData, nil +} diff --git a/pkg/meta/repodb/sync_repodb_test.go b/pkg/meta/repodb/sync_repodb_test.go new file mode 100644 index 00000000..cce2f240 --- /dev/null +++ b/pkg/meta/repodb/sync_repodb_test.go @@ -0,0 +1,647 @@ +package repodb_test + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path" + "testing" + + godigest "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + oras "github.com/oras-project/artifacts-spec/specs-go/v1" + . "github.com/smartystreets/goconvey/convey" + + zerr "zotregistry.io/zot/errors" + "zotregistry.io/zot/pkg/extensions/monitoring" + "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" + bolt "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" + dynamo "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper" + dynamoParams "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper/params" + "zotregistry.io/zot/pkg/storage" + "zotregistry.io/zot/pkg/storage/local" + "zotregistry.io/zot/pkg/test" + "zotregistry.io/zot/pkg/test/mocks" +) + +const repo = "repo" + +var ErrTestError = errors.New("test error") + +func TestSyncRepoDBErrors(t *testing.T) { + Convey("SyncRepoDB", t, func() { + imageStore := mocks.MockedImageStore{ + GetIndexContentFn: func(repo string) ([]byte, error) { + return nil, ErrTestError + }, + GetRepositoriesFn: func() ([]string, error) { + return []string{"repo1", "repo2"}, nil + }, + } + storeController := storage.StoreController{DefaultStore: imageStore} + repoDB := mocks.RepoDBMock{} + + // sync repo fail + err := repodb.SyncRepoDB(repoDB, storeController, log.NewLogger("debug", "")) + So(err, ShouldNotBeNil) + + Convey("getAllRepos errors", func() { + imageStore1 := mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return []string{"repo1", "repo2"}, nil + }, + } + imageStore2 := mocks.MockedImageStore{ + GetRepositoriesFn: func() ([]string, error) { + return nil, ErrTestError + }, + } + storeController := storage.StoreController{ + DefaultStore: imageStore1, + SubStore: map[string]storage.ImageStore{ + "a": imageStore2, + }, + } + + err := repodb.SyncRepoDB(repoDB, storeController, log.NewLogger("debug", "")) + So(err, ShouldNotBeNil) + }) + }) + + Convey("SyncRepo", t, func() { + imageStore := mocks.MockedImageStore{} + storeController := storage.StoreController{DefaultStore: &imageStore} + repoDB := mocks.RepoDBMock{} + log := log.NewLogger("debug", "") + + Convey("imageStore.GetIndexContent errors", func() { + imageStore.GetIndexContentFn = func(repo string) ([]byte, error) { + return nil, ErrTestError + } + + err := repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + + Convey("json.Unmarshal errors", func() { + imageStore.GetIndexContentFn = func(repo string) ([]byte, error) 
{ + return []byte("Invalid JSON"), nil + } + + err := repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + + Convey("resetRepoMetaTags errors", func() { + imageStore.GetIndexContentFn = func(repo string) ([]byte, error) { + return []byte("{}"), nil + } + + Convey("repoDB.GetRepoMeta errors", func() { + repoDB.GetRepoMetaFn = func(repo string) (repodb.RepoMetadata, error) { + return repodb.RepoMetadata{}, ErrTestError + } + + err := repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + + Convey("repoDB.DeleteRepoTag errors", func() { + repoDB.GetRepoMetaFn = func(repo string) (repodb.RepoMetadata, error) { + return repodb.RepoMetadata{ + Tags: map[string]repodb.Descriptor{ + "digest1": { + Digest: "tag1", + MediaType: ispec.MediaTypeImageManifest, + }, + }, + }, nil + } + repoDB.DeleteRepoTagFn = func(repo, tag string) error { + return ErrTestError + } + + err := repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + }) + + Convey("isManifestMetaPresent errors", func() { + indexContent := ispec.Index{ + Manifests: []ispec.Descriptor{ + { + Digest: godigest.FromString("manifest1"), + MediaType: ispec.MediaTypeImageManifest, + Annotations: map[string]string{ + ispec.AnnotationRefName: "tag1", + }, + }, + }, + } + indexBlob, err := json.Marshal(indexContent) + So(err, ShouldBeNil) + + imageStore.GetIndexContentFn = func(repo string) ([]byte, error) { + return indexBlob, nil + } + + Convey("repoDB.GetManifestMeta errors", func() { + repoDB.GetManifestMetaFn = func(repo string, manifestDigest godigest.Digest) (repodb.ManifestMetadata, error) { + return repodb.ManifestMetadata{}, ErrTestError + } + + err = repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + }) + + Convey("manifestMetaIsPresent true", func() { + indexContent := ispec.Index{ + Manifests: []ispec.Descriptor{ + { + Digest: godigest.FromString("manifest1"), + MediaType: ispec.MediaTypeImageManifest, + Annotations: map[string]string{ + ispec.AnnotationRefName: "tag1", + }, + }, + }, + } + indexBlob, err := json.Marshal(indexContent) + So(err, ShouldBeNil) + + imageStore.GetIndexContentFn = func(repo string) ([]byte, error) { + return indexBlob, nil + } + + Convey("repoDB.SetRepoTag", func() { + repoDB.SetRepoTagFn = func(repo, tag string, manifestDigest godigest.Digest, mediaType string) error { + return ErrTestError + } + + err = repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + }) + + Convey("manifestMetaIsPresent false", func() { + indexContent := ispec.Index{ + Manifests: []ispec.Descriptor{ + { + Digest: godigest.FromString("manifest1"), + MediaType: ispec.MediaTypeImageManifest, + Annotations: map[string]string{ + ispec.AnnotationRefName: "tag1", + }, + }, + }, + } + indexBlob, err := json.Marshal(indexContent) + So(err, ShouldBeNil) + + imageStore.GetIndexContentFn = func(repo string) ([]byte, error) { + return indexBlob, nil + } + + repoDB.GetManifestMetaFn = func(repo string, manifestDigest godigest.Digest) (repodb.ManifestMetadata, error) { + return repodb.ManifestMetadata{}, zerr.ErrManifestMetaNotFound + } + + Convey("GetImageManifest errors", func() { + imageStore.GetImageManifestFn = func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return nil, "", "", ErrTestError + } + err = repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + + Convey("CheckIsImageSignature errors", func() { + // 
CheckIsImageSignature will fail because of a invalid json + imageStore.GetImageManifestFn = func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte("Invalid JSON"), "", "", nil + } + err = repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + Convey("CheckIsImageSignature -> not signature", func() { + manifestContent := ispec.Manifest{} + manifestBlob, err := json.Marshal(manifestContent) + So(err, ShouldBeNil) + + imageStore.GetImageManifestFn = func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return manifestBlob, "", "", nil + } + + Convey("imgStore.GetBlobContent errors", func() { + imageStore.GetBlobContentFn = func(repo string, digest godigest.Digest) ([]byte, error) { + return nil, ErrTestError + } + + err = repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + + Convey("json.Unmarshal(configBlob errors", func() { + imageStore.GetBlobContentFn = func(repo string, digest godigest.Digest) ([]byte, error) { + return []byte("invalid JSON"), nil + } + + err = repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + }) + + Convey("CheckIsImageSignature -> is signature", func() { + manifestContent := oras.Manifest{ + Subject: &oras.Descriptor{ + Digest: "123", + }, + } + manifestBlob, err := json.Marshal(manifestContent) + So(err, ShouldBeNil) + + imageStore.GetImageManifestFn = func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return manifestBlob, "", "", nil + } + + repoDB.AddManifestSignatureFn = func(repo string, signedManifestDigest godigest.Digest, + sm repodb.SignatureMetadata, + ) error { + return ErrTestError + } + + err = repodb.SyncRepo("repo", repoDB, storeController, log) + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestSyncRepoDBWithStorage(t *testing.T) { + Convey("Boltdb", t, func() { + rootDir := t.TempDir() + + imageStore := local.NewImageStore(rootDir, false, 0, false, false, + log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil) + + storeController := storage.StoreController{DefaultStore: imageStore} + manifests := []ispec.Manifest{} + for i := 0; i < 3; i++ { + config, layers, manifest, err := test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + manifests = append(manifests, manifest) + + err = test.WriteImageToFileSystem( + test.Image{ + Config: config, + Layers: layers, + Manifest: manifest, + Tag: fmt.Sprintf("tag%d", i), + }, + repo, + storeController) + So(err, ShouldBeNil) + } + + // add fake signature for tag1 + signatureTag, err := test.GetCosignSignatureTagForManifest(manifests[1]) + So(err, ShouldBeNil) + + manifestBlob, err := json.Marshal(manifests[1]) + So(err, ShouldBeNil) + + signedManifestDigest := godigest.FromBytes(manifestBlob) + + config, layers, manifest, err := test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + err = test.WriteImageToFileSystem( + test.Image{ + Config: config, + Layers: layers, + Manifest: manifest, + Tag: signatureTag, + }, + repo, + storeController) + So(err, ShouldBeNil) + + // remove tag2 from index.json + indexPath := path.Join(rootDir, repo, "index.json") + indexFile, err := os.Open(indexPath) + So(err, ShouldBeNil) + buf, err := io.ReadAll(indexFile) + So(err, ShouldBeNil) + + var index ispec.Index + if err = json.Unmarshal(buf, &index); err == nil { + for _, manifest := range index.Manifests { + if val, ok := manifest.Annotations[ispec.AnnotationRefName]; ok && val == "tag2" 
{ + delete(manifest.Annotations, ispec.AnnotationRefName) + + break + } + } + } + buf, err = json.Marshal(index) + So(err, ShouldBeNil) + + err = os.WriteFile(indexPath, buf, 0o600) + So(err, ShouldBeNil) + + repoDB, err := bolt.NewBoltDBWrapper(bolt.DBParameters{ + RootDir: rootDir, + }) + So(err, ShouldBeNil) + + err = repodb.SyncRepoDB(repoDB, storeController, log.NewLogger("debug", "")) + So(err, ShouldBeNil) + + repos, err := repoDB.GetMultipleRepoMeta( + context.Background(), + func(repoMeta repodb.RepoMetadata) bool { return true }, + repodb.PageInput{}, + ) + So(err, ShouldBeNil) + + So(len(repos), ShouldEqual, 1) + So(len(repos[0].Tags), ShouldEqual, 2) + + for _, descriptor := range repos[0].Tags { + manifestMeta, err := repoDB.GetManifestMeta(repo, godigest.Digest(descriptor.Digest)) + So(err, ShouldBeNil) + So(manifestMeta.ManifestBlob, ShouldNotBeNil) + So(manifestMeta.ConfigBlob, ShouldNotBeNil) + + if descriptor.Digest == signedManifestDigest.String() { + So(repos[0].Signatures[descriptor.Digest], ShouldNotBeEmpty) + So(manifestMeta.Signatures["cosign"], ShouldNotBeEmpty) + } + } + }) + + Convey("Ignore orphan signatures", t, func() { + rootDir := t.TempDir() + + imageStore := local.NewImageStore(rootDir, false, 0, false, false, + log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil) + + storeController := storage.StoreController{DefaultStore: imageStore} + // add an image + config, layers, manifest, err := test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + err = test.WriteImageToFileSystem( + test.Image{ + Config: config, + Layers: layers, + Manifest: manifest, + Tag: "tag1", + }, + repo, + storeController) + So(err, ShouldBeNil) + + // add mock cosign signature without pushing the signed image + _, _, manifest, err = test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + signatureTag, err := test.GetCosignSignatureTagForManifest(manifest) + So(err, ShouldBeNil) + + // get the body of the signature + config, layers, manifest, err = test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + err = test.WriteImageToFileSystem( + test.Image{ + Config: config, + Layers: layers, + Manifest: manifest, + Tag: signatureTag, + }, + repo, + storeController) + So(err, ShouldBeNil) + + // test that we have only 1 image inside the repo + repoDB, err := bolt.NewBoltDBWrapper(bolt.DBParameters{ + RootDir: rootDir, + }) + So(err, ShouldBeNil) + + err = repodb.SyncRepoDB(repoDB, storeController, log.NewLogger("debug", "")) + So(err, ShouldBeNil) + + repos, err := repoDB.GetMultipleRepoMeta( + context.Background(), + func(repoMeta repodb.RepoMetadata) bool { return true }, + repodb.PageInput{}, + ) + So(err, ShouldBeNil) + + So(len(repos), ShouldEqual, 1) + So(repos[0].Tags, ShouldContainKey, "tag1") + So(repos[0].Tags, ShouldNotContainKey, signatureTag) + }) +} + +func TestSyncRepoDBDynamoWrapper(t *testing.T) { + skipIt(t) + + Convey("Dynamodb", t, func() { + rootDir := t.TempDir() + + imageStore := local.NewImageStore(rootDir, false, 0, false, false, + log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil) + + storeController := storage.StoreController{DefaultStore: imageStore} + manifests := []ispec.Manifest{} + for i := 0; i < 3; i++ { + config, layers, manifest, err := test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + manifests = append(manifests, manifest) + + err = test.WriteImageToFileSystem( + test.Image{ + Config: config, + Layers: layers, + Manifest: 
manifest, + Tag: fmt.Sprintf("tag%d", i), + }, + repo, + storeController) + So(err, ShouldBeNil) + } + + // add fake signature for tag1 + signatureTag, err := test.GetCosignSignatureTagForManifest(manifests[1]) + So(err, ShouldBeNil) + + manifestBlob, err := json.Marshal(manifests[1]) + So(err, ShouldBeNil) + + signedManifestDigest := godigest.FromBytes(manifestBlob) + + config, layers, manifest, err := test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + err = test.WriteImageToFileSystem( + test.Image{ + Config: config, + Layers: layers, + Manifest: manifest, + Tag: signatureTag, + }, + repo, + storeController) + So(err, ShouldBeNil) + + // remove tag2 from index.json + indexPath := path.Join(rootDir, repo, "index.json") + indexFile, err := os.Open(indexPath) + So(err, ShouldBeNil) + buf, err := io.ReadAll(indexFile) + So(err, ShouldBeNil) + + var index ispec.Index + if err = json.Unmarshal(buf, &index); err == nil { + for _, manifest := range index.Manifests { + if val, ok := manifest.Annotations[ispec.AnnotationRefName]; ok && val == "tag2" { + delete(manifest.Annotations, ispec.AnnotationRefName) + + break + } + } + } + buf, err = json.Marshal(index) + So(err, ShouldBeNil) + + err = os.WriteFile(indexPath, buf, 0o600) + So(err, ShouldBeNil) + + dynamoWrapper, err := dynamo.NewDynamoDBWrapper(dynamoParams.DBDriverParameters{ + Endpoint: os.Getenv("DYNAMODBMOCK_ENDPOINT"), + Region: "us-east-2", + RepoMetaTablename: "RepoMetadataTable", + ManifestDataTablename: "ManifestDataTable", + VersionTablename: "Version", + }) + So(err, ShouldBeNil) + + err = dynamoWrapper.ResetManifestDataTable() + So(err, ShouldBeNil) + + err = dynamoWrapper.ResetRepoMetaTable() + So(err, ShouldBeNil) + + err = repodb.SyncRepoDB(dynamoWrapper, storeController, log.NewLogger("debug", "")) + So(err, ShouldBeNil) + + repos, err := dynamoWrapper.GetMultipleRepoMeta( + context.Background(), + func(repoMeta repodb.RepoMetadata) bool { return true }, + repodb.PageInput{}, + ) + t.Logf("%#v", repos) + So(err, ShouldBeNil) + + So(len(repos), ShouldEqual, 1) + So(len(repos[0].Tags), ShouldEqual, 2) + + for _, descriptor := range repos[0].Tags { + manifestMeta, err := dynamoWrapper.GetManifestMeta(repo, godigest.Digest(descriptor.Digest)) + So(err, ShouldBeNil) + So(manifestMeta.ManifestBlob, ShouldNotBeNil) + So(manifestMeta.ConfigBlob, ShouldNotBeNil) + + if descriptor.Digest == signedManifestDigest.String() { + So(manifestMeta.Signatures, ShouldNotBeEmpty) + } + } + }) + + Convey("Ignore orphan signatures", t, func() { + rootDir := t.TempDir() + + imageStore := local.NewImageStore(rootDir, false, 0, false, false, + log.NewLogger("debug", ""), monitoring.NewMetricsServer(false, log.NewLogger("debug", "")), nil, nil) + + storeController := storage.StoreController{DefaultStore: imageStore} + // add an image + config, layers, manifest, err := test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + err = test.WriteImageToFileSystem( + test.Image{ + Config: config, + Layers: layers, + Manifest: manifest, + Tag: "tag1", + }, + repo, + storeController) + So(err, ShouldBeNil) + + // add mock cosign signature without pushing the signed image + _, _, manifest, err = test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + signatureTag, err := test.GetCosignSignatureTagForManifest(manifest) + So(err, ShouldBeNil) + + // get the body of the signature + config, layers, manifest, err = test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + err = test.WriteImageToFileSystem( + test.Image{ + Config: config, + 
Layers: layers, + Manifest: manifest, + Tag: signatureTag, + }, + repo, + storeController) + So(err, ShouldBeNil) + + // test that we have only 1 image inside the repo + repoDB, err := dynamo.NewDynamoDBWrapper(dynamoParams.DBDriverParameters{ + Endpoint: os.Getenv("DYNAMODBMOCK_ENDPOINT"), + Region: "us-east-2", + RepoMetaTablename: "RepoMetadataTable", + ManifestDataTablename: "ManifestDataTable", + VersionTablename: "Version", + }) + So(err, ShouldBeNil) + + err = repodb.SyncRepoDB(repoDB, storeController, log.NewLogger("debug", "")) + So(err, ShouldBeNil) + + repos, err := repoDB.GetMultipleRepoMeta( + context.Background(), + func(repoMeta repodb.RepoMetadata) bool { return true }, + repodb.PageInput{}, + ) + So(err, ShouldBeNil) + t.Logf("%#v", repos) + + So(len(repos), ShouldEqual, 1) + So(repos[0].Tags, ShouldContainKey, "tag1") + So(repos[0].Tags, ShouldNotContainKey, signatureTag) + }) +} + +func skipIt(t *testing.T) { + t.Helper() + + if os.Getenv("S3MOCK_ENDPOINT") == "" { + t.Skip("Skipping testing without AWS S3 mock server") + } +} diff --git a/pkg/meta/repodb/update/update.go b/pkg/meta/repodb/update/update.go new file mode 100644 index 00000000..497566c9 --- /dev/null +++ b/pkg/meta/repodb/update/update.go @@ -0,0 +1,205 @@ +package update + +import ( + godigest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + + zerr "zotregistry.io/zot/errors" + "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/meta/repodb" + "zotregistry.io/zot/pkg/storage" +) + +// OnUpdateManifest is called when a new manifest is added. It updates repodb according to the type +// of image pushed (normal images, signatures, etc.). In case of any errors, it makes sure to keep +// consistency between repodb and the image store. +func OnUpdateManifest(name, reference, mediaType string, digest godigest.Digest, body []byte, + storeController storage.StoreController, repoDB repodb.RepoDB, log log.Logger, +) error { + imgStore := storeController.GetImageStore(name) + + // check if image is a signature + isSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(name, body, reference, + storeController) + if err != nil { + if errors.Is(err, zerr.ErrOrphanSignature) { + log.Warn().Err(err).Msg("image has signature format but it doesn't sign any image") + + return zerr.ErrOrphanSignature + } + + log.Error().Err(err).Msg("can't check if image is a signature or not") + + if err := imgStore.DeleteImageManifest(name, reference, false); err != nil { + log.Error().Err(err).Msgf("couldn't remove image manifest %s in repo %s", reference, name) + + return err + } + + return err + } + + metadataSuccessfullySet := true + + if isSignature { + err = repoDB.AddManifestSignature(name, signedManifestDigest, repodb.SignatureMetadata{ + SignatureType: signatureType, + SignatureDigest: digest.String(), + }) + if err != nil { + log.Error().Err(err).Msg("repodb: error while putting repo meta") + metadataSuccessfullySet = false + } + } else { + err := SetMetadataFromInput(name, reference, mediaType, digest, body, + storeController, repoDB, log) + if err != nil { + metadataSuccessfullySet = false + } + } + + if !metadataSuccessfullySet { + log.Info().Msgf("uploading image meta was unsuccessful for tag %s in repo %s", reference, name) + + if err := imgStore.DeleteImageManifest(name, reference, false); err != nil { + log.Error().Err(err).Msgf("couldn't remove image manifest %s in repo %s", reference, name) + + return err + } + + return err + } + + return nil +} + +// OnDeleteManifest is 
called when a manifest is deleted. It updates repodb according to the type +// of image pushed (normal images, signatures, etc.). In case of any errors, it makes sure to keep +// consistency between repodb and the image store. +func OnDeleteManifest(name, reference, mediaType string, digest godigest.Digest, manifestBlob []byte, + storeController storage.StoreController, repoDB repodb.RepoDB, log log.Logger, +) error { + imgStore := storeController.GetImageStore(name) + + isSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(name, manifestBlob, + reference, storeController) + if err != nil { + if errors.Is(err, zerr.ErrOrphanSignature) { + log.Warn().Err(err).Msg("image has signature format but it doesn't sign any image") + + return zerr.ErrOrphanSignature + } + + log.Error().Err(err).Msg("can't check if image is a signature or not") + + return err + } + + manageRepoMetaSuccessfully := true + + if isSignature { + err = repoDB.DeleteSignature(name, signedManifestDigest, repodb.SignatureMetadata{ + SignatureDigest: digest.String(), + SignatureType: signatureType, + }) + if err != nil { + log.Error().Err(err).Msg("repodb: error while deleting signature metadata") + manageRepoMetaSuccessfully = false + } + } else { + err = repoDB.DeleteRepoTag(name, reference) + if err != nil { + log.Info().Msg("repodb: restoring image store") + + // restore image store + _, err := imgStore.PutImageManifest(name, reference, mediaType, manifestBlob) + if err != nil { + log.Error().Err(err).Msg("repodb: error while restoring image store, database is not consistent") + } + + manageRepoMetaSuccessfully = false + } + } + + if !manageRepoMetaSuccessfully { + log.Info().Msgf("repodb: deleting image meta was unsuccessful for tag %s in repo %s", reference, name) + + return err + } + + return nil +} + +// OnGetManifest is called when a manifest is downloaded. It increments the download counter on that manifest. +func OnGetManifest(name, reference string, digest godigest.Digest, body []byte, + storeController storage.StoreController, repoDB repodb.RepoDB, log log.Logger, +) error { + // check if image is a signature + isSignature, _, _, err := storage.CheckIsImageSignature(name, body, reference, + storeController) + if err != nil { + if errors.Is(err, zerr.ErrOrphanSignature) { + log.Warn().Err(err).Msg("image has signature format but it doesn't sign any image") + + return err + } + + log.Error().Err(err).Msg("can't check if manifest is a signature or not") + + return err + } + + if !isSignature { + err := repoDB.IncrementImageDownloads(name, reference) + if err != nil { + log.Error().Err(err).Msg("unexpected error") + + return err + } + } + + return nil +} + +// SetMetadataFromInput receives raw information about the manifest pushed and tries to set manifest metadata +// and update repo metadata by adding the current tag (in case the reference is a tag). +// The function expects an image manifest. 
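These hooks are the glue between the manifest routes and RepoDB; the handler wiring itself is not part of this section, so the following is only a sketch of the intended call pattern, with assumed variable names.

    // after a manifest PUT has been written to the image store:
    if err := update.OnUpdateManifest(name, reference, mediaType, digest, body, storeController, repoDB, log); err != nil {
        return err // on failure the hook already removed the stored manifest again
    }

    // after a manifest GET (signature artifacts are not counted as downloads):
    _ = update.OnGetManifest(name, reference, digest, body, storeController, repoDB, log)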
+func SetMetadataFromInput(repo, reference, mediaType string, digest godigest.Digest, manifestBlob []byte, + storeController storage.StoreController, repoDB repodb.RepoDB, log log.Logger, +) error { + imageMetadata, err := repodb.NewManifestData(repo, manifestBlob, storeController) + if err != nil { + return err + } + + err = repoDB.SetManifestMeta(repo, digest, repodb.ManifestMetadata{ + ManifestBlob: imageMetadata.ManifestBlob, + ConfigBlob: imageMetadata.ConfigBlob, + DownloadCount: 0, + Signatures: repodb.ManifestSignatures{}, + }) + if err != nil { + log.Error().Err(err).Msg("repodb: error while putting image meta") + + return err + } + + if refferenceIsDigest(reference) { + return nil + } + + err = repoDB.SetRepoTag(repo, reference, digest, mediaType) + if err != nil { + log.Error().Err(err).Msg("repodb: error while putting repo meta") + + return err + } + + return nil +} + +func refferenceIsDigest(reference string) bool { + _, err := godigest.Parse(reference) + + return err == nil +} diff --git a/pkg/meta/repodb/update/update_test.go b/pkg/meta/repodb/update/update_test.go new file mode 100644 index 00000000..5f028b6a --- /dev/null +++ b/pkg/meta/repodb/update/update_test.go @@ -0,0 +1,185 @@ +package update_test + +import ( + "encoding/json" + "errors" + "testing" + "time" + + godigest "github.com/opencontainers/go-digest" + ispec "github.com/opencontainers/image-spec/specs-go/v1" + oras "github.com/oras-project/artifacts-spec/specs-go/v1" + . "github.com/smartystreets/goconvey/convey" + + zerr "zotregistry.io/zot/errors" + "zotregistry.io/zot/pkg/extensions/monitoring" + "zotregistry.io/zot/pkg/log" + bolt_wrapper "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" + repoDBUpdate "zotregistry.io/zot/pkg/meta/repodb/update" + "zotregistry.io/zot/pkg/storage" + "zotregistry.io/zot/pkg/storage/local" + "zotregistry.io/zot/pkg/test" + "zotregistry.io/zot/pkg/test/mocks" +) + +var ErrTestError = errors.New("test error") + +func TestOnUpdateManifest(t *testing.T) { + Convey("On UpdateManifest", t, func() { + rootDir := t.TempDir() + storeController := storage.StoreController{} + log := log.NewLogger("debug", "") + metrics := monitoring.NewMetricsServer(false, log) + storeController.DefaultStore = local.NewImageStore(rootDir, true, 1*time.Second, + true, true, log, metrics, nil, nil, + ) + + repoDB, err := bolt_wrapper.NewBoltDBWrapper(bolt_wrapper.DBParameters{ + RootDir: rootDir, + }) + So(err, ShouldBeNil) + + config, layers, manifest, err := test.GetRandomImageComponents(100) + So(err, ShouldBeNil) + + err = test.WriteImageToFileSystem(test.Image{Config: config, Manifest: manifest, Layers: layers, Tag: "tag1"}, + "repo", storeController) + So(err, ShouldBeNil) + + manifestBlob, err := json.Marshal(manifest) + So(err, ShouldBeNil) + + digest := godigest.FromBytes(manifestBlob) + + err = repoDBUpdate.OnUpdateManifest("repo", "tag1", "", digest, manifestBlob, storeController, repoDB, log) + So(err, ShouldBeNil) + + repoMeta, err := repoDB.GetRepoMeta("repo") + So(err, ShouldBeNil) + + So(repoMeta.Tags, ShouldContainKey, "tag1") + }) +} + +func TestUpdateErrors(t *testing.T) { + Convey("Update operations", t, func() { + Convey("On UpdateManifest", func() { + imageStore := mocks.MockedImageStore{} + storeController := storage.StoreController{DefaultStore: &imageStore} + repoDB := mocks.RepoDBMock{} + log := log.NewLogger("debug", "") + + Convey("zerr.ErrOrphanSignature", func() { + manifestContent := oras.Manifest{ + Subject: &oras.Descriptor{ + Digest: "123", + }, + } + manifestBlob, err 
:= json.Marshal(manifestContent) + So(err, ShouldBeNil) + + imageStore.GetImageManifestFn = func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte{}, "", "", zerr.ErrManifestNotFound + } + + err = repoDBUpdate.OnUpdateManifest("repo", "tag1", "", "digest", manifestBlob, + storeController, repoDB, log) + So(err, ShouldNotBeNil) + }) + }) + + Convey("On DeleteManifest", func() { + imageStore := mocks.MockedImageStore{} + storeController := storage.StoreController{DefaultStore: &imageStore} + repoDB := mocks.RepoDBMock{} + log := log.NewLogger("debug", "") + + Convey("CheckIsImageSignature errors", func() { + manifestContent := oras.Manifest{ + Subject: &oras.Descriptor{ + Digest: "123", + }, + } + manifestBlob, err := json.Marshal(manifestContent) + So(err, ShouldBeNil) + + imageStore.GetImageManifestFn = func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte{}, "", "", zerr.ErrManifestNotFound + } + + err = repoDBUpdate.OnDeleteManifest("repo", "tag1", "digest", "media", manifestBlob, + storeController, repoDB, log) + So(err, ShouldNotBeNil) + + imageStore.GetImageManifestFn = func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte{}, "", "", ErrTestError + } + + err = repoDBUpdate.OnDeleteManifest("repo", "tag1", "digest", "media", manifestBlob, + storeController, repoDB, log) + So(err, ShouldNotBeNil) + }) + }) + + Convey("On GetManifest", func() { + imageStore := mocks.MockedImageStore{} + storeController := storage.StoreController{DefaultStore: &imageStore} + repoDB := mocks.RepoDBMock{} + log := log.NewLogger("debug", "") + + Convey("CheckIsImageSignature errors", func() { + manifestContent := oras.Manifest{ + Subject: &oras.Descriptor{ + Digest: "123", + }, + } + manifestBlob, err := json.Marshal(manifestContent) + So(err, ShouldBeNil) + + imageStore.GetImageManifestFn = func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte{}, "", "", zerr.ErrManifestNotFound + } + + err = repoDBUpdate.OnGetManifest("repo", "tag1", "digest", manifestBlob, + storeController, repoDB, log) + So(err, ShouldNotBeNil) + + imageStore.GetImageManifestFn = func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return []byte{}, "", "", ErrTestError + } + + err = repoDBUpdate.OnGetManifest("repo", "tag1", "media", manifestBlob, + storeController, repoDB, log) + So(err, ShouldNotBeNil) + }) + }) + + Convey("SetMetadataFromInput", func() { + imageStore := mocks.MockedImageStore{} + storeController := storage.StoreController{DefaultStore: &imageStore} + repoDB := mocks.RepoDBMock{} + log := log.NewLogger("debug", "") + + err := repoDBUpdate.SetMetadataFromInput("repo", "ref", "digest", "", []byte("BadManifestBlob"), + storeController, repoDB, log) + So(err, ShouldNotBeNil) + + // reference is digest + + manifestContent := ispec.Manifest{} + manifestBlob, err := json.Marshal(manifestContent) + So(err, ShouldBeNil) + + imageStore.GetImageManifestFn = func(repo, reference string) ([]byte, godigest.Digest, string, error) { + return manifestBlob, "", "", nil + } + imageStore.GetBlobContentFn = func(repo string, digest godigest.Digest) ([]byte, error) { + return []byte("{}"), nil + } + + err = repoDBUpdate.SetMetadataFromInput("repo", string(godigest.FromString("reference")), "", "digest", + manifestBlob, storeController, repoDB, log) + So(err, ShouldBeNil) + }) + }) +} diff --git a/pkg/meta/repodb/version/common.go b/pkg/meta/repodb/version/common.go new file mode 100644 
index 00000000..c62d3556 --- /dev/null +++ b/pkg/meta/repodb/version/common.go @@ -0,0 +1,31 @@ +package version + +const ( + Version1 = "V1" + Version2 = "V2" + Version3 = "V3" + + CurrentVersion = Version1 +) + +const ( + versionV1Index = iota + versionV2Index + versionV3Index +) + +const DBVersionKey = "DBVersion" + +func GetVersionIndex(dbVersion string) int { + index, ok := map[string]int{ + Version1: versionV1Index, + Version2: versionV2Index, + Version3: versionV3Index, + }[dbVersion] + + if !ok { + return -1 + } + + return index +} diff --git a/pkg/meta/repodb/version/patches.go b/pkg/meta/repodb/version/patches.go new file mode 100644 index 00000000..bc125a05 --- /dev/null +++ b/pkg/meta/repodb/version/patches.go @@ -0,0 +1,14 @@ +package version + +import ( + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "go.etcd.io/bbolt" +) + +func GetBoltDBPatches() []func(DB *bbolt.DB) error { + return []func(DB *bbolt.DB) error{} +} + +func GetDynamoDBPatches() []func(client *dynamodb.Client, tableNames map[string]string) error { + return []func(client *dynamodb.Client, tableNames map[string]string) error{} +} diff --git a/pkg/meta/repodb/version/version_test.go b/pkg/meta/repodb/version/version_test.go new file mode 100644 index 00000000..dfa7f06e --- /dev/null +++ b/pkg/meta/repodb/version/version_test.go @@ -0,0 +1,194 @@ +package version_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/aws-sdk-go/aws" + . "github.com/smartystreets/goconvey/convey" + "go.etcd.io/bbolt" + + "zotregistry.io/zot/pkg/meta/repodb" + bolt "zotregistry.io/zot/pkg/meta/repodb/boltdb-wrapper" + dynamo "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper" + dynamoParams "zotregistry.io/zot/pkg/meta/repodb/dynamodb-wrapper/params" + "zotregistry.io/zot/pkg/meta/repodb/version" +) + +var ErrTestError = errors.New("test error") + +func TestVersioningBoltDB(t *testing.T) { + Convey("Tests", t, func() { + tmpDir := t.TempDir() + boltDBParams := bolt.DBParameters{RootDir: tmpDir} + boltdbWrapper, err := bolt.NewBoltDBWrapper(boltDBParams) + defer os.Remove("repo.db") + So(boltdbWrapper, ShouldNotBeNil) + So(err, ShouldBeNil) + + boltdbWrapper.Patches = []func(DB *bbolt.DB) error{ + func(DB *bbolt.DB) error { + return nil + }, + } + + Convey("success", func() { + boltdbWrapper.Patches = []func(DB *bbolt.DB) error{ + func(DB *bbolt.DB) error { // V1 to V2 + return nil + }, + } + + err := setBoltDBVersion(boltdbWrapper.DB, version.Version1) + So(err, ShouldBeNil) + + err = boltdbWrapper.PatchDB() + So(err, ShouldBeNil) + }) + + Convey("DBVersion is empty", func() { + err := boltdbWrapper.DB.Update(func(tx *bbolt.Tx) error { + versionBuck := tx.Bucket([]byte(repodb.VersionBucket)) + + return versionBuck.Put([]byte(version.DBVersionKey), []byte("")) + }) + So(err, ShouldBeNil) + + err = boltdbWrapper.PatchDB() + So(err, ShouldNotBeNil) + }) + + Convey("iterate patches with skip", func() { + boltdbWrapper.Patches = []func(DB *bbolt.DB) error{ + func(DB *bbolt.DB) error { // V1 to V2 + return nil + }, + func(DB *bbolt.DB) error { // V2 to V3 + return nil + }, + func(DB *bbolt.DB) error { // V3 to V4 + return nil + }, + } + + err := setBoltDBVersion(boltdbWrapper.DB, version.Version1) + So(err, ShouldBeNil) + // we should skip the first patch + + err = boltdbWrapper.PatchDB() + So(err, ShouldBeNil) + }) + + Convey("patch has 
error", func() { + boltdbWrapper.Patches = []func(DB *bbolt.DB) error{ + func(DB *bbolt.DB) error { // V1 to V2 + return ErrTestError + }, + } + + err = boltdbWrapper.PatchDB() + So(err, ShouldNotBeNil) + }) + }) +} + +func setBoltDBVersion(db *bbolt.DB, vers string) error { + err := db.Update(func(tx *bbolt.Tx) error { + versionBuck := tx.Bucket([]byte(repodb.VersionBucket)) + + return versionBuck.Put([]byte(version.DBVersionKey), []byte(vers)) + }) + + return err +} + +func TestVersioningDynamoDB(t *testing.T) { + const ( + endpoint = "http://localhost:4566" + region = "us-east-2" + ) + + Convey("Tests", t, func() { + dynamoWrapper, err := dynamo.NewDynamoDBWrapper(dynamoParams.DBDriverParameters{ + Endpoint: endpoint, + Region: region, + RepoMetaTablename: "RepoMetadataTable", + ManifestDataTablename: "ManifestDataTable", + VersionTablename: "Version", + }) + So(err, ShouldBeNil) + + So(dynamoWrapper.ResetManifestDataTable(), ShouldBeNil) + So(dynamoWrapper.ResetRepoMetaTable(), ShouldBeNil) + + Convey("DBVersion is empty", func() { + err := setDynamoDBVersion(dynamoWrapper.Client, "") + So(err, ShouldBeNil) + + err = dynamoWrapper.PatchDB() + So(err, ShouldNotBeNil) + }) + + Convey("iterate patches with skip", func() { + dynamoWrapper.Patches = []func(client *dynamodb.Client, tableNames map[string]string) error{ + func(client *dynamodb.Client, tableNames map[string]string) error { // V1 to V2 + return nil + }, + func(client *dynamodb.Client, tableNames map[string]string) error { // V2 to V3 + return nil + }, + func(client *dynamodb.Client, tableNames map[string]string) error { // V3 to V4 + return nil + }, + } + + err := setDynamoDBVersion(dynamoWrapper.Client, version.Version1) + So(err, ShouldBeNil) + // we should skip the first patch + + err = dynamoWrapper.PatchDB() + So(err, ShouldBeNil) + }) + + Convey("patch has error", func() { + dynamoWrapper.Patches = []func(client *dynamodb.Client, tableNames map[string]string) error{ + func(client *dynamodb.Client, tableNames map[string]string) error { // V1 to V2 + return ErrTestError + }, + } + + err = dynamoWrapper.PatchDB() + So(err, ShouldNotBeNil) + }) + }) +} + +func setDynamoDBVersion(client *dynamodb.Client, vers string) error { + mdAttributeValue, err := attributevalue.Marshal(vers) + if err != nil { + return err + } + + _, err = client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{ + ExpressionAttributeNames: map[string]string{ + "#V": "Version", + }, + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":Version": mdAttributeValue, + }, + Key: map[string]types.AttributeValue{ + "VersionKey": &types.AttributeValueMemberS{ + Value: version.DBVersionKey, + }, + }, + TableName: aws.String("Version"), + UpdateExpression: aws.String("SET #V = :Version"), + }) + + return err +} diff --git a/pkg/requestcontext/checkrepo.go b/pkg/requestcontext/checkrepo.go new file mode 100644 index 00000000..cfc1b8cf --- /dev/null +++ b/pkg/requestcontext/checkrepo.go @@ -0,0 +1,28 @@ +package requestcontext + +import ( + "context" + + zerr "zotregistry.io/zot/errors" +) + +func RepoIsUserAvailable(ctx context.Context, repoName string) (bool, error) { + authzCtxKey := GetContextKey() + + if authCtx := ctx.Value(authzCtxKey); authCtx != nil { + acCtx, ok := authCtx.(AccessControlContext) + if !ok { + err := zerr.ErrBadCtxFormat + + return false, err + } + + if acCtx.IsAdmin || acCtx.CanReadRepo(repoName) { + return true, nil + } + + return false, nil + } + + return true, nil +} diff --git a/pkg/requestcontext/context.go 
b/pkg/requestcontext/context.go index 98248a84..f3f50d70 100644 --- a/pkg/requestcontext/context.go +++ b/pkg/requestcontext/context.go @@ -4,6 +4,7 @@ import ( "context" glob "github.com/bmatcuk/doublestar/v4" //nolint:gci + "zotregistry.io/zot/errors" ) diff --git a/pkg/storage/cache/dynamodb.go b/pkg/storage/cache/dynamodb.go index 5d135759..ac252415 100644 --- a/pkg/storage/cache/dynamodb.go +++ b/pkg/storage/cache/dynamodb.go @@ -2,6 +2,7 @@ package cache import ( "context" + "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" @@ -51,7 +52,7 @@ func (d *DynamoDBDriver) NewTable(tableName string) error { WriteCapacityUnits: aws.Int64(5), }, }) - if err != nil { + if err != nil && !strings.Contains(err.Error(), "Table already exists") { return err } @@ -87,8 +88,15 @@ func NewDynamoDBCache(parameters interface{}, log zlog.Logger) Cache { return nil } + driver := &DynamoDBDriver{client: dynamodb.NewFromConfig(cfg), tableName: properParameters.TableName, log: log} + + err = driver.NewTable(driver.tableName) + if err != nil { + log.Error().Err(err).Msgf("unable to create table for cache '%s'", driver.tableName) + } + // Using the Config value, create the DynamoDB client - return &DynamoDBDriver{client: dynamodb.NewFromConfig(cfg), tableName: properParameters.TableName, log: log} + return driver } func (d *DynamoDBDriver) Name() string { diff --git a/pkg/storage/common.go b/pkg/storage/common.go index a87d6e73..7b706df9 100644 --- a/pkg/storage/common.go +++ b/pkg/storage/common.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/docker/distribution/registry/storage/driver" + "github.com/gobwas/glob" "github.com/notaryproject/notation-go" godigest "github.com/opencontainers/go-digest" imeta "github.com/opencontainers/image-spec/specs-go" @@ -664,3 +665,75 @@ func IsSupportedMediaType(mediaType string) bool { mediaType == ispec.MediaTypeArtifactManifest || mediaType == oras.MediaTypeArtifactManifest } + +// imageIsSignature checks if the given image (repo:tag) represents a signature. The function +// returns: +// +// - bool: if the image is a signature or not +// +// - string: the type of signature +// +// - string: the digest of the image it signs +// +// - error: any errors that occur. 
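CheckIsImageSignature recognizes two signature layouts: an ORAS/notation manifest carrying a subject descriptor, and a cosign signature pushed under the tag "sha256-<signed manifest digest hex>.sig". A short usage sketch, mirroring how sync_repodb.go and update.go call it (variable names assumed):

    isSig, sigType, signedDigest, err := storage.CheckIsImageSignature(repoName, manifestBlob, tag, storeController)
    if errors.Is(err, zerr.ErrOrphanSignature) {
        // a signature whose signed image is not present in the repo
    }
    if isSig {
        // sigType is "notation" or "cosign"; signedDigest identifies the manifest it signs
    }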
+func CheckIsImageSignature(repoName string, manifestBlob []byte, reference string, + storeController StoreController, +) (bool, string, godigest.Digest, error) { + const cosign = "cosign" + + var manifestContent oras.Manifest + + err := json.Unmarshal(manifestBlob, &manifestContent) + if err != nil { + return false, "", "", err + } + + // check notation signature + if manifestContent.Subject != nil { + imgStore := storeController.GetImageStore(repoName) + + _, signedImageManifestDigest, _, err := imgStore.GetImageManifest(repoName, + manifestContent.Subject.Digest.String()) + if err != nil { + if errors.Is(err, zerr.ErrManifestNotFound) { + return true, "notation", signedImageManifestDigest, zerr.ErrOrphanSignature + } + + return false, "", "", err + } + + return true, "notation", signedImageManifestDigest, nil + } + + // check cosign + cosignTagRule := glob.MustCompile("sha256-*.sig") + + if tag := reference; cosignTagRule.Match(reference) { + prefixLen := len("sha256-") + digestLen := 64 + signedImageManifestDigestEncoded := tag[prefixLen : prefixLen+digestLen] + + signedImageManifestDigest := godigest.NewDigestFromEncoded(godigest.SHA256, + signedImageManifestDigestEncoded) + + imgStore := storeController.GetImageStore(repoName) + + _, signedImageManifestDigest, _, err := imgStore.GetImageManifest(repoName, + signedImageManifestDigest.String()) + if err != nil { + if errors.Is(err, zerr.ErrManifestNotFound) { + return true, cosign, signedImageManifestDigest, zerr.ErrOrphanSignature + } + + return false, "", "", err + } + + if signedImageManifestDigest.String() == "" { + return true, cosign, signedImageManifestDigest, zerr.ErrOrphanSignature + } + + return true, cosign, signedImageManifestDigest, nil + } + + return false, "", "", nil +} diff --git a/pkg/storage/s3/s3.go b/pkg/storage/s3/s3.go index 6ec9f597..8cd834bf 100644 --- a/pkg/storage/s3/s3.go +++ b/pkg/storage/s3/s3.go @@ -1023,7 +1023,7 @@ func (is *ObjectStorage) checkCacheBlob(digest godigest.Digest) (string, error) return dstRecord, nil } -func (is *ObjectStorage) copyBlob(repo string, blobPath string, dstRecord string) (int64, error) { +func (is *ObjectStorage) copyBlob(repo string, blobPath, dstRecord string) (int64, error) { if err := is.initRepo(repo); err != nil { is.log.Error().Err(err).Str("repo", repo).Msg("unable to initialize an empty repo") diff --git a/pkg/storage/s3/s3_test.go b/pkg/storage/s3/s3_test.go index afb12eea..7c9ee7a1 100644 --- a/pkg/storage/s3/s3_test.go +++ b/pkg/storage/s3/s3_test.go @@ -706,7 +706,7 @@ func TestNegativeCasesObjectsStorage(t *testing.T) { controller := api.NewController(conf) So(controller, ShouldNotBeNil) - err = controller.InitImageStore(context.TODO()) + err = controller.InitImageStore(context.Background()) So(err, ShouldBeNil) }) diff --git a/pkg/test/common.go b/pkg/test/common.go index 03e9d439..e2752c4d 100644 --- a/pkg/test/common.go +++ b/pkg/test/common.go @@ -1,6 +1,7 @@ package test import ( + "bytes" "context" "crypto/rand" "encoding/json" @@ -12,16 +13,22 @@ import ( "net/http" "net/url" "os" + "os/exec" "path" "strings" "time" godigest "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go" - imagespec "github.com/opencontainers/image-spec/specs-go/v1" + ispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/umoci" "github.com/phayes/freeport" + "github.com/sigstore/cosign/cmd/cosign/cli/generate" + "github.com/sigstore/cosign/cmd/cosign/cli/options" + "github.com/sigstore/cosign/cmd/cosign/cli/sign" 
"gopkg.in/resty.v1" + + "zotregistry.io/zot/pkg/storage" ) const ( @@ -59,8 +66,8 @@ var ( ) type Image struct { - Manifest imagespec.Manifest - Config imagespec.Image + Manifest ispec.Manifest + Config ispec.Image Layers [][]byte Tag string } @@ -219,6 +226,50 @@ func NewControllerManager(controller Controller) ControllerManager { return cm } +func WriteImageToFileSystem(image Image, repoName string, storeController storage.StoreController) error { + store := storeController.GetImageStore(repoName) + + err := store.InitRepo(repoName) + if err != nil { + return err + } + + for _, layerBlob := range image.Layers { + layerReader := bytes.NewReader(layerBlob) + layerDigest := godigest.FromBytes(layerBlob) + + _, _, err = store.FullBlobUpload(repoName, layerReader, layerDigest) + if err != nil { + return err + } + } + + configBlob, err := json.Marshal(image.Config) + if err != nil { + return err + } + + configReader := bytes.NewReader(configBlob) + configDigest := godigest.FromBytes(configBlob) + + _, _, err = store.FullBlobUpload(repoName, configReader, configDigest) + if err != nil { + return err + } + + manifestBlob, err := json.Marshal(image.Manifest) + if err != nil { + return err + } + + _, err = store.PutImageManifest(repoName, image.Tag, ispec.MediaTypeImageManifest, manifestBlob) + if err != nil { + return err + } + + return nil +} + func WaitTillServerReady(url string) { for { _, err := resty.R().Get(url) @@ -241,7 +292,7 @@ func WaitTillTrivyDBDownloadStarted(rootDir string) { } // Adapted from https://gist.github.com/dopey/c69559607800d2f2f90b1b1ed4e550fb -func randomString(n int) string { +func RandomString(n int) string { const letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-" ret := make([]byte, n) @@ -261,14 +312,14 @@ func randomString(n int) string { func GetRandomImageConfig() ([]byte, godigest.Digest) { const maxLen = 16 - randomAuthor := randomString(maxLen) + randomAuthor := RandomString(maxLen) - config := imagespec.Image{ - Platform: imagespec.Platform{ + config := ispec.Image{ + Platform: ispec.Platform{ Architecture: "amd64", OS: "linux", }, - RootFS: imagespec.RootFS{ + RootFS: ispec.RootFS{ Type: "layers", DiffIDs: []godigest.Digest{}, }, @@ -286,7 +337,7 @@ func GetRandomImageConfig() ([]byte, godigest.Digest) { } func GetEmptyImageConfig() ([]byte, godigest.Digest) { - config := imagespec.Image{} + config := ispec.Image{} configBlobContent, err := json.MarshalIndent(&config, "", "\t") if err != nil { @@ -299,12 +350,12 @@ func GetEmptyImageConfig() ([]byte, godigest.Digest) { } func GetImageConfig() ([]byte, godigest.Digest) { - config := imagespec.Image{ - Platform: imagespec.Platform{ + config := ispec.Image{ + Platform: ispec.Platform{ Architecture: "amd64", OS: "linux", }, - RootFS: imagespec.RootFS{ + RootFS: ispec.RootFS{ Type: "layers", DiffIDs: []godigest.Digest{}, }, @@ -355,7 +406,7 @@ func GetOciLayoutDigests(imagePath string) (godigest.Digest, godigest.Digest, go panic(err) } - var manifest imagespec.Manifest + var manifest ispec.Manifest err = json.Unmarshal(manifestBuf, &manifest) if err != nil { @@ -372,13 +423,13 @@ func GetOciLayoutDigests(imagePath string) (godigest.Digest, godigest.Digest, go return manifestDigest, configDigest, layerDigest } -func GetImageComponents(layerSize int) (imagespec.Image, [][]byte, imagespec.Manifest, error) { - config := imagespec.Image{ - Platform: imagespec.Platform{ +func GetImageComponents(layerSize int) (ispec.Image, [][]byte, ispec.Manifest, error) { + config := ispec.Image{ + 
Platform: ispec.Platform{ Architecture: "amd64", OS: "linux", }, - RootFS: imagespec.RootFS{ + RootFS: ispec.RootFS{ Type: "layers", DiffIDs: []godigest.Digest{}, }, @@ -387,7 +438,7 @@ func GetImageComponents(layerSize int) (imagespec.Image, [][]byte, imagespec.Man configBlob, err := json.Marshal(config) if err = Error(err); err != nil { - return imagespec.Image{}, [][]byte{}, imagespec.Manifest{}, err + return ispec.Image{}, [][]byte{}, ispec.Manifest{}, err } configDigest := godigest.FromBytes(configBlob) @@ -398,16 +449,16 @@ func GetImageComponents(layerSize int) (imagespec.Image, [][]byte, imagespec.Man schemaVersion := 2 - manifest := imagespec.Manifest{ + manifest := ispec.Manifest{ Versioned: specs.Versioned{ SchemaVersion: schemaVersion, }, - Config: imagespec.Descriptor{ + Config: ispec.Descriptor{ MediaType: "application/vnd.oci.image.config.v1+json", Digest: configDigest, Size: int64(len(configBlob)), }, - Layers: []imagespec.Descriptor{ + Layers: []ispec.Descriptor{ { MediaType: "application/vnd.oci.image.layer.v1.tar", Digest: godigest.FromBytes(layers[0]), @@ -419,6 +470,118 @@ func GetImageComponents(layerSize int) (imagespec.Image, [][]byte, imagespec.Man return config, layers, manifest, nil } +func GetRandomImageComponents(layerSize int) (ispec.Image, [][]byte, ispec.Manifest, error) { + config := ispec.Image{ + Platform: ispec.Platform{ + Architecture: "amd64", + OS: "linux", + }, + RootFS: ispec.RootFS{ + Type: "layers", + DiffIDs: []godigest.Digest{}, + }, + Author: "ZotUser", + } + + configBlob, err := json.Marshal(config) + if err = Error(err); err != nil { + return ispec.Image{}, [][]byte{}, ispec.Manifest{}, err + } + + configDigest := godigest.FromBytes(configBlob) + + layer := make([]byte, layerSize) + + _, err = rand.Read(layer) + if err != nil { + return ispec.Image{}, [][]byte{}, ispec.Manifest{}, err + } + + layers := [][]byte{ + layer, + } + + schemaVersion := 2 + + manifest := ispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: schemaVersion, + }, + Config: ispec.Descriptor{ + MediaType: "application/vnd.oci.image.config.v1+json", + Digest: configDigest, + Size: int64(len(configBlob)), + }, + Layers: []ispec.Descriptor{ + { + MediaType: "application/vnd.oci.image.layer.v1.tar", + Digest: godigest.FromBytes(layers[0]), + Size: int64(len(layers[0])), + }, + }, + } + + return config, layers, manifest, nil +} + +func GetImageWithConfig(conf ispec.Image) (ispec.Image, [][]byte, ispec.Manifest, error) { + configBlob, err := json.Marshal(conf) + if err = Error(err); err != nil { + return ispec.Image{}, [][]byte{}, ispec.Manifest{}, err + } + + configDigest := godigest.FromBytes(configBlob) + + layerSize := 100 + layer := make([]byte, layerSize) + + _, err = rand.Read(layer) + if err != nil { + return ispec.Image{}, [][]byte{}, ispec.Manifest{}, err + } + + layers := [][]byte{ + layer, + } + + schemaVersion := 2 + + manifest := ispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: schemaVersion, + }, + Config: ispec.Descriptor{ + MediaType: "application/vnd.oci.image.config.v1+json", + Digest: configDigest, + Size: int64(len(configBlob)), + }, + Layers: []ispec.Descriptor{ + { + MediaType: "application/vnd.oci.image.layer.v1.tar", + Digest: godigest.FromBytes(layers[0]), + Size: int64(len(layers[0])), + }, + }, + } + + return conf, layers, manifest, nil +} + +func GetCosignSignatureTagForManifest(manifest ispec.Manifest) (string, error) { + manifestBlob, err := json.Marshal(manifest) + if err != nil { + return "", err + } + + manifestDigest := 
godigest.FromBytes(manifestBlob) + + return GetCosignSignatureTagForDigest(manifestDigest), nil +} + +func GetCosignSignatureTagForDigest(manifestDigest godigest.Digest) string { + return manifestDigest.Algorithm().String() + "-" + manifestDigest.Encoded() + ".sig" +} + func UploadImage(img Image, baseURL, repo string) error { for _, blob := range img.Layers { resp, err := resty.R().Post(baseURL + "/v2/" + repo + "/blobs/uploads/") @@ -463,7 +626,7 @@ func UploadImage(img Image, baseURL, repo string) error { return err } - if ErrStatusCode(resp.StatusCode()) != http.StatusAccepted && ErrStatusCode(resp.StatusCode()) == -1 { + if ErrStatusCode(resp.StatusCode()) != http.StatusAccepted || ErrStatusCode(resp.StatusCode()) == -1 { return ErrPostBlob } @@ -480,7 +643,7 @@ func UploadImage(img Image, baseURL, repo string) error { return err } - if ErrStatusCode(resp.StatusCode()) != http.StatusCreated && ErrStatusCode(resp.StatusCode()) == -1 { + if ErrStatusCode(resp.StatusCode()) != http.StatusCreated || ErrStatusCode(resp.StatusCode()) == -1 { return ErrPostBlob } @@ -498,7 +661,7 @@ func UploadImage(img Image, baseURL, repo string) error { return err } -func UploadArtifact(baseURL, repo string, artifactManifest *imagespec.Artifact) error { +func UploadArtifact(baseURL, repo string, artifactManifest *ispec.Artifact) error { // put manifest artifactManifestBlob, err := json.Marshal(artifactManifest) if err != nil { @@ -508,7 +671,7 @@ func UploadArtifact(baseURL, repo string, artifactManifest *imagespec.Artifact) artifactManifestDigest := godigest.FromBytes(artifactManifestBlob) _, err = resty.R(). - SetHeader("Content-type", imagespec.MediaTypeArtifactManifest). + SetHeader("Content-type", ispec.MediaTypeArtifactManifest). SetBody(artifactManifestBlob). Put(baseURL + "/v2/" + repo + "/manifests/" + artifactManifestDigest.String()) @@ -567,3 +730,164 @@ func ReadLogFileAndSearchString(logPath string, stringToMatch string, timeout ti } } } + +func UploadImageWithBasicAuth(img Image, baseURL, repo, user, password string) error { + for _, blob := range img.Layers { + resp, err := resty.R(). + SetBasicAuth(user, password). + Post(baseURL + "/v2/" + repo + "/blobs/uploads/") + if err != nil { + return err + } + + if resp.StatusCode() != http.StatusAccepted { + return ErrPostBlob + } + + loc := resp.Header().Get("Location") + + digest := godigest.FromBytes(blob).String() + + resp, err = resty.R(). + SetBasicAuth(user, password). + SetHeader("Content-Length", fmt.Sprintf("%d", len(blob))). + SetHeader("Content-Type", "application/octet-stream"). + SetQueryParam("digest", digest). + SetBody(blob). + Put(baseURL + loc) + + if err != nil { + return err + } + + if resp.StatusCode() != http.StatusCreated { + return ErrPutBlob + } + } + // upload config + cblob, err := json.Marshal(img.Config) + if err = Error(err); err != nil { + return err + } + + cdigest := godigest.FromBytes(cblob) + + resp, err := resty.R(). + SetBasicAuth(user, password). + Post(baseURL + "/v2/" + repo + "/blobs/uploads/") + if err = Error(err); err != nil { + return err + } + + if ErrStatusCode(resp.StatusCode()) != http.StatusAccepted || ErrStatusCode(resp.StatusCode()) == -1 { + return ErrPostBlob + } + + loc := Location(baseURL, resp) + + // uploading blob should get 201 + resp, err = resty.R(). + SetBasicAuth(user, password). + SetHeader("Content-Length", fmt.Sprintf("%d", len(cblob))). + SetHeader("Content-Type", "application/octet-stream"). + SetQueryParam("digest", cdigest.String()). + SetBody(cblob). 
+ Put(loc) + if err = Error(err); err != nil { + return err + } + + if ErrStatusCode(resp.StatusCode()) != http.StatusCreated || ErrStatusCode(resp.StatusCode()) == -1 { + return ErrPostBlob + } + + // put manifest + manifestBlob, err := json.Marshal(img.Manifest) + if err = Error(err); err != nil { + return err + } + + _, err = resty.R(). + SetBasicAuth(user, password). + SetHeader("Content-type", "application/vnd.oci.image.manifest.v1+json"). + SetBody(manifestBlob). + Put(baseURL + "/v2/" + repo + "/manifests/" + img.Tag) + + return err +} + +func SignImageUsingCosign(repoTag, port string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + + defer func() { _ = os.Chdir(cwd) }() + + tdir, err := os.MkdirTemp("", "cosign") + if err != nil { + return err + } + + defer os.RemoveAll(tdir) + + _ = os.Chdir(tdir) + + // generate a keypair + os.Setenv("COSIGN_PASSWORD", "") + + err = generate.GenerateKeyPairCmd(context.TODO(), "", nil) + if err != nil { + return err + } + + imageURL := fmt.Sprintf("localhost:%s/%s", port, repoTag) + + // sign the image + return sign.SignCmd(&options.RootOptions{Verbose: true, Timeout: 1 * time.Minute}, + options.KeyOpts{KeyRef: path.Join(tdir, "cosign.key"), PassFunc: generate.GetPass}, + options.RegistryOptions{AllowInsecure: true}, + map[string]interface{}{"tag": "1.0"}, + []string{imageURL}, + "", "", true, "", "", "", false, false, "", true) +} + +func SignImageUsingNotary(repoTag, port string) error { + cwd, err := os.Getwd() + if err != nil { + return err + } + + defer func() { _ = os.Chdir(cwd) }() + + tdir, err := os.MkdirTemp("", "notation") + if err != nil { + return err + } + + defer os.RemoveAll(tdir) + + _ = os.Chdir(tdir) + + _, err = exec.LookPath("notation") + if err != nil { + return err + } + + os.Setenv("XDG_CONFIG_HOME", tdir) + + // generate a keypair + cmd := exec.Command("notation", "cert", "generate-test", "--trust", "notation-sign-test") + + err = cmd.Run() + if err != nil { + return err + } + + // sign the image + image := fmt.Sprintf("localhost:%s/%s", port, repoTag) + + cmd = exec.Command("notation", "sign", "--key", "notation-sign-test", "--plain-http", image) + + return cmd.Run() +} diff --git a/pkg/test/common_test.go b/pkg/test/common_test.go index e1a05055..ef53fd95 100644 --- a/pkg/test/common_test.go +++ b/pkg/test/common_test.go @@ -6,6 +6,7 @@ package test_test import ( "context" "encoding/json" + "fmt" "os" "path" "testing" @@ -14,6 +15,7 @@ import ( godigest "github.com/opencontainers/go-digest" ispec "github.com/opencontainers/image-spec/specs-go/v1" . 
"github.com/smartystreets/goconvey/convey" + "golang.org/x/crypto/bcrypt" "zotregistry.io/zot/pkg/api" "zotregistry.io/zot/pkg/api/config" @@ -387,6 +389,78 @@ func TestUploadImage(t *testing.T) { So(err, ShouldBeNil) }) + Convey("Upload image with authentification", t, func() { + tempDir := t.TempDir() + conf := config.New() + port := test.GetFreePort() + baseURL := test.GetBaseURL(port) + + user1 := "test" + password1 := "test" + testString1 := getCredString(user1, password1) + htpasswdPath := test.MakeHtpasswdFileFromString(testString1) + defer os.Remove(htpasswdPath) + conf.HTTP.Auth = &config.AuthConfig{ + HTPasswd: config.AuthHTPasswd{ + Path: htpasswdPath, + }, + } + + conf.HTTP.Port = port + + conf.AccessControl = &config.AccessControlConfig{ + Repositories: config.Repositories{ + "repo": config.PolicyGroup{ + Policies: []config.Policy{ + { + Users: []string{user1}, + Actions: []string{"read", "create"}, + }, + }, + DefaultPolicy: []string{}, + }, + "inaccessibleRepo": config.PolicyGroup{ + Policies: []config.Policy{ + { + Users: []string{user1}, + Actions: []string{"create"}, + }, + }, + DefaultPolicy: []string{}, + }, + }, + AdminPolicy: config.Policy{ + Users: []string{}, + Actions: []string{}, + }, + } + + ctlr := api.NewController(conf) + + ctlr.Config.Storage.RootDirectory = tempDir + + go startServer(ctlr) + defer stopServer(ctlr) + test.WaitTillServerReady(baseURL) + + Convey("Request fail while pushing layer", func() { + err := test.UploadImageWithBasicAuth(test.Image{Layers: [][]byte{{1, 2, 3}}}, "badURL", "", "", "") + So(err, ShouldNotBeNil) + }) + Convey("Request status is not StatusOk while pushing layer", func() { + err := test.UploadImageWithBasicAuth(test.Image{Layers: [][]byte{{1, 2, 3}}}, baseURL, "repo", "", "") + So(err, ShouldNotBeNil) + }) + Convey("Request fail while pushing config", func() { + err := test.UploadImageWithBasicAuth(test.Image{}, "badURL", "", "", "") + So(err, ShouldNotBeNil) + }) + Convey("Request status is not StatusOk while pushing config", func() { + err := test.UploadImageWithBasicAuth(test.Image{}, baseURL, "repo", "", "") + So(err, ShouldNotBeNil) + }) + }) + Convey("Blob upload wrong response status code", t, func() { port := test.GetFreePort() baseURL := test.GetBaseURL(port) @@ -481,6 +555,17 @@ func TestUploadImage(t *testing.T) { }) } +func getCredString(username, password string) string { + hash, err := bcrypt.GenerateFromPassword([]byte(password), 10) + if err != nil { + panic(err) + } + + usernameAndHash := fmt.Sprintf("%s:%s", username, string(hash)) + + return usernameAndHash +} + func TestInjectUploadImage(t *testing.T) { Convey("Inject failures for unreachable lines", t, func() { port := test.GetFreePort() @@ -566,6 +651,81 @@ func TestReadLogFileAndSearchString(t *testing.T) { }) } +func TestInjectUploadImageWithBasicAuth(t *testing.T) { + Convey("Inject failures for unreachable lines", t, func() { + port := test.GetFreePort() + baseURL := test.GetBaseURL(port) + + tempDir := t.TempDir() + conf := config.New() + conf.HTTP.Port = port + conf.Storage.RootDirectory = tempDir + + user := "user" + password := "password" + testString := getCredString(user, password) + htpasswdPath := test.MakeHtpasswdFileFromString(testString) + defer os.Remove(htpasswdPath) + conf.HTTP.Auth = &config.AuthConfig{ + HTPasswd: config.AuthHTPasswd{ + Path: htpasswdPath, + }, + } + + ctlr := api.NewController(conf) + go startServer(ctlr) + defer stopServer(ctlr) + + test.WaitTillServerReady(baseURL) + + layerBlob := []byte("test") + layerPath := 
path.Join(tempDir, "test", ".uploads") + + if _, err := os.Stat(layerPath); os.IsNotExist(err) { + err = os.MkdirAll(layerPath, 0o700) + if err != nil { + t.Fatal(err) + } + } + + img := test.Image{ + Layers: [][]byte{ + layerBlob, + }, // invalid format that will result in an error + Config: ispec.Image{}, + } + + Convey("first marshal", func() { + injected := test.InjectFailure(0) + if injected { + err := test.UploadImageWithBasicAuth(img, baseURL, "test", "user", "password") + So(err, ShouldNotBeNil) + } + }) + Convey("CreateBlobUpload POST call", func() { + injected := test.InjectFailure(1) + if injected { + err := test.UploadImageWithBasicAuth(img, baseURL, "test", "user", "password") + So(err, ShouldNotBeNil) + } + }) + Convey("UpdateBlobUpload PUT call", func() { + injected := test.InjectFailure(3) + if injected { + err := test.UploadImageWithBasicAuth(img, baseURL, "test", "user", "password") + So(err, ShouldNotBeNil) + } + }) + Convey("second marshal", func() { + injected := test.InjectFailure(5) + if injected { + err := test.UploadImageWithBasicAuth(img, baseURL, "test", "user", "password") + So(err, ShouldNotBeNil) + } + }) + }) +} + func startServer(c *api.Controller) { // this blocks ctx := context.Background() diff --git a/pkg/test/mocks/oci_mock.go b/pkg/test/mocks/oci_mock.go index d4feef08..5af34bae 100644 --- a/pkg/test/mocks/oci_mock.go +++ b/pkg/test/mocks/oci_mock.go @@ -14,7 +14,6 @@ type OciLayoutUtilsMock struct { GetImageInfoFn func(repo string, digest godigest.Digest) (ispec.Image, error) GetImageTagsWithTimestampFn func(repo string) ([]common.TagInfo, error) GetImagePlatformFn func(imageInfo ispec.Image) (string, string) - GetImageVendorFn func(imageInfo ispec.Image) string GetImageManifestSizeFn func(repo string, manifestDigest godigest.Digest) int64 GetImageConfigSizeFn func(repo string, manifestDigest godigest.Digest) int64 GetRepoLastUpdatedFn func(repo string) (common.TagInfo, error) @@ -81,14 +80,6 @@ func (olum OciLayoutUtilsMock) GetImagePlatform(imageInfo ispec.Image) (string, return "", "" } -func (olum OciLayoutUtilsMock) GetImageVendor(imageInfo ispec.Image) string { - if olum.GetImageVendorFn != nil { - return olum.GetImageVendorFn(imageInfo) - } - - return "" -} - func (olum OciLayoutUtilsMock) GetImageManifestSize(repo string, manifestDigest godigest.Digest) int64 { if olum.GetImageManifestSizeFn != nil { return olum.GetImageManifestSizeFn(repo, manifestDigest) diff --git a/pkg/test/mocks/repo_db_mock.go b/pkg/test/mocks/repo_db_mock.go new file mode 100644 index 00000000..de5f2886 --- /dev/null +++ b/pkg/test/mocks/repo_db_mock.go @@ -0,0 +1,263 @@ +package mocks + +import ( + "context" + + godigest "github.com/opencontainers/go-digest" + + "zotregistry.io/zot/pkg/meta/repodb" +) + +type RepoDBMock struct { + SetRepoDescriptionFn func(repo, description string) error + + IncrementRepoStarsFn func(repo string) error + + DecrementRepoStarsFn func(repo string) error + + GetRepoStarsFn func(repo string) (int, error) + + SetRepoLogoFn func(repo string, logoPath string) error + + SetRepoTagFn func(repo string, tag string, manifestDigest godigest.Digest, mediaType string) error + + DeleteRepoTagFn func(repo string, tag string) error + + GetRepoMetaFn func(repo string) (repodb.RepoMetadata, error) + + GetMultipleRepoMetaFn func(ctx context.Context, filter func(repoMeta repodb.RepoMetadata) bool, + requestedPage repodb.PageInput) ([]repodb.RepoMetadata, error) + + GetManifestDataFn func(manifestDigest godigest.Digest) (repodb.ManifestData, error) + + 
SetManifestDataFn func(manifestDigest godigest.Digest, mm repodb.ManifestData) error + + GetManifestMetaFn func(repo string, manifestDigest godigest.Digest) (repodb.ManifestMetadata, error) + + SetManifestMetaFn func(repo string, manifestDigest godigest.Digest, mm repodb.ManifestMetadata) error + + IncrementImageDownloadsFn func(repo string, reference string) error + + AddManifestSignatureFn func(repo string, signedManifestDigest godigest.Digest, sm repodb.SignatureMetadata) error + + DeleteSignatureFn func(repo string, signedManifestDigest godigest.Digest, sm repodb.SignatureMetadata) error + + SearchReposFn func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput) ( + []repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) + + SearchTagsFn func(ctx context.Context, searchText string, filter repodb.Filter, requestedPage repodb.PageInput) ( + []repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) + + SearchDigestsFn func(ctx context.Context, searchText string, requestedPage repodb.PageInput) ( + []repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) + + SearchLayersFn func(ctx context.Context, searchText string, requestedPage repodb.PageInput) ( + []repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) + + SearchForAscendantImagesFn func(ctx context.Context, searchText string, requestedPage repodb.PageInput) ( + []repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) + + SearchForDescendantImagesFn func(ctx context.Context, searchText string, requestedPage repodb.PageInput) ( + []repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) + + PatchDBFn func() error +} + +func (sdm RepoDBMock) SetRepoDescription(repo, description string) error { + if sdm.SetRepoDescriptionFn != nil { + return sdm.SetRepoDescriptionFn(repo, description) + } + + return nil +} + +func (sdm RepoDBMock) IncrementRepoStars(repo string) error { + if sdm.IncrementRepoStarsFn != nil { + return sdm.IncrementRepoStarsFn(repo) + } + + return nil +} + +func (sdm RepoDBMock) DecrementRepoStars(repo string) error { + if sdm.DecrementRepoStarsFn != nil { + return sdm.DecrementRepoStarsFn(repo) + } + + return nil +} + +func (sdm RepoDBMock) GetRepoStars(repo string) (int, error) { + if sdm.GetRepoStarsFn != nil { + return sdm.GetRepoStarsFn(repo) + } + + return 0, nil +} + +func (sdm RepoDBMock) SetRepoLogo(repo string, logoPath string) error { + if sdm.SetRepoLogoFn != nil { + return sdm.SetRepoLogoFn(repo, logoPath) + } + + return nil +} + +func (sdm RepoDBMock) SetRepoTag(repo string, tag string, manifestDigest godigest.Digest, mediaType string) error { + if sdm.SetRepoTagFn != nil { + return sdm.SetRepoTagFn(repo, tag, manifestDigest, mediaType) + } + + return nil +} + +func (sdm RepoDBMock) DeleteRepoTag(repo string, tag string) error { + if sdm.DeleteRepoTagFn != nil { + return sdm.DeleteRepoTagFn(repo, tag) + } + + return nil +} + +func (sdm RepoDBMock) GetRepoMeta(repo string) (repodb.RepoMetadata, error) { + if sdm.GetRepoMetaFn != nil { + return sdm.GetRepoMetaFn(repo) + } + + return repodb.RepoMetadata{}, nil +} + +func (sdm RepoDBMock) GetMultipleRepoMeta(ctx context.Context, filter func(repoMeta repodb.RepoMetadata) bool, + requestedPage repodb.PageInput, +) ([]repodb.RepoMetadata, error) { + if sdm.GetMultipleRepoMetaFn != nil { + return sdm.GetMultipleRepoMetaFn(ctx, filter, requestedPage) + } + + return []repodb.RepoMetadata{}, nil +} + +func (sdm RepoDBMock) GetManifestData(manifestDigest godigest.Digest) 
(repodb.ManifestData, error) {
+	if sdm.GetManifestDataFn != nil {
+		return sdm.GetManifestDataFn(manifestDigest)
+	}
+
+	return repodb.ManifestData{}, nil
+}
+
+func (sdm RepoDBMock) SetManifestData(manifestDigest godigest.Digest, md repodb.ManifestData) error {
+	if sdm.SetManifestDataFn != nil {
+		return sdm.SetManifestDataFn(manifestDigest, md)
+	}
+
+	return nil
+}
+
+func (sdm RepoDBMock) GetManifestMeta(repo string, manifestDigest godigest.Digest) (repodb.ManifestMetadata, error) {
+	if sdm.GetManifestMetaFn != nil {
+		return sdm.GetManifestMetaFn(repo, manifestDigest)
+	}
+
+	return repodb.ManifestMetadata{}, nil
+}
+
+func (sdm RepoDBMock) SetManifestMeta(repo string, manifestDigest godigest.Digest, mm repodb.ManifestMetadata) error {
+	if sdm.SetManifestMetaFn != nil {
+		return sdm.SetManifestMetaFn(repo, manifestDigest, mm)
+	}
+
+	return nil
+}
+
+func (sdm RepoDBMock) IncrementImageDownloads(repo string, reference string) error {
+	if sdm.IncrementImageDownloadsFn != nil {
+		return sdm.IncrementImageDownloadsFn(repo, reference)
+	}
+
+	return nil
+}
+
+func (sdm RepoDBMock) AddManifestSignature(repo string, signedManifestDigest godigest.Digest,
+	sm repodb.SignatureMetadata,
+) error {
+	if sdm.AddManifestSignatureFn != nil {
+		return sdm.AddManifestSignatureFn(repo, signedManifestDigest, sm)
+	}
+
+	return nil
+}
+
+func (sdm RepoDBMock) DeleteSignature(repo string, signedManifestDigest godigest.Digest,
+	sm repodb.SignatureMetadata,
+) error {
+	if sdm.DeleteSignatureFn != nil {
+		return sdm.DeleteSignatureFn(repo, signedManifestDigest, sm)
+	}
+
+	return nil
+}
+
+func (sdm RepoDBMock) SearchRepos(ctx context.Context, searchText string, filter repodb.Filter,
+	requestedPage repodb.PageInput,
+) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
+	if sdm.SearchReposFn != nil {
+		return sdm.SearchReposFn(ctx, searchText, filter, requestedPage)
+	}
+
+	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
+}
+
+func (sdm RepoDBMock) SearchTags(ctx context.Context, searchText string, filter repodb.Filter,
+	requestedPage repodb.PageInput,
+) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
+	if sdm.SearchTagsFn != nil {
+		return sdm.SearchTagsFn(ctx, searchText, filter, requestedPage)
+	}
+
+	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
+}
+
+func (sdm RepoDBMock) SearchDigests(ctx context.Context, searchText string, requestedPage repodb.PageInput,
+) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
+	if sdm.SearchDigestsFn != nil {
+		return sdm.SearchDigestsFn(ctx, searchText, requestedPage)
+	}
+
+	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
+}
+
+func (sdm RepoDBMock) SearchLayers(ctx context.Context, searchText string, requestedPage repodb.PageInput,
+) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
+	if sdm.SearchLayersFn != nil {
+		return sdm.SearchLayersFn(ctx, searchText, requestedPage)
+	}
+
+	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
+}
+
+func (sdm RepoDBMock) SearchForAscendantImages(ctx context.Context, searchText string, requestedPage repodb.PageInput,
+) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
+	if sdm.SearchForAscendantImagesFn != nil {
+		return sdm.SearchForAscendantImagesFn(ctx, searchText, requestedPage)
+	}
+
+	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
+}
+
+func (sdm RepoDBMock) SearchForDescendantImages(ctx 
context.Context, searchText string, + requestedPage repodb.PageInput, +) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) { + if sdm.SearchForDescendantImagesFn != nil { + return sdm.SearchForDescendantImagesFn(ctx, searchText, requestedPage) + } + + return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil +} + +func (sdm RepoDBMock) PatchDB() error { + if sdm.PatchDBFn != nil { + return sdm.PatchDBFn() + } + + return nil +} diff --git a/test/blackbox/cloud-only.bats b/test/blackbox/cloud-only.bats index 7aa8952f..91c697ae 100644 --- a/test/blackbox/cloud-only.bats +++ b/test/blackbox/cloud-only.bats @@ -34,7 +34,10 @@ function setup() { "name": "dynamodb", "endpoint": "http://localhost:4566", "region": "us-east-2", - "tableName": "BlobTable" + "cacheTablename": "BlobTable", + "repoMetaTablename": "RepoMetadataTable", + "manifestDataTablename": "ManifestDataTable", + "versionTablename": "Version" } }, "http": { @@ -63,6 +66,8 @@ function setup() { EOF awslocal s3 --region "us-east-2" mb s3://zot-storage awslocal dynamodb --region "us-east-2" create-table --table-name "BlobTable" --attribute-definitions AttributeName=Digest,AttributeType=S --key-schema AttributeName=Digest,KeyType=HASH --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=5 + awslocal dynamodb --region "us-east-2" create-table --table-name "RepoMetadataTable" --attribute-definitions AttributeName=RepoName,AttributeType=S --key-schema AttributeName=RepoName,KeyType=HASH --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=5 + awslocal dynamodb --region "us-east-2" create-table --table-name "ManifestDataTable" --attribute-definitions AttributeName=Digest,AttributeType=S --key-schema AttributeName=Digest,KeyType=HASH --provisioned-throughput ReadCapacityUnits=10,WriteCapacityUnits=5 zot_serve_strace ${zot_config_file} wait_zot_reachable "http://127.0.0.1:8080/v2/_catalog" }
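
The cosign helpers added to pkg/test/common.go and the tag matching in CheckIsImageSignature rely on the same "<algorithm>-<hex digest>.sig" naming convention. A minimal, self-contained sketch of that round trip (illustrative only, not part of the patch; it uses nothing beyond the go-digest API already imported above):

package main

import (
	"fmt"
	"strings"

	godigest "github.com/opencontainers/go-digest"
)

func main() {
	// Digest of the manifest being signed; any payload works for the sketch.
	manifestDigest := godigest.FromBytes([]byte("example manifest blob"))

	// Same formula as GetCosignSignatureTagForDigest: "<algorithm>-<hex>.sig",
	// which also matches the "sha256-*.sig" glob used by CheckIsImageSignature.
	sigTag := manifestDigest.Algorithm().String() + "-" + manifestDigest.Encoded() + ".sig"

	// Recover the signed manifest digest from the tag, equivalent to the
	// fixed-offset slicing done in CheckIsImageSignature.
	encoded := strings.TrimSuffix(strings.TrimPrefix(sigTag, "sha256-"), ".sig")
	recovered := godigest.NewDigestFromEncoded(godigest.SHA256, encoded)

	fmt.Println(sigTag)
	fmt.Println(recovered == manifestDigest) // true
}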
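
The image helpers and UploadImageWithBasicAuth from pkg/test/common.go are meant to compose as below. A hedged sketch of a caller: the baseURL, repository name, and "test"/"test" credentials are placeholders for whatever the surrounding test configures (e.g. via MakeHtpasswdFileFromString and a running controller, as in the tests above):

package test_test

import (
	"testing"

	"zotregistry.io/zot/pkg/test"
)

// uploadRandomImage is illustrative only: it assumes a zot server with
// htpasswd auth for user "test"/"test" is already listening at baseURL.
func uploadRandomImage(t *testing.T, baseURL string) {
	t.Helper()

	// Build an in-memory image: config, one random layer, and a manifest.
	cfg, layers, manifest, err := test.GetRandomImageComponents(256)
	if err != nil {
		t.Fatal(err)
	}

	img := test.Image{
		Config:   cfg,
		Layers:   layers,
		Manifest: manifest,
		Tag:      "0.0.1",
	}

	// Pushes each layer, the config blob, and finally the manifest,
	// authenticating every request with HTTP basic auth.
	if err := test.UploadImageWithBasicAuth(img, baseURL, "repo", "test", "test"); err != nil {
		t.Fatal(err)
	}
}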
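
RepoDBMock in pkg/test/mocks/repo_db_mock.go follows the same pattern as the other mocks in that package: every method delegates to an optional function field and otherwise returns zero values. A small usage sketch (the stubbed star count is made up for illustration):

package mocks_test

import (
	"testing"

	"zotregistry.io/zot/pkg/test/mocks"
)

func TestRepoDBMockSketch(t *testing.T) {
	// Stub only the methods the test cares about; everything else falls
	// back to the zero-value defaults built into the mock.
	repoDB := mocks.RepoDBMock{
		GetRepoStarsFn: func(repo string) (int, error) {
			return 42, nil
		},
	}

	stars, err := repoDB.GetRepoStars("zot-test")
	if err != nil || stars != 42 {
		t.Fatalf("unexpected stars: %d, err: %v", stars, err)
	}

	// Unstubbed methods are no-ops, e.g. IncrementRepoStars returns nil.
	if err := repoDB.IncrementRepoStars("zot-test"); err != nil {
		t.Fatal(err)
	}
}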