Mirror of https://github.com/project-zot/zot.git
boltdb query logic
fix transaction problem; change bolt-db version

Signed-off-by: Laurentiu Niculae <niculae.laurentiu1@gmail.com>
This commit is contained in:
parent dbe96efa00, commit e3cb60b856
31 changed files with 6192 additions and 629 deletions
@@ -56,4 +56,7 @@ var (
	ErrImageLintAnnotations = errors.New("routes: lint checks failed")
	ErrParsingAuthHeader    = errors.New("auth: failed parsing authorization header")
	ErrBadType              = errors.New("core: invalid type")
	ErrManifestMetaNotFound = errors.New("repodb: image metadata not found for given manifest digest")
	ErrRepoMetaNotFound     = errors.New("repodb: repo metadata not found for given repo name")
	ErrTypeAssertionFailed  = errors.New("storage: failed DatabaseDriver type assertion")
)
examples/config-bolt-repodb.json (new file, 25 lines)

@@ -0,0 +1,25 @@
{
    "distSpecVersion": "1.0.1-dev",
    "storage": {
        "rootDirectory": "/tmp/zot",
        "repoDBDriver": {
            "name": "boltdb",
            "rootDirectory": "/tmp/zot/cachedb"
        }
    },
    "extensions": {
        "search": {
            "enable": true,
            "cve": {
                "updateInterval": "24h"
            }
        }
    },
    "http": {
        "address": "127.0.0.1",
        "port": "8080"
    },
    "log": {
        "level": "debug"
    }
}
@@ -17,5 +17,6 @@
            "updateInterval": "24h"
        }
    }
}
}
go.mod (2 changes)

@@ -53,6 +53,7 @@ require (
require (
	github.com/aquasecurity/trivy v0.0.0-00010101000000-000000000000
	github.com/containers/image/v5 v5.22.0
	github.com/gobwas/glob v0.2.3
	github.com/notaryproject/notation-go v0.10.0-alpha.3
	github.com/opencontainers/distribution-spec/specs-go v0.0.0-20220620172159-4ab4752c3b86
	github.com/sigstore/cosign v1.11.1
@@ -184,7 +185,6 @@ require (
	github.com/go-playground/validator/v10 v10.11.0 // indirect
	github.com/go-redis/redis/v8 v8.11.5 // indirect
	github.com/go-restruct/restruct v0.0.0-20191227155143-5734170a48a1 // indirect
	github.com/gobwas/glob v0.2.3 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
	github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
@@ -26,6 +26,7 @@ type StorageConfig struct {
	GCDelay       time.Duration
	GCInterval    time.Duration
	StorageDriver map[string]interface{} `mapstructure:",omitempty"`
	RepoDBDriver  map[string]interface{} `mapstructure:",omitempty"`
}

type TLSConfig struct {

@@ -101,6 +102,7 @@ type GlobalStorageConfig struct {
	RootDirectory string
	StorageDriver map[string]interface{} `mapstructure:",omitempty"`
	SubPaths      map[string]StorageConfig
	RepoDBDriver  map[string]interface{} `mapstructure:",omitempty"`
}

type AccessControlConfig struct {
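The new RepoDBDriver field is deliberately an untyped map, so the driver block from examples/config-bolt-repodb.json is only interpreted when the controller builds the driver. Below is a minimal sketch of that round trip using plain encoding/json for illustration; zot's real loader goes through viper/mapstructure, so the struct tags here are an assumption.

```go
// Illustration only: decode a repoDBDriver block into a generic map and read
// the keys back, mirroring what createRepoDBDriver does later in this diff.
package main

import (
	"encoding/json"
	"fmt"
)

type storageConfig struct {
	RootDirectory string                 `json:"rootDirectory"`
	RepoDBDriver  map[string]interface{} `json:"repoDBDriver"`
}

func main() {
	raw := []byte(`{
		"rootDirectory": "/tmp/zot",
		"repoDBDriver": {"name": "boltdb", "rootDirectory": "/tmp/zot/cachedb"}
	}`)

	var conf storageConfig
	if err := json.Unmarshal(raw, &conf); err != nil {
		panic(err)
	}

	// Values come out as interface{}, hence the type assertions in the controller.
	name, _ := conf.RepoDBDriver["name"].(string)
	rootDir, _ := conf.RepoDBDriver["rootDirectory"].(string)
	fmt.Println(name, rootDir) // boltdb /tmp/zot/cachedb
}
```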
@@ -26,6 +26,8 @@ import (
	"zotregistry.io/zot/pkg/extensions/monitoring"
	"zotregistry.io/zot/pkg/log"
	"zotregistry.io/zot/pkg/storage"
	"zotregistry.io/zot/pkg/storage/repodb"
	"zotregistry.io/zot/pkg/storage/repodb/repodbfactory"
	"zotregistry.io/zot/pkg/storage/s3"
)

@@ -36,6 +38,7 @@ const (
type Controller struct {
	Config          *config.Config
	Router          *mux.Router
	RepoDB          repodb.RepoDB
	StoreController storage.StoreController
	Log             log.Logger
	Audit           *log.Logger

@@ -158,6 +161,10 @@ func (c *Controller) Run(reloadCtx context.Context) error {
		return err
	}

	if err := c.InitRepoDB(reloadCtx); err != nil {
		return err
	}

	monitoring.SetServerInfo(c.Metrics, c.Config.Commit, c.Config.BinaryType, c.Config.GoVersion,
		c.Config.DistSpecVersion)
@@ -418,6 +425,68 @@ func compareImageStore(root1, root2 string) bool {
	return isSameFile
}

func (c *Controller) InitRepoDB(reloadCtx context.Context) error {
	if c.Config.Extensions != nil && c.Config.Extensions.Search != nil && *c.Config.Extensions.Search.Enable {
		driver, err := c.createRepoDBDriver(reloadCtx)
		if err != nil {
			return err
		}

		c.RepoDB = driver
	}

	return nil
}

func (c *Controller) createRepoDBDriver(reloadCtx context.Context) (repodb.RepoDB, error) {
	repoDBConfig := c.Config.Storage.RepoDBDriver

	if repoDBConfig != nil {
		if val, ok := repoDBConfig["name"]; ok {
			assertedDriverNameVal, okAssert := val.(string)
			if !okAssert {
				c.Log.Error().Err(errors.ErrTypeAssertionFailed).Msgf("Failed type assertion for %v to string",
					"cacheDatabaseDriverName")

				return nil, errors.ErrTypeAssertionFailed
			}

			switch assertedDriverNameVal {
			case "boltdb":
				params := repodb.BoltDBParameters{}
				boltRootDirCfgVarName := "rootDirectory"

				// default values
				params.RootDir = c.StoreController.DefaultStore.RootDir()

				if rootDirVal, ok := repoDBConfig[boltRootDirCfgVarName]; ok {
					assertedRootDir, okAssert := rootDirVal.(string)
					if !okAssert {
						c.Log.Error().Err(errors.ErrTypeAssertionFailed).Msgf("Failed type assertion for %v to string", rootDirVal)

						return nil, errors.ErrTypeAssertionFailed
					}
					params.RootDir = assertedRootDir
				}

				return repodbfactory.Create("boltdb", params)
			default:
				c.Log.Warn().Msgf("Cache DB driver not found for %v: defaulting to boltdb (local storage)", val)

				return repodbfactory.Create("boltdb", repodb.BoltDBParameters{
					RootDir: c.StoreController.DefaultStore.RootDir(),
				})
			}
		}
	}

	c.Log.Warn().Msg(`Something went wrong when reading the cachedb config. Defaulting to BoltDB`)

	return repodbfactory.Create("boltdb", repodb.BoltDBParameters{
		RootDir: c.StoreController.DefaultStore.RootDir(),
	})
}

func (c *Controller) LoadNewConfig(reloadCtx context.Context, config *config.Config) {
	// reload access control config
	c.Config.AccessControl = config.AccessControl
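For reference, a hedged sketch of driving the new RepoDB interface directly: it reuses the factory call shown above and the methods the route handlers rely on later in this diff (SetManifestMeta, SetRepoTag, IncrementManifestDownloads). The empty ManifestMetadata literal and the placeholder digest are assumptions made for brevity.

```go
// Sketch only: error handling trimmed, digest is a placeholder value.
package main

import (
	"log"

	"zotregistry.io/zot/pkg/storage/repodb"
	"zotregistry.io/zot/pkg/storage/repodb/repodbfactory"
)

func main() {
	// Same call the controller makes when the config selects the "boltdb" driver.
	repoDB, err := repodbfactory.Create("boltdb", repodb.BoltDBParameters{
		RootDir: "/tmp/zot/cachedb",
	})
	if err != nil {
		log.Fatal(err)
	}

	digest := "sha256:0123..." // placeholder, normally computed from the manifest blob

	// Store manifest metadata first, then attach the tag; UpdateManifest uses the
	// same ordering so orphaned metadata can be garbage collected later.
	if err := repoDB.SetManifestMeta(digest, repodb.ManifestMetadata{}); err != nil {
		log.Fatal(err)
	}

	if err := repoDB.SetRepoTag("testrepo", "latest", digest); err != nil {
		log.Fatal(err)
	}

	// GetManifest bumps the download counter on every successful pull.
	if err := repoDB.IncrementManifestDownloads(digest); err != nil {
		log.Fatal(err)
	}
}
```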
@ -5971,48 +5971,9 @@ func TestPeriodicTasks(t *testing.T) {
|
|||
|
||||
func TestSearchRoutes(t *testing.T) {
|
||||
Convey("Upload image for test", t, func(c C) {
|
||||
port := test.GetFreePort()
|
||||
baseURL := test.GetBaseURL(port)
|
||||
conf := config.New()
|
||||
conf.HTTP.Port = port
|
||||
tempDir := t.TempDir()
|
||||
|
||||
ctlr := api.NewController(conf)
|
||||
ctlr.Config.Storage.RootDirectory = tempDir
|
||||
|
||||
go startServer(ctlr)
|
||||
defer stopServer(ctlr)
|
||||
|
||||
test.WaitTillServerReady(baseURL)
|
||||
|
||||
repoName := "testrepo"
|
||||
inaccessibleRepo := "inaccessible"
|
||||
cfg, layers, manifest, err := test.GetImageComponents(10000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = test.UploadImage(
|
||||
test.Image{
|
||||
Config: cfg,
|
||||
Layers: layers,
|
||||
Manifest: manifest,
|
||||
Tag: "latest",
|
||||
}, baseURL, repoName)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// data for the inaccessible repo
|
||||
cfg, layers, manifest, err = test.GetImageComponents(10000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = test.UploadImage(
|
||||
test.Image{
|
||||
Config: cfg,
|
||||
Layers: layers,
|
||||
Manifest: manifest,
|
||||
Tag: "latest",
|
||||
}, baseURL, inaccessibleRepo)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("GlobalSearch with authz enabled", func(c C) {
|
||||
conf := config.New()
|
||||
|
@ -6048,7 +6009,7 @@ func TestSearchRoutes(t *testing.T) {
|
|||
Policies: []config.Policy{
|
||||
{
|
||||
Users: []string{user1},
|
||||
Actions: []string{"read"},
|
||||
Actions: []string{"read", "create"},
|
||||
},
|
||||
},
|
||||
DefaultPolicy: []string{},
|
||||
|
@ -6056,8 +6017,8 @@ func TestSearchRoutes(t *testing.T) {
|
|||
inaccessibleRepo: config.PolicyGroup{
|
||||
Policies: []config.Policy{
|
||||
{
|
||||
Users: []string{},
|
||||
Actions: []string{},
|
||||
Users: []string{user1},
|
||||
Actions: []string{"create"},
|
||||
},
|
||||
},
|
||||
DefaultPolicy: []string{},
|
||||
|
@ -6077,9 +6038,38 @@ func TestSearchRoutes(t *testing.T) {
|
|||
defer stopServer(ctlr)
|
||||
test.WaitTillServerReady(baseURL)
|
||||
|
||||
cfg, layers, manifest, err := test.GetImageComponents(10000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = test.UploadImageWithBasicAuth(
|
||||
test.Image{
|
||||
Config: cfg,
|
||||
Layers: layers,
|
||||
Manifest: manifest,
|
||||
Tag: "latest",
|
||||
}, baseURL, repoName,
|
||||
user1, password1)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
// data for the inaccessible repo
|
||||
cfg, layers, manifest, err = test.GetImageComponents(10000)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err = test.UploadImageWithBasicAuth(
|
||||
test.Image{
|
||||
Config: cfg,
|
||||
Layers: layers,
|
||||
Manifest: manifest,
|
||||
Tag: "latest",
|
||||
}, baseURL, inaccessibleRepo,
|
||||
user1, password1)
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
query := `
|
||||
{
|
||||
GlobalSearch(query:""){
|
||||
GlobalSearch(query:"testrepo"){
|
||||
Repos {
|
||||
Name
|
||||
Score
|
||||
|
@ -6104,24 +6094,41 @@ func TestSearchRoutes(t *testing.T) {
|
|||
So(resp, ShouldNotBeNil)
|
||||
So(resp.StatusCode(), ShouldEqual, http.StatusUnauthorized)
|
||||
|
||||
// credentials for user unauthorized to access repo
|
||||
user2 := "notWorking"
|
||||
password2 := "notWorking"
|
||||
testString2 := getCredString(user2, password2)
|
||||
htpasswdPath2 := test.MakeHtpasswdFileFromString(testString2)
|
||||
defer os.Remove(htpasswdPath2)
|
||||
|
||||
ctlr.Config.HTTP.Auth = &config.AuthConfig{
|
||||
HTPasswd: config.AuthHTPasswd{
|
||||
Path: htpasswdPath2,
|
||||
conf.AccessControl = &config.AccessControlConfig{
|
||||
Repositories: config.Repositories{
|
||||
repoName: config.PolicyGroup{
|
||||
Policies: []config.Policy{
|
||||
{
|
||||
Users: []string{user1},
|
||||
Actions: []string{},
|
||||
},
|
||||
},
|
||||
DefaultPolicy: []string{},
|
||||
},
|
||||
inaccessibleRepo: config.PolicyGroup{
|
||||
Policies: []config.Policy{
|
||||
{
|
||||
Users: []string{},
|
||||
Actions: []string{},
|
||||
},
|
||||
},
|
||||
DefaultPolicy: []string{},
|
||||
},
|
||||
},
|
||||
AdminPolicy: config.Policy{
|
||||
Users: []string{},
|
||||
Actions: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
// authenticated, but no access to resource
|
||||
resp, err = resty.R().SetBasicAuth(user2, password2).Get(baseURL + constants.ExtSearchPrefix +
|
||||
resp, err = resty.R().SetBasicAuth(user1, password1).Get(baseURL + constants.ExtSearchPrefix +
|
||||
"?query=" + url.QueryEscape(query))
|
||||
So(err, ShouldBeNil)
|
||||
So(resp, ShouldNotBeNil)
|
||||
So(resp.StatusCode(), ShouldEqual, http.StatusUnauthorized)
|
||||
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
|
||||
So(string(resp.Body()), ShouldNotContainSubstring, repoName)
|
||||
So(string(resp.Body()), ShouldNotContainSubstring, inaccessibleRepo)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -22,6 +23,7 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gobwas/glob"
|
||||
"github.com/gorilla/mux"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
notreg "github.com/notaryproject/notation-go/registry"
|
||||
|
@ -35,7 +37,8 @@ import (
|
|||
"zotregistry.io/zot/pkg/log"
|
||||
localCtx "zotregistry.io/zot/pkg/requestcontext"
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
"zotregistry.io/zot/pkg/test" // nolint:goimports
|
||||
"zotregistry.io/zot/pkg/storage/repodb"
|
||||
"zotregistry.io/zot/pkg/test" // nolint: goimports
|
||||
// as required by swaggo.
|
||||
_ "zotregistry.io/zot/swagger"
|
||||
)
|
||||
|
@ -125,7 +128,7 @@ func (rh *RouteHandler) SetupRoutes() {
|
|||
} else {
|
||||
// extended build
|
||||
ext.SetupMetricsRoutes(rh.c.Config, rh.c.Router, rh.c.StoreController, AuthHandler(rh.c), rh.c.Log)
|
||||
ext.SetupSearchRoutes(rh.c.Config, rh.c.Router, rh.c.StoreController, AuthHandler(rh.c), rh.c.Log)
|
||||
ext.SetupSearchRoutes(rh.c.Config, rh.c.Router, rh.c.StoreController, AuthHandler(rh.c), rh.c.RepoDB, rh.c.Log)
|
||||
ext.SetupUIRoutes(rh.c.Config, rh.c.Router, rh.c.StoreController, rh.c.Log)
|
||||
}
|
||||
}
|
||||
|
@ -400,6 +403,16 @@ func (rh *RouteHandler) GetManifest(response http.ResponseWriter, request *http.
|
|||
return
|
||||
}
|
||||
|
||||
if rh.c.RepoDB != nil {
|
||||
err := rh.c.RepoDB.IncrementManifestDownloads(digest)
|
||||
if err != nil {
|
||||
rh.c.Log.Error().Err(err).Msg("unexpected error")
|
||||
response.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
response.Header().Set(constants.DistContentDigestKey, digest)
|
||||
WriteData(response, http.StatusOK, mediaType, content)
|
||||
}
|
||||
|
@ -494,11 +507,177 @@ func (rh *RouteHandler) UpdateManifest(response http.ResponseWriter, request *ht
|
|||
return
|
||||
}
|
||||
|
||||
if rh.c.RepoDB != nil {
|
||||
// check if the image is a signature
|
||||
isSignature, signatureType, signedManifestDigest, err := imageIsSignature(name, body, digest, reference,
|
||||
rh.c.StoreController)
|
||||
if err != nil {
|
||||
rh.c.Log.Error().Err(err).Msg("can't check if image is a signature or not")
|
||||
|
||||
if err = imgStore.DeleteImageManifest(name, reference); err != nil {
|
||||
rh.c.Log.Error().Err(err).Msgf("couldn't remove image manifest %s in repo %s", reference, name)
|
||||
}
|
||||
|
||||
response.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
metadataSuccessfullySet := true
|
||||
|
||||
if isSignature {
|
||||
err := rh.c.RepoDB.AddManifestSignature(signedManifestDigest, repodb.SignatureMetadata{
|
||||
SignatureType: signatureType,
|
||||
SignatureDigest: digest,
|
||||
})
|
||||
if err != nil {
|
||||
rh.c.Log.Error().Err(err).Msg("repodb: error while putting repo meta")
|
||||
metadataSuccessfullySet = false
|
||||
}
|
||||
} else {
|
||||
imageMetadata, err := newManifestMeta(name, body, digest, reference, rh.c.StoreController)
|
||||
if err == nil {
|
||||
err := rh.c.RepoDB.SetManifestMeta(digest, imageMetadata)
|
||||
if err != nil {
|
||||
rh.c.Log.Error().Err(err).Msg("repodb: error while putting image meta")
|
||||
metadataSuccessfullySet = false
|
||||
} else {
|
||||
// If SetManifestMeta is successful and SetRepoTag is not, the data inserted by SetManifestMeta
|
||||
// will be garbage collected later
|
||||
// Q: There will be a problem if we write a manifest without a tag
|
||||
// Q: When will we write a manifest where the reference will be a digest?
|
||||
err = rh.c.RepoDB.SetRepoTag(name, reference, digest)
|
||||
if err != nil {
|
||||
rh.c.Log.Error().Err(err).Msg("repodb: error while putting repo meta")
|
||||
metadataSuccessfullySet = false
|
||||
}
|
||||
}
|
||||
} else {
|
||||
metadataSuccessfullySet = false
|
||||
}
|
||||
}
|
||||
|
||||
if !metadataSuccessfullySet {
|
||||
rh.c.Log.Info().Msgf("uploding image meta was unsuccessful for tag %s in repo %s", reference, name)
|
||||
|
||||
if err = imgStore.DeleteImageManifest(name, reference); err != nil {
|
||||
rh.c.Log.Error().Err(err).Msgf("couldn't remove image manifest %s in repo %s", reference, name)
|
||||
}
|
||||
|
||||
response.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
response.Header().Set("Location", fmt.Sprintf("/v2/%s/manifests/%s", name, digest))
|
||||
response.Header().Set(constants.DistContentDigestKey, digest)
|
||||
response.WriteHeader(http.StatusCreated)
|
||||
}
|
||||
|
||||
// imageIsSignature checks if the given image (repo:tag) represents a signature. The function
|
||||
// returns:
|
||||
//
|
||||
// - bool: if the image is a signature or not
|
||||
//
|
||||
// - string: the type of signature
|
||||
//
|
||||
// - string: the digest of the image it signs
|
||||
//
|
||||
// - error: any errors that occur.
|
||||
func imageIsSignature(repoName string, manifestBlob []byte, manifestDigest, reference string,
|
||||
storeController storage.StoreController,
|
||||
) (bool, string, string, error) {
|
||||
var manifestContent artifactspec.Manifest
|
||||
|
||||
err := json.Unmarshal(manifestBlob, &manifestContent)
|
||||
if err != nil {
|
||||
return false, "", "", err
|
||||
}
|
||||
|
||||
// check notation signature
|
||||
if manifestContent.Subject != nil {
|
||||
imgStore := storeController.GetImageStore(repoName)
|
||||
|
||||
_, signedImageManifestDigest, _, err := imgStore.GetImageManifest(repoName,
|
||||
manifestContent.Subject.Digest.String())
|
||||
if err == nil && signedImageManifestDigest != "" {
|
||||
return true, "notation", signedImageManifestDigest, nil
|
||||
}
|
||||
}
|
||||
|
||||
// check cosign
|
||||
cosignTagRule := glob.MustCompile("sha256-*.sig")
|
||||
|
||||
if tag := reference; cosignTagRule.Match(reference) {
|
||||
prefixLen := len("sha256-")
|
||||
digestLen := 64
|
||||
signedImageManifestDigest := tag[prefixLen : prefixLen+digestLen]
|
||||
|
||||
var builder strings.Builder
|
||||
|
||||
builder.WriteString("sha256:")
|
||||
builder.WriteString(signedImageManifestDigest)
|
||||
signedImageManifestDigest = builder.String()
|
||||
|
||||
imgStore := storeController.GetImageStore(repoName)
|
||||
|
||||
_, signedImageManifestDigest, _, err := imgStore.GetImageManifest(repoName,
|
||||
signedImageManifestDigest)
|
||||
if err == nil && signedImageManifestDigest != "" {
|
||||
return true, "cosign", signedImageManifestDigest, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, "", "", nil
|
||||
}
|
||||
|
||||
func newManifestMeta(repoName string, manifestBlob []byte, digest, reference string,
|
||||
storeController storage.StoreController,
|
||||
) (repodb.ManifestMetadata, error) {
|
||||
const (
|
||||
configCount = 1
|
||||
manifestCount = 1
|
||||
)
|
||||
|
||||
var manifestMeta repodb.ManifestMetadata
|
||||
|
||||
var manifestContent ispec.Manifest
|
||||
|
||||
err := json.Unmarshal(manifestBlob, &manifestContent)
|
||||
if err != nil {
|
||||
return repodb.ManifestMetadata{}, err
|
||||
}
|
||||
|
||||
imgStore := storeController.GetImageStore(repoName)
|
||||
|
||||
configBlob, err := imgStore.GetBlobContent(repoName, manifestContent.Config.Digest.String())
|
||||
if err != nil {
|
||||
return repodb.ManifestMetadata{}, err
|
||||
}
|
||||
|
||||
var configContent ispec.Image
|
||||
|
||||
err = json.Unmarshal(configBlob, &configContent)
|
||||
if err != nil {
|
||||
return repodb.ManifestMetadata{}, err
|
||||
}
|
||||
|
||||
manifestMeta.BlobsSize = len(configBlob) + len(manifestBlob)
|
||||
for _, layer := range manifestContent.Layers {
|
||||
manifestMeta.BlobsSize += int(layer.Size)
|
||||
}
|
||||
|
||||
manifestMeta.BlobCount = configCount + manifestCount + len(manifestContent.Layers)
|
||||
manifestMeta.ManifestBlob = manifestBlob
|
||||
manifestMeta.ConfigBlob = configBlob
|
||||
|
||||
// manifestMeta.Dependants
|
||||
// manifestMeta.Dependencies
|
||||
|
||||
return manifestMeta, nil
|
||||
}
|
||||
|
||||
// DeleteManifest godoc
|
||||
// @Summary Delete image manifest
|
||||
// @Description Delete an image's manifest given a reference or a digest
|
||||
|
@ -527,7 +706,8 @@ func (rh *RouteHandler) DeleteManifest(response http.ResponseWriter, request *ht
|
|||
return
|
||||
}
|
||||
|
||||
err := imgStore.DeleteImageManifest(name, reference)
|
||||
// back up the manifest before deleting, so the image store can be restored if the RepoDB update fails
|
||||
manifestBlob, manifestDigest, mediaType, err := imgStore.GetImageManifest(name, reference)
|
||||
if err != nil {
|
||||
if errors.Is(err, zerr.ErrRepoNotFound) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain
|
||||
WriteJSON(response, http.StatusBadRequest,
|
||||
|
@ -546,6 +726,71 @@ func (rh *RouteHandler) DeleteManifest(response http.ResponseWriter, request *ht
|
|||
return
|
||||
}
|
||||
|
||||
err = imgStore.DeleteImageManifest(name, reference)
|
||||
if err != nil {
|
||||
if errors.Is(err, zerr.ErrRepoNotFound) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain
|
||||
WriteJSON(response, http.StatusBadRequest,
|
||||
NewErrorList(NewError(NAME_UNKNOWN, map[string]string{"name": name})))
|
||||
} else if errors.Is(err, zerr.ErrManifestNotFound) {
|
||||
WriteJSON(response, http.StatusNotFound,
|
||||
NewErrorList(NewError(MANIFEST_UNKNOWN, map[string]string{"reference": reference})))
|
||||
} else if errors.Is(err, zerr.ErrBadManifest) {
|
||||
WriteJSON(response, http.StatusBadRequest,
|
||||
NewErrorList(NewError(UNSUPPORTED, map[string]string{"reference": reference})))
|
||||
} else {
|
||||
rh.c.Log.Error().Err(err).Msg("unexpected error")
|
||||
response.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if rh.c.RepoDB != nil {
|
||||
isSignature, signatureType, signedManifestDigest, err := imageIsSignature(name, manifestBlob, manifestDigest,
|
||||
reference, rh.c.StoreController)
|
||||
if err != nil {
|
||||
rh.c.Log.Error().Err(err).Msg("can't check if image is a signature or not")
|
||||
response.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
manageRepoMetaSuccessfully := true
|
||||
|
||||
if isSignature {
|
||||
err := rh.c.RepoDB.DeleteSignature(signedManifestDigest, repodb.SignatureMetadata{
|
||||
SignatureDigest: manifestDigest,
|
||||
SignatureType: signatureType,
|
||||
})
|
||||
if err != nil {
|
||||
rh.c.Log.Error().Err(err).Msg("repodb: can't check if image is a signature or not")
|
||||
manageRepoMetaSuccessfully = false
|
||||
}
|
||||
} else {
|
||||
// Q: Should this work with digests also? For now it accepts only tags
|
||||
err := rh.c.RepoDB.DeleteRepoTag(name, reference)
|
||||
if err != nil {
|
||||
rh.c.Log.Info().Msg("repodb: restoring image store")
|
||||
|
||||
// restore image store
|
||||
_, err = imgStore.PutImageManifest(name, reference, mediaType, manifestBlob)
|
||||
if err != nil {
|
||||
rh.c.Log.Error().Err(err).Msg("repodb: error while restoring image store, database is not consistent")
|
||||
}
|
||||
|
||||
manageRepoMetaSuccessfully = false
|
||||
}
|
||||
}
|
||||
|
||||
if !manageRepoMetaSuccessfully {
|
||||
rh.c.Log.Info().Msgf("repodb: deleting image meta was unsuccessful for tag %s in repo %s", reference, name)
|
||||
|
||||
response.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
response.WriteHeader(http.StatusAccepted)
|
||||
}
|
||||
|
||||
|
|
|
@ -6,6 +6,7 @@ package cli //nolint:testpackage
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
@ -14,6 +15,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/resty.v1"
|
||||
|
@ -22,6 +24,9 @@ import (
|
|||
"zotregistry.io/zot/pkg/api/config"
|
||||
"zotregistry.io/zot/pkg/api/constants"
|
||||
extconf "zotregistry.io/zot/pkg/extensions/config"
|
||||
"zotregistry.io/zot/pkg/extensions/monitoring"
|
||||
"zotregistry.io/zot/pkg/log"
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
"zotregistry.io/zot/pkg/test"
|
||||
)
|
||||
|
||||
|
@ -786,6 +791,8 @@ func TestServerCVEResponse(t *testing.T) {
|
|||
}(ctlr)
|
||||
|
||||
Convey("Test CVE by image name", t, func() {
|
||||
err = triggerUploadForTestImages(port, url)
|
||||
|
||||
args := []string{"cvetest", "--image", "zot-cve-test:0.0.1"}
|
||||
configPath := makeConfigFile(fmt.Sprintf(`{"configs":[{"_name":"cvetest","url":"%s","showspinner":false}]}`, url))
|
||||
defer os.Remove(configPath)
|
||||
|
@ -930,6 +937,53 @@ func TestServerCVEResponse(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
// triggerUploadForTestImages is paired with testSetup and is supposed to trigger events when pushing an image
|
||||
// by pushing just the manifest.
|
||||
func triggerUploadForTestImages(port, baseURL string) error {
|
||||
log := log.NewLogger("debug", "")
|
||||
metrics := monitoring.NewMetricsServer(false, log)
|
||||
storage := storage.NewImageStore("../../test/data/", false, storage.DefaultGCDelay,
|
||||
false, false, log, metrics, nil)
|
||||
|
||||
repos, err := storage.GetRepositories()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, repo := range repos {
|
||||
indexBlob, err := storage.GetIndexContent(repo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var indexJSON ispec.Index
|
||||
|
||||
err = json.Unmarshal(indexBlob, &indexJSON)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, manifest := range indexJSON.Manifests {
|
||||
tag := manifest.Annotations[ispec.AnnotationRefName]
|
||||
|
||||
manifestBlob, _, _, err := storage.GetImageManifest(repo, tag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = resty.R().
|
||||
SetHeader("Content-type", "application/vnd.oci.image.manifest.v1+json").
|
||||
SetBody(manifestBlob).
|
||||
Put(baseURL + "/v2/" + repo + "/manifests/" + tag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func MockNewCveCommand(searchService SearchService) *cobra.Command {
|
||||
searchCveParams := make(map[string]*string)
|
||||
|
||||
|
|
|
@@ -16,6 +16,7 @@ import (
	"zotregistry.io/zot/pkg/extensions/search/gql_generated"
	"zotregistry.io/zot/pkg/log"
	"zotregistry.io/zot/pkg/storage"
	"zotregistry.io/zot/pkg/storage/repodb"
)

func EnableSearchExtension(config *config.Config, log log.Logger, rootDir string) {

@@ -56,7 +57,7 @@ func downloadTrivyDB(dbDir string, log log.Logger, updateInterval time.Duration)
}

func SetupSearchRoutes(config *config.Config, router *mux.Router, storeController storage.StoreController,
	authFunc mux.MiddlewareFunc, l log.Logger,
	authFunc mux.MiddlewareFunc, searchDB repodb.RepoDB, l log.Logger,
) {
	// fork a new zerolog child to avoid data race
	log := log.Logger{Logger: l.With().Caller().Timestamp().Logger()}

@@ -66,9 +67,9 @@ func SetupSearchRoutes(config *config.Config, router *mux.Router, storeControlle
	var resConfig gql_generated.Config

	if config.Extensions.Search.CVE != nil {
		resConfig = search.GetResolverConfig(log, storeController, true)
		resConfig = search.GetResolverConfig(log, storeController, searchDB, true)
	} else {
		resConfig = search.GetResolverConfig(log, storeController, false)
		resConfig = search.GetResolverConfig(log, storeController, searchDB, false)
	}

	extRouter := router.PathPrefix(constants.ExtSearchPrefix).Subrouter()

@@ -9,6 +9,7 @@ import (
	"zotregistry.io/zot/pkg/api/config"
	"zotregistry.io/zot/pkg/log"
	"zotregistry.io/zot/pkg/storage"
	"zotregistry.io/zot/pkg/storage/repodb"
)

// EnableSearchExtension ...

@@ -18,8 +19,8 @@ func EnableSearchExtension(config *config.Config, log log.Logger, rootDir string
}

// SetupSearchRoutes ...
func SetupSearchRoutes(conf *config.Config, router *mux.Router,
	storeController storage.StoreController, authFunc mux.MiddlewareFunc, log log.Logger,
func SetupSearchRoutes(config *config.Config, router *mux.Router, storeController storage.StoreController,
	repoDB repodb.RepoDB, authFunc mux.MiddlewareFunc, log log.Logger,
) {
	log.Warn().Msg("skipping setting up search routes because given zot binary doesn't include this feature, " +
		"please build a binary that does so")
@@ -15,7 +15,11 @@ const (
	LabelAnnotationCreated       = "org.label-schema.build-date"
	LabelAnnotationVendor        = "org.label-schema.vendor"
	LabelAnnotationDescription   = "org.label-schema.description"
	// Q: I don't see this in the compatibility table.
	LabelAnnotationLicenses      = "org.label-schema.license"
	LabelAnnotationTitle         = "org.label-schema.name"
	LabelAnnotationDocumentation = "org.label-schema.usage"
	LabelAnnotationSource        = "org.label-schema.vcs-url"
)

type TagInfo struct {
@@ -103,40 +107,53 @@ func GetRoutePrefix(name string) string {
	return fmt.Sprintf("/%s", names[0])
}

func GetDescription(labels map[string]string) string {
	desc, ok := labels[ispec.AnnotationDescription]
	if !ok {
		desc, ok = labels[LabelAnnotationDescription]
		if !ok {
			desc = ""
		}
	}

	return desc
type ImageAnnotations struct {
	Description   string
	Licenses      string
	Title         string
	Documentation string
	Source        string
	Labels        string
	Vendor        string
}

func GetLicense(labels map[string]string) string {
	license, ok := labels[ispec.AnnotationLicenses]
/* OCI annotation/label with backwards compatibility
arg can be either labels or annotations
https://github.com/opencontainers/image-spec/blob/main/annotations.md. */
func GetAnnotationValue(annotations map[string]string, annotationKey, labelKey string) string {
	value, ok := annotations[annotationKey]
	if !ok || value == "" {
		value, ok = annotations[labelKey]
		if !ok {
			license, ok = labels[LabelAnnotationLicenses]
			if !ok {
				license = ""
			value = ""
		}
	}

	return license
	return value
}

func GetVendor(labels map[string]string) string {
	vendor, ok := labels[ispec.AnnotationVendor]
	if !ok {
		vendor, ok = labels[LabelAnnotationVendor]
		if !ok {
			vendor = ""
		}
	}
func GetDescription(annotations map[string]string) string {
	return GetAnnotationValue(annotations, ispec.AnnotationDescription, LabelAnnotationDescription)
}

	return vendor
func GetLicenses(annotations map[string]string) string {
	return GetAnnotationValue(annotations, ispec.AnnotationLicenses, LabelAnnotationLicenses)
}

func GetVendor(annotations map[string]string) string {
	return GetAnnotationValue(annotations, ispec.AnnotationVendor, LabelAnnotationVendor)
}

func GetTitle(annotations map[string]string) string {
	return GetAnnotationValue(annotations, ispec.AnnotationTitle, LabelAnnotationTitle)
}

func GetDocumentation(annotations map[string]string) string {
	return GetAnnotationValue(annotations, ispec.AnnotationDocumentation, LabelAnnotationDocumentation)
}

func GetSource(annotations map[string]string) string {
	return GetAnnotationValue(annotations, ispec.AnnotationSource, LabelAnnotationSource)
}

func GetCategories(labels map[string]string) string {

@@ -144,3 +161,50 @@ func GetCategories(labels map[string]string) string {

	return categories
}

func GetAnnotations(annotations, labels map[string]string) ImageAnnotations {
	description := GetDescription(annotations)
	if description == "" {
		description = GetDescription(labels)
	}

	title := GetTitle(annotations)
	if title == "" {
		title = GetTitle(labels)
	}

	documentation := GetDocumentation(annotations)
	if documentation == "" {
		documentation = GetDocumentation(labels)
	}

	source := GetSource(annotations)
	if source == "" {
		source = GetSource(labels)
	}

	licenses := GetLicenses(annotations)
	if licenses == "" {
		licenses = GetLicenses(labels)
	}

	categories := GetCategories(annotations)
	if categories == "" {
		categories = GetCategories(labels)
	}

	vendor := GetVendor(annotations)
	if vendor == "" {
		vendor = GetVendor(labels)
	}

	return ImageAnnotations{
		Description:   description,
		Title:         title,
		Documentation: documentation,
		Source:        source,
		Licenses:      licenses,
		Labels:        categories,
		Vendor:        vendor,
	}
}
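A short usage sketch for the new annotation helpers: GetAnnotations prefers OCI annotations and falls back to the legacy label-schema labels key by key. The import path for the package holding these helpers is not visible in this hunk, so it is assumed below.

```go
// Sketch only: the "common" import path is an assumption; the helpers and the
// LabelAnnotation* constants come from the hunk above.
package main

import (
	"fmt"

	ispec "github.com/opencontainers/image-spec/specs-go/v1"

	common "zotregistry.io/zot/pkg/extensions/search/common" // assumed path
)

func main() {
	annotations := map[string]string{
		ispec.AnnotationTitle: "zot-cve-test", // OCI annotation present
		// no description annotation on purpose
	}
	labels := map[string]string{
		common.LabelAnnotationDescription: "image used by the CVE tests",
	}

	result := common.GetAnnotations(annotations, labels)

	// Title resolves from the OCI annotation, Description falls back to the
	// org.label-schema.description label.
	fmt.Println(result.Title, "/", result.Description)
}
```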
|
File diff suppressed because it is too large
|
@ -356,7 +356,7 @@ func (olu BaseOciLayoutUtils) GetImageConfigInfo(repo string, manifestDigest god
|
|||
}
|
||||
|
||||
func (olu BaseOciLayoutUtils) GetImageVendor(imageConfig ispec.Image) string {
|
||||
return imageConfig.Config.Labels["vendor"]
|
||||
return imageConfig.Config.Labels[ispec.AnnotationVendor]
|
||||
}
|
||||
|
||||
func (olu BaseOciLayoutUtils) GetImageManifestSize(repo string, manifestDigest godigest.Digest) int64 {
|
||||
|
|
|
@ -59,6 +59,7 @@ type ComplexityRoot struct {
|
|||
GlobalSearchResult struct {
|
||||
Images func(childComplexity int) int
|
||||
Layers func(childComplexity int) int
|
||||
Page func(childComplexity int) int
|
||||
Repos func(childComplexity int) int
|
||||
}
|
||||
|
||||
|
@ -66,6 +67,7 @@ type ComplexityRoot struct {
|
|||
ConfigDigest func(childComplexity int) int
|
||||
Description func(childComplexity int) int
|
||||
Digest func(childComplexity int) int
|
||||
Documentation func(childComplexity int) int
|
||||
DownloadCount func(childComplexity int) int
|
||||
IsSigned func(childComplexity int) int
|
||||
Labels func(childComplexity int) int
|
||||
|
@ -76,7 +78,9 @@ type ComplexityRoot struct {
|
|||
RepoName func(childComplexity int) int
|
||||
Score func(childComplexity int) int
|
||||
Size func(childComplexity int) int
|
||||
Source func(childComplexity int) int
|
||||
Tag func(childComplexity int) int
|
||||
Title func(childComplexity int) int
|
||||
Vendor func(childComplexity int) int
|
||||
}
|
||||
|
||||
|
@ -97,11 +101,18 @@ type ComplexityRoot struct {
|
|||
Name func(childComplexity int) int
|
||||
}
|
||||
|
||||
PageInfo struct {
|
||||
NextPage func(childComplexity int) int
|
||||
ObjectCount func(childComplexity int) int
|
||||
Pages func(childComplexity int) int
|
||||
PreviousPage func(childComplexity int) int
|
||||
}
|
||||
|
||||
Query struct {
|
||||
CVEListForImage func(childComplexity int, image string) int
|
||||
DerivedImageList func(childComplexity int, image string) int
|
||||
ExpandedRepoInfo func(childComplexity int, repo string) int
|
||||
GlobalSearch func(childComplexity int, query string) int
|
||||
GlobalSearch func(childComplexity int, query string, requestedPage *PageInput) int
|
||||
ImageList func(childComplexity int, repo string) int
|
||||
ImageListForCve func(childComplexity int, id string) int
|
||||
ImageListForDigest func(childComplexity int, id string) int
|
||||
|
@ -117,6 +128,7 @@ type ComplexityRoot struct {
|
|||
RepoSummary struct {
|
||||
DownloadCount func(childComplexity int) int
|
||||
IsBookmarked func(childComplexity int) int
|
||||
IsStarred func(childComplexity int) int
|
||||
LastUpdated func(childComplexity int) int
|
||||
Name func(childComplexity int) int
|
||||
NewestImage func(childComplexity int) int
|
||||
|
@ -136,7 +148,7 @@ type QueryResolver interface {
|
|||
RepoListWithNewestImage(ctx context.Context) ([]*RepoSummary, error)
|
||||
ImageList(ctx context.Context, repo string) ([]*ImageSummary, error)
|
||||
ExpandedRepoInfo(ctx context.Context, repo string) (*RepoInfo, error)
|
||||
GlobalSearch(ctx context.Context, query string) (*GlobalSearchResult, error)
|
||||
GlobalSearch(ctx context.Context, query string, requestedPage *PageInput) (*GlobalSearchResult, error)
|
||||
DerivedImageList(ctx context.Context, image string) ([]*ImageSummary, error)
|
||||
}
|
||||
|
||||
|
@ -218,6 +230,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
|
||||
return e.complexity.GlobalSearchResult.Layers(childComplexity), true
|
||||
|
||||
case "GlobalSearchResult.Page":
|
||||
if e.complexity.GlobalSearchResult.Page == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.GlobalSearchResult.Page(childComplexity), true
|
||||
|
||||
case "GlobalSearchResult.Repos":
|
||||
if e.complexity.GlobalSearchResult.Repos == nil {
|
||||
break
|
||||
|
@ -246,6 +265,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
|
||||
return e.complexity.ImageSummary.Digest(childComplexity), true
|
||||
|
||||
case "ImageSummary.Documentation":
|
||||
if e.complexity.ImageSummary.Documentation == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.ImageSummary.Documentation(childComplexity), true
|
||||
|
||||
case "ImageSummary.DownloadCount":
|
||||
if e.complexity.ImageSummary.DownloadCount == nil {
|
||||
break
|
||||
|
@ -316,6 +342,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
|
||||
return e.complexity.ImageSummary.Size(childComplexity), true
|
||||
|
||||
case "ImageSummary.Source":
|
||||
if e.complexity.ImageSummary.Source == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.ImageSummary.Source(childComplexity), true
|
||||
|
||||
case "ImageSummary.Tag":
|
||||
if e.complexity.ImageSummary.Tag == nil {
|
||||
break
|
||||
|
@ -323,6 +356,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
|
||||
return e.complexity.ImageSummary.Tag(childComplexity), true
|
||||
|
||||
case "ImageSummary.Title":
|
||||
if e.complexity.ImageSummary.Title == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.ImageSummary.Title(childComplexity), true
|
||||
|
||||
case "ImageSummary.Vendor":
|
||||
if e.complexity.ImageSummary.Vendor == nil {
|
||||
break
|
||||
|
@ -386,6 +426,34 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
|
||||
return e.complexity.PackageInfo.Name(childComplexity), true
|
||||
|
||||
case "PageInfo.NextPage":
|
||||
if e.complexity.PageInfo.NextPage == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.PageInfo.NextPage(childComplexity), true
|
||||
|
||||
case "PageInfo.ObjectCount":
|
||||
if e.complexity.PageInfo.ObjectCount == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.PageInfo.ObjectCount(childComplexity), true
|
||||
|
||||
case "PageInfo.Pages":
|
||||
if e.complexity.PageInfo.Pages == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.PageInfo.Pages(childComplexity), true
|
||||
|
||||
case "PageInfo.PreviousPage":
|
||||
if e.complexity.PageInfo.PreviousPage == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.PageInfo.PreviousPage(childComplexity), true
|
||||
|
||||
case "Query.CVEListForImage":
|
||||
if e.complexity.Query.CVEListForImage == nil {
|
||||
break
|
||||
|
@ -432,7 +500,7 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
return 0, false
|
||||
}
|
||||
|
||||
return e.complexity.Query.GlobalSearch(childComplexity, args["query"].(string)), true
|
||||
return e.complexity.Query.GlobalSearch(childComplexity, args["query"].(string), args["requestedPage"].(*PageInput)), true
|
||||
|
||||
case "Query.ImageList":
|
||||
if e.complexity.Query.ImageList == nil {
|
||||
|
@ -517,6 +585,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
|
||||
return e.complexity.RepoSummary.IsBookmarked(childComplexity), true
|
||||
|
||||
case "RepoSummary.IsStarred":
|
||||
if e.complexity.RepoSummary.IsStarred == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.RepoSummary.IsStarred(childComplexity), true
|
||||
|
||||
case "RepoSummary.LastUpdated":
|
||||
if e.complexity.RepoSummary.LastUpdated == nil {
|
||||
break
|
||||
|
@ -580,7 +655,9 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
|
||||
rc := graphql.GetOperationContext(ctx)
|
||||
ec := executionContext{rc, e}
|
||||
inputUnmarshalMap := graphql.BuildUnmarshalerMap()
|
||||
inputUnmarshalMap := graphql.BuildUnmarshalerMap(
|
||||
ec.unmarshalInputPageInput,
|
||||
)
|
||||
first := true
|
||||
|
||||
switch rc.Operation.Operation {
|
||||
|
@ -654,6 +731,7 @@ type RepoInfo {
|
|||
# Search results in all repos/images/layers
|
||||
# There will be other more structures for more detailed information
|
||||
type GlobalSearchResult {
|
||||
Page: PageInfo
|
||||
Images: [ImageSummary]
|
||||
Repos: [RepoSummary]
|
||||
Layers: [LayerSummary]
|
||||
|
@ -675,8 +753,11 @@ type ImageSummary {
|
|||
DownloadCount: Int
|
||||
Layers: [LayerSummary]
|
||||
Description: String
|
||||
Licenses: String
|
||||
Licenses: String # The value of the annotation if present, 'unknown' otherwise
|
||||
Labels: String
|
||||
Title: String
|
||||
Source: String
|
||||
Documentation: String
|
||||
}
|
||||
|
||||
# Brief on a specific repo to be used in queries returning a list of repos
|
||||
|
@ -687,10 +768,11 @@ type RepoSummary {
|
|||
Platforms: [OsArch]
|
||||
Vendors: [String]
|
||||
Score: Int
|
||||
NewestImage: ImageSummary
|
||||
NewestImage: ImageSummary # Newest based on created timestamp
|
||||
DownloadCount: Int
|
||||
StarCount: Int
|
||||
IsBookmarked: Boolean
|
||||
IsStarred: Boolean
|
||||
}
|
||||
|
||||
# Currently the same as LayerInfo, we can refactor later
|
||||
@@ -706,6 +788,29 @@ type OsArch {
  Arch: String
}

enum SortCriteria {
  RELEVANCE
  UPDATE_TIME
  ALPHABETIC_ASC
  ALPHABETIC_DSC
  STARS
  DOWNLOADS
}

type PageInfo {
  ObjectCount: Int!
  PreviousPage: Int
  NextPage: Int
  Pages: Int
}

# Pagination parameters
input PageInput {
  limit: Int
  offset: Int
  sortBy: SortCriteria
}

type Query {
  CVEListForImage(image: String!): CVEResultForImage!
  ImageListForCVE(id: String!): [ImageSummary!]

@@ -714,7 +819,7 @@ type Query {
  RepoListWithNewestImage: [RepoSummary!]! # Newest based on created timestamp
  ImageList(repo: String!): [ImageSummary!]
  ExpandedRepoInfo(repo: String!): RepoInfo!
  GlobalSearch(query: String!): GlobalSearchResult!
  GlobalSearch(query: String!, requestedPage: PageInput): GlobalSearchResult! # Return all images/repos/layers which match the query
  DerivedImageList(image: String!): [ImageSummary!]
}
`, BuiltIn: false},
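With the schema changes above, GlobalSearch accepts an optional requestedPage and reports PageInfo alongside the results. A hedged sketch of exercising it over HTTP, reusing the resty and constants.ExtSearchPrefix pattern from the route tests earlier in this diff (the base URL and the lack of auth are assumptions):

```go
// Sketch only: baseURL is a placeholder and no auth is set; the tests in this
// diff show the same call with SetBasicAuth when access control is enabled.
package main

import (
	"fmt"
	"net/url"

	"gopkg.in/resty.v1"

	"zotregistry.io/zot/pkg/api/constants"
)

func main() {
	baseURL := "http://127.0.0.1:8080" // placeholder

	// First 10 repos matching "testrepo", sorted by relevance, plus paging info.
	query := `
	{
		GlobalSearch(query:"testrepo", requestedPage:{limit:10, offset:0, sortBy:RELEVANCE}){
			Page { ObjectCount NextPage Pages }
			Repos { Name Score }
		}
	}`

	resp, err := resty.R().Get(baseURL + constants.ExtSearchPrefix +
		"?query=" + url.QueryEscape(query))
	if err != nil {
		panic(err)
	}

	fmt.Println(resp.StatusCode(), string(resp.Body()))
}
```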
@ -782,6 +887,15 @@ func (ec *executionContext) field_Query_GlobalSearch_args(ctx context.Context, r
|
|||
}
|
||||
}
|
||||
args["query"] = arg0
|
||||
var arg1 *PageInput
|
||||
if tmp, ok := rawArgs["requestedPage"]; ok {
|
||||
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("requestedPage"))
|
||||
arg1, err = ec.unmarshalOPageInput2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐPageInput(ctx, tmp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
args["requestedPage"] = arg1
|
||||
return args, nil
|
||||
}
|
||||
|
||||
|
@ -1214,6 +1328,57 @@ func (ec *executionContext) fieldContext_CVEResultForImage_CVEList(ctx context.C
|
|||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _GlobalSearchResult_Page(ctx context.Context, field graphql.CollectedField, obj *GlobalSearchResult) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_GlobalSearchResult_Page(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.Page, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(*PageInfo)
|
||||
fc.Result = res
|
||||
return ec.marshalOPageInfo2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐPageInfo(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_GlobalSearchResult_Page(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "GlobalSearchResult",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
switch field.Name {
|
||||
case "ObjectCount":
|
||||
return ec.fieldContext_PageInfo_ObjectCount(ctx, field)
|
||||
case "PreviousPage":
|
||||
return ec.fieldContext_PageInfo_PreviousPage(ctx, field)
|
||||
case "NextPage":
|
||||
return ec.fieldContext_PageInfo_NextPage(ctx, field)
|
||||
case "Pages":
|
||||
return ec.fieldContext_PageInfo_Pages(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type PageInfo", field.Name)
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _GlobalSearchResult_Images(ctx context.Context, field graphql.CollectedField, obj *GlobalSearchResult) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_GlobalSearchResult_Images(ctx, field)
|
||||
if err != nil {
|
||||
|
@ -1280,6 +1445,12 @@ func (ec *executionContext) fieldContext_GlobalSearchResult_Images(ctx context.C
|
|||
return ec.fieldContext_ImageSummary_Licenses(ctx, field)
|
||||
case "Labels":
|
||||
return ec.fieldContext_ImageSummary_Labels(ctx, field)
|
||||
case "Title":
|
||||
return ec.fieldContext_ImageSummary_Title(ctx, field)
|
||||
case "Source":
|
||||
return ec.fieldContext_ImageSummary_Source(ctx, field)
|
||||
case "Documentation":
|
||||
return ec.fieldContext_ImageSummary_Documentation(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type ImageSummary", field.Name)
|
||||
},
|
||||
|
@ -1343,6 +1514,8 @@ func (ec *executionContext) fieldContext_GlobalSearchResult_Repos(ctx context.Co
|
|||
return ec.fieldContext_RepoSummary_StarCount(ctx, field)
|
||||
case "IsBookmarked":
|
||||
return ec.fieldContext_RepoSummary_IsBookmarked(ctx, field)
|
||||
case "IsStarred":
|
||||
return ec.fieldContext_RepoSummary_IsStarred(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type RepoSummary", field.Name)
|
||||
},
|
||||
|
@ -2028,6 +2201,129 @@ func (ec *executionContext) fieldContext_ImageSummary_Labels(ctx context.Context
|
|||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _ImageSummary_Title(ctx context.Context, field graphql.CollectedField, obj *ImageSummary) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_ImageSummary_Title(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.Title, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(*string)
|
||||
fc.Result = res
|
||||
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_ImageSummary_Title(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "ImageSummary",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type String does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _ImageSummary_Source(ctx context.Context, field graphql.CollectedField, obj *ImageSummary) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_ImageSummary_Source(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.Source, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(*string)
|
||||
fc.Result = res
|
||||
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_ImageSummary_Source(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "ImageSummary",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type String does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _ImageSummary_Documentation(ctx context.Context, field graphql.CollectedField, obj *ImageSummary) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_ImageSummary_Documentation(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.Documentation, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(*string)
|
||||
fc.Result = res
|
||||
return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_ImageSummary_Documentation(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "ImageSummary",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type String does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _LayerSummary_Size(ctx context.Context, field graphql.CollectedField, obj *LayerSummary) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_LayerSummary_Size(ctx, field)
|
||||
if err != nil {
|
||||
|
@ -2356,6 +2652,173 @@ func (ec *executionContext) fieldContext_PackageInfo_FixedVersion(ctx context.Co
|
|||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _PageInfo_ObjectCount(ctx context.Context, field graphql.CollectedField, obj *PageInfo) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_PageInfo_ObjectCount(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.ObjectCount, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
if !graphql.HasFieldError(ctx, fc) {
|
||||
ec.Errorf(ctx, "must not be null")
|
||||
}
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(int)
|
||||
fc.Result = res
|
||||
return ec.marshalNInt2int(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_PageInfo_ObjectCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "PageInfo",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type Int does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _PageInfo_PreviousPage(ctx context.Context, field graphql.CollectedField, obj *PageInfo) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_PageInfo_PreviousPage(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.PreviousPage, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(*int)
|
||||
fc.Result = res
|
||||
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_PageInfo_PreviousPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "PageInfo",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type Int does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _PageInfo_NextPage(ctx context.Context, field graphql.CollectedField, obj *PageInfo) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_PageInfo_NextPage(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.NextPage, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(*int)
|
||||
fc.Result = res
|
||||
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_PageInfo_NextPage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "PageInfo",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type Int does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _PageInfo_Pages(ctx context.Context, field graphql.CollectedField, obj *PageInfo) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_PageInfo_Pages(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.Pages, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(*int)
|
||||
fc.Result = res
|
||||
return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_PageInfo_Pages(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "PageInfo",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type Int does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _Query_CVEListForImage(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_Query_CVEListForImage(ctx, field)
|
||||
if err != nil {
|
||||
|
@ -2483,6 +2946,12 @@ func (ec *executionContext) fieldContext_Query_ImageListForCVE(ctx context.Conte
|
|||
return ec.fieldContext_ImageSummary_Licenses(ctx, field)
|
||||
case "Labels":
|
||||
return ec.fieldContext_ImageSummary_Labels(ctx, field)
|
||||
case "Title":
|
||||
return ec.fieldContext_ImageSummary_Title(ctx, field)
|
||||
case "Source":
|
||||
return ec.fieldContext_ImageSummary_Source(ctx, field)
|
||||
case "Documentation":
|
||||
return ec.fieldContext_ImageSummary_Documentation(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type ImageSummary", field.Name)
|
||||
},
|
||||
|
@ -2567,6 +3036,12 @@ func (ec *executionContext) fieldContext_Query_ImageListWithCVEFixed(ctx context
|
|||
return ec.fieldContext_ImageSummary_Licenses(ctx, field)
|
||||
case "Labels":
|
||||
return ec.fieldContext_ImageSummary_Labels(ctx, field)
|
||||
case "Title":
|
||||
return ec.fieldContext_ImageSummary_Title(ctx, field)
|
||||
case "Source":
|
||||
return ec.fieldContext_ImageSummary_Source(ctx, field)
|
||||
case "Documentation":
|
||||
return ec.fieldContext_ImageSummary_Documentation(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type ImageSummary", field.Name)
|
||||
},
|
||||
|
@ -2651,6 +3126,12 @@ func (ec *executionContext) fieldContext_Query_ImageListForDigest(ctx context.Co
|
|||
return ec.fieldContext_ImageSummary_Licenses(ctx, field)
|
||||
case "Labels":
|
||||
return ec.fieldContext_ImageSummary_Labels(ctx, field)
|
||||
case "Title":
|
||||
return ec.fieldContext_ImageSummary_Title(ctx, field)
|
||||
case "Source":
|
||||
return ec.fieldContext_ImageSummary_Source(ctx, field)
|
||||
case "Documentation":
|
||||
return ec.fieldContext_ImageSummary_Documentation(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type ImageSummary", field.Name)
|
||||
},
|
||||
|
@ -2728,6 +3209,8 @@ func (ec *executionContext) fieldContext_Query_RepoListWithNewestImage(ctx conte
|
|||
return ec.fieldContext_RepoSummary_StarCount(ctx, field)
|
||||
case "IsBookmarked":
|
||||
return ec.fieldContext_RepoSummary_IsBookmarked(ctx, field)
|
||||
case "IsStarred":
|
||||
return ec.fieldContext_RepoSummary_IsStarred(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type RepoSummary", field.Name)
|
||||
},
|
||||
|
@ -2801,6 +3284,12 @@ func (ec *executionContext) fieldContext_Query_ImageList(ctx context.Context, fi
|
|||
return ec.fieldContext_ImageSummary_Licenses(ctx, field)
|
||||
case "Labels":
|
||||
return ec.fieldContext_ImageSummary_Labels(ctx, field)
|
||||
case "Title":
|
||||
return ec.fieldContext_ImageSummary_Title(ctx, field)
|
||||
case "Source":
|
||||
return ec.fieldContext_ImageSummary_Source(ctx, field)
|
||||
case "Documentation":
|
||||
return ec.fieldContext_ImageSummary_Documentation(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type ImageSummary", field.Name)
|
||||
},
|
||||
|
@ -2894,7 +3383,7 @@ func (ec *executionContext) _Query_GlobalSearch(ctx context.Context, field graph
|
|||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return ec.resolvers.Query().GlobalSearch(rctx, fc.Args["query"].(string))
|
||||
return ec.resolvers.Query().GlobalSearch(rctx, fc.Args["query"].(string), fc.Args["requestedPage"].(*PageInput))
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
|
@ -2919,6 +3408,8 @@ func (ec *executionContext) fieldContext_Query_GlobalSearch(ctx context.Context,
|
|||
IsResolver: true,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
switch field.Name {
|
||||
case "Page":
|
||||
return ec.fieldContext_GlobalSearchResult_Page(ctx, field)
|
||||
case "Images":
|
||||
return ec.fieldContext_GlobalSearchResult_Images(ctx, field)
|
||||
case "Repos":
|
||||
|
@ -3009,6 +3500,12 @@ func (ec *executionContext) fieldContext_Query_DerivedImageList(ctx context.Cont
|
|||
return ec.fieldContext_ImageSummary_Licenses(ctx, field)
|
||||
case "Labels":
|
||||
return ec.fieldContext_ImageSummary_Labels(ctx, field)
|
||||
case "Title":
|
||||
return ec.fieldContext_ImageSummary_Title(ctx, field)
|
||||
case "Source":
|
||||
return ec.fieldContext_ImageSummary_Source(ctx, field)
|
||||
case "Documentation":
|
||||
return ec.fieldContext_ImageSummary_Documentation(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type ImageSummary", field.Name)
|
||||
},
|
||||
|
@ -3222,6 +3719,12 @@ func (ec *executionContext) fieldContext_RepoInfo_Images(ctx context.Context, fi
|
|||
return ec.fieldContext_ImageSummary_Licenses(ctx, field)
|
||||
case "Labels":
|
||||
return ec.fieldContext_ImageSummary_Labels(ctx, field)
|
||||
case "Title":
|
||||
return ec.fieldContext_ImageSummary_Title(ctx, field)
|
||||
case "Source":
|
||||
return ec.fieldContext_ImageSummary_Source(ctx, field)
|
||||
case "Documentation":
|
||||
return ec.fieldContext_ImageSummary_Documentation(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type ImageSummary", field.Name)
|
||||
},
|
||||
|
@ -3285,6 +3788,8 @@ func (ec *executionContext) fieldContext_RepoInfo_Summary(ctx context.Context, f
|
|||
return ec.fieldContext_RepoSummary_StarCount(ctx, field)
|
||||
case "IsBookmarked":
|
||||
return ec.fieldContext_RepoSummary_IsBookmarked(ctx, field)
|
||||
case "IsStarred":
|
||||
return ec.fieldContext_RepoSummary_IsStarred(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type RepoSummary", field.Name)
|
||||
},
|
||||
|
@ -3610,6 +4115,12 @@ func (ec *executionContext) fieldContext_RepoSummary_NewestImage(ctx context.Con
|
|||
return ec.fieldContext_ImageSummary_Licenses(ctx, field)
|
||||
case "Labels":
|
||||
return ec.fieldContext_ImageSummary_Labels(ctx, field)
|
||||
case "Title":
|
||||
return ec.fieldContext_ImageSummary_Title(ctx, field)
|
||||
case "Source":
|
||||
return ec.fieldContext_ImageSummary_Source(ctx, field)
|
||||
case "Documentation":
|
||||
return ec.fieldContext_ImageSummary_Documentation(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type ImageSummary", field.Name)
|
||||
},
|
||||
|
@ -3740,6 +4251,47 @@ func (ec *executionContext) fieldContext_RepoSummary_IsBookmarked(ctx context.Co
|
|||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _RepoSummary_IsStarred(ctx context.Context, field graphql.CollectedField, obj *RepoSummary) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_RepoSummary_IsStarred(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.IsStarred, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(*bool)
|
||||
fc.Result = res
|
||||
return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_RepoSummary_IsStarred(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "RepoSummary",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type Boolean does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext___Directive_name(ctx, field)
|
||||
if err != nil {
|
||||
|
@ -5513,6 +6065,50 @@ func (ec *executionContext) fieldContext___Type_specifiedByURL(ctx context.Conte
|
|||
|
||||
// region **************************** input.gotpl *****************************
|
||||
|
||||
func (ec *executionContext) unmarshalInputPageInput(ctx context.Context, obj interface{}) (PageInput, error) {
|
||||
var it PageInput
|
||||
asMap := map[string]interface{}{}
|
||||
for k, v := range obj.(map[string]interface{}) {
|
||||
asMap[k] = v
|
||||
}
|
||||
|
||||
fieldsInOrder := [...]string{"limit", "offset", "sortBy"}
|
||||
for _, k := range fieldsInOrder {
|
||||
v, ok := asMap[k]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
switch k {
|
||||
case "limit":
|
||||
var err error
|
||||
|
||||
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("limit"))
|
||||
it.Limit, err = ec.unmarshalOInt2ᚖint(ctx, v)
|
||||
if err != nil {
|
||||
return it, err
|
||||
}
|
||||
case "offset":
|
||||
var err error
|
||||
|
||||
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("offset"))
|
||||
it.Offset, err = ec.unmarshalOInt2ᚖint(ctx, v)
|
||||
if err != nil {
|
||||
return it, err
|
||||
}
|
||||
case "sortBy":
|
||||
var err error
|
||||
|
||||
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("sortBy"))
|
||||
it.SortBy, err = ec.unmarshalOSortCriteria2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐSortCriteria(ctx, v)
|
||||
if err != nil {
|
||||
return it, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return it, nil
|
||||
}
|
||||
|
||||
// endregion **************************** input.gotpl *****************************
|
||||
|
||||
// region ************************** interface.gotpl ***************************
|
||||
|
@ -5601,6 +6197,10 @@ func (ec *executionContext) _GlobalSearchResult(ctx context.Context, sel ast.Sel
|
|||
switch field.Name {
|
||||
case "__typename":
|
||||
out.Values[i] = graphql.MarshalString("GlobalSearchResult")
|
||||
case "Page":
|
||||
|
||||
out.Values[i] = ec._GlobalSearchResult_Page(ctx, field, obj)
|
||||
|
||||
case "Images":
|
||||
|
||||
out.Values[i] = ec._GlobalSearchResult_Images(ctx, field, obj)
|
||||
|
@ -5694,6 +6294,18 @@ func (ec *executionContext) _ImageSummary(ctx context.Context, sel ast.Selection
|
|||
|
||||
out.Values[i] = ec._ImageSummary_Labels(ctx, field, obj)
|
||||
|
||||
case "Title":
|
||||
|
||||
out.Values[i] = ec._ImageSummary_Title(ctx, field, obj)
|
||||
|
||||
case "Source":
|
||||
|
||||
out.Values[i] = ec._ImageSummary_Source(ctx, field, obj)
|
||||
|
||||
case "Documentation":
|
||||
|
||||
out.Values[i] = ec._ImageSummary_Documentation(ctx, field, obj)
|
||||
|
||||
default:
|
||||
panic("unknown field " + strconv.Quote(field.Name))
|
||||
}
|
||||
|
@ -5800,6 +6412,46 @@ func (ec *executionContext) _PackageInfo(ctx context.Context, sel ast.SelectionS
|
|||
return out
|
||||
}
|
||||
|
||||
var pageInfoImplementors = []string{"PageInfo"}
|
||||
|
||||
func (ec *executionContext) _PageInfo(ctx context.Context, sel ast.SelectionSet, obj *PageInfo) graphql.Marshaler {
|
||||
fields := graphql.CollectFields(ec.OperationContext, sel, pageInfoImplementors)
|
||||
out := graphql.NewFieldSet(fields)
|
||||
var invalids uint32
|
||||
for i, field := range fields {
|
||||
switch field.Name {
|
||||
case "__typename":
|
||||
out.Values[i] = graphql.MarshalString("PageInfo")
|
||||
case "ObjectCount":
|
||||
|
||||
out.Values[i] = ec._PageInfo_ObjectCount(ctx, field, obj)
|
||||
|
||||
if out.Values[i] == graphql.Null {
|
||||
invalids++
|
||||
}
|
||||
case "PreviousPage":
|
||||
|
||||
out.Values[i] = ec._PageInfo_PreviousPage(ctx, field, obj)
|
||||
|
||||
case "NextPage":
|
||||
|
||||
out.Values[i] = ec._PageInfo_NextPage(ctx, field, obj)
|
||||
|
||||
case "Pages":
|
||||
|
||||
out.Values[i] = ec._PageInfo_Pages(ctx, field, obj)
|
||||
|
||||
default:
|
||||
panic("unknown field " + strconv.Quote(field.Name))
|
||||
}
|
||||
}
|
||||
out.Dispatch()
|
||||
if invalids > 0 {
|
||||
return graphql.Null
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
var queryImplementors = []string{"Query"}
|
||||
|
||||
func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler {
|
||||
|
@ -6113,6 +6765,10 @@ func (ec *executionContext) _RepoSummary(ctx context.Context, sel ast.SelectionS
|
|||
|
||||
out.Values[i] = ec._RepoSummary_IsBookmarked(ctx, field, obj)
|
||||
|
||||
case "IsStarred":
|
||||
|
||||
out.Values[i] = ec._RepoSummary_IsStarred(ctx, field, obj)
|
||||
|
||||
default:
|
||||
panic("unknown field " + strconv.Quote(field.Name))
|
||||
}
|
||||
|
@ -6495,6 +7151,21 @@ func (ec *executionContext) marshalNImageSummary2ᚖzotregistryᚗioᚋzotᚋpkg
|
|||
return ec._ImageSummary(ctx, sel, v)
|
||||
}
|
||||
|
||||
func (ec *executionContext) unmarshalNInt2int(ctx context.Context, v interface{}) (int, error) {
|
||||
res, err := graphql.UnmarshalInt(v)
|
||||
return res, graphql.ErrorOnPath(ctx, err)
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalNInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler {
|
||||
res := graphql.MarshalInt(v)
|
||||
if res == graphql.Null {
|
||||
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
|
||||
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalNRepoInfo2zotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐRepoInfo(ctx context.Context, sel ast.SelectionSet, v RepoInfo) graphql.Marshaler {
|
||||
return ec._RepoInfo(ctx, sel, &v)
|
||||
}
|
||||
|
@ -7160,6 +7831,21 @@ func (ec *executionContext) marshalOPackageInfo2ᚖzotregistryᚗioᚋzotᚋpkg
|
|||
return ec._PackageInfo(ctx, sel, v)
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalOPageInfo2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐPageInfo(ctx context.Context, sel ast.SelectionSet, v *PageInfo) graphql.Marshaler {
|
||||
if v == nil {
|
||||
return graphql.Null
|
||||
}
|
||||
return ec._PageInfo(ctx, sel, v)
|
||||
}
|
||||
|
||||
func (ec *executionContext) unmarshalOPageInput2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐPageInput(ctx context.Context, v interface{}) (*PageInput, error) {
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
res, err := ec.unmarshalInputPageInput(ctx, v)
|
||||
return &res, graphql.ErrorOnPath(ctx, err)
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalORepoSummary2ᚕᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐRepoSummary(ctx context.Context, sel ast.SelectionSet, v []*RepoSummary) graphql.Marshaler {
|
||||
if v == nil {
|
||||
return graphql.Null
|
||||
|
@ -7208,6 +7894,22 @@ func (ec *executionContext) marshalORepoSummary2ᚖzotregistryᚗioᚋzotᚋpkg
|
|||
return ec._RepoSummary(ctx, sel, v)
|
||||
}
|
||||
|
||||
func (ec *executionContext) unmarshalOSortCriteria2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐSortCriteria(ctx context.Context, v interface{}) (*SortCriteria, error) {
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var res = new(SortCriteria)
|
||||
err := res.UnmarshalGQL(v)
|
||||
return res, graphql.ErrorOnPath(ctx, err)
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalOSortCriteria2ᚖzotregistryᚗioᚋzotᚋpkgᚋextensionsᚋsearchᚋgql_generatedᚐSortCriteria(ctx context.Context, sel ast.SelectionSet, v *SortCriteria) graphql.Marshaler {
|
||||
if v == nil {
|
||||
return graphql.Null
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (ec *executionContext) unmarshalOString2ᚕᚖstring(ctx context.Context, v interface{}) ([]*string, error) {
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
|
|
|
@ -3,6 +3,9 @@
|
|||
package gql_generated
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -20,6 +23,7 @@ type CVEResultForImage struct {
|
|||
}
|
||||
|
||||
type GlobalSearchResult struct {
|
||||
Page *PageInfo `json:"Page"`
|
||||
Images []*ImageSummary `json:"Images"`
|
||||
Repos []*RepoSummary `json:"Repos"`
|
||||
Layers []*LayerSummary `json:"Layers"`
|
||||
|
@ -41,6 +45,9 @@ type ImageSummary struct {
|
|||
Description *string `json:"Description"`
|
||||
Licenses *string `json:"Licenses"`
|
||||
Labels *string `json:"Labels"`
|
||||
Title *string `json:"Title"`
|
||||
Source *string `json:"Source"`
|
||||
Documentation *string `json:"Documentation"`
|
||||
}
|
||||
|
||||
type LayerSummary struct {
|
||||
|
@ -60,6 +67,19 @@ type PackageInfo struct {
|
|||
FixedVersion *string `json:"FixedVersion"`
|
||||
}
|
||||
|
||||
type PageInfo struct {
|
||||
ObjectCount int `json:"ObjectCount"`
|
||||
PreviousPage *int `json:"PreviousPage"`
|
||||
NextPage *int `json:"NextPage"`
|
||||
Pages *int `json:"Pages"`
|
||||
}
|
||||
|
||||
type PageInput struct {
|
||||
Limit *int `json:"limit"`
|
||||
Offset *int `json:"offset"`
|
||||
SortBy *SortCriteria `json:"sortBy"`
|
||||
}
|
||||
|
||||
type RepoInfo struct {
|
||||
Images []*ImageSummary `json:"Images"`
|
||||
Summary *RepoSummary `json:"Summary"`
|
||||
|
@ -76,4 +96,54 @@ type RepoSummary struct {
|
|||
DownloadCount *int `json:"DownloadCount"`
|
||||
StarCount *int `json:"StarCount"`
|
||||
IsBookmarked *bool `json:"IsBookmarked"`
|
||||
IsStarred *bool `json:"IsStarred"`
|
||||
}
|
||||
|
||||
type SortCriteria string
|
||||
|
||||
const (
|
||||
SortCriteriaRelevance SortCriteria = "RELEVANCE"
|
||||
SortCriteriaUpdateTime SortCriteria = "UPDATE_TIME"
|
||||
SortCriteriaAlphabeticAsc SortCriteria = "ALPHABETIC_ASC"
|
||||
SortCriteriaAlphabeticDsc SortCriteria = "ALPHABETIC_DSC"
|
||||
SortCriteriaStars SortCriteria = "STARS"
|
||||
SortCriteriaDownloads SortCriteria = "DOWNLOADS"
|
||||
)
|
||||
|
||||
var AllSortCriteria = []SortCriteria{
|
||||
SortCriteriaRelevance,
|
||||
SortCriteriaUpdateTime,
|
||||
SortCriteriaAlphabeticAsc,
|
||||
SortCriteriaAlphabeticDsc,
|
||||
SortCriteriaStars,
|
||||
SortCriteriaDownloads,
|
||||
}
|
||||
|
||||
func (e SortCriteria) IsValid() bool {
|
||||
switch e {
|
||||
case SortCriteriaRelevance, SortCriteriaUpdateTime, SortCriteriaAlphabeticAsc, SortCriteriaAlphabeticDsc, SortCriteriaStars, SortCriteriaDownloads:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (e SortCriteria) String() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
func (e *SortCriteria) UnmarshalGQL(v interface{}) error {
|
||||
str, ok := v.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("enums must be strings")
|
||||
}
|
||||
|
||||
*e = SortCriteria(str)
|
||||
if !e.IsValid() {
|
||||
return fmt.Errorf("%s is not a valid SortCriteria", str)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e SortCriteria) MarshalGQL(w io.Writer) {
|
||||
fmt.Fprint(w, strconv.Quote(e.String()))
|
||||
}
|
||||
|
|
|
@ -6,15 +6,19 @@ package search
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"sort"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/99designs/gqlgen/graphql"
|
||||
glob "github.com/bmatcuk/doublestar/v4" // nolint:gci
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1" // nolint:gci
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
"zotregistry.io/zot/pkg/storage/repodb"
|
||||
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/vektah/gqlparser/v2/gqlerror"
|
||||
"zotregistry.io/zot/pkg/extensions/search/common"
|
||||
|
@ -29,6 +33,7 @@ import (
|
|||
// Resolver ...
|
||||
type Resolver struct {
|
||||
cveInfo *cveinfo.CveInfo
|
||||
repoDB repodb.RepoDB
|
||||
storeController storage.StoreController
|
||||
digestInfo *digestinfo.DigestInfo
|
||||
log log.Logger
|
||||
|
@ -44,7 +49,9 @@ type cveDetail struct {
|
|||
var ErrBadCtxFormat = errors.New("type assertion failed")
|
||||
|
||||
// GetResolverConfig ...
|
||||
func GetResolverConfig(log log.Logger, storeController storage.StoreController, enableCVE bool) gql_generated.Config {
|
||||
func GetResolverConfig(log log.Logger, storeController storage.StoreController, repoDB repodb.RepoDB,
|
||||
enableCVE bool,
|
||||
) gql_generated.Config {
|
||||
var cveInfo *cveinfo.CveInfo
|
||||
|
||||
var err error
|
||||
|
@ -58,7 +65,13 @@ func GetResolverConfig(log log.Logger, storeController storage.StoreController,
|
|||
|
||||
digestInfo := digestinfo.NewDigestInfo(storeController, log)
|
||||
|
||||
resConfig := &Resolver{cveInfo: cveInfo, storeController: storeController, digestInfo: digestInfo, log: log}
|
||||
resConfig := &Resolver{
|
||||
cveInfo: cveInfo,
|
||||
repoDB: repoDB,
|
||||
storeController: storeController,
|
||||
digestInfo: digestInfo,
|
||||
log: log,
|
||||
}
|
||||
|
||||
return gql_generated.Config{
|
||||
Resolvers: resConfig, Directives: gql_generated.DirectiveRoot{},
|
||||
|
@ -238,170 +251,380 @@ func (r *queryResolver) repoListWithNewestImage(ctx context.Context, store stora
|
|||
}
|
||||
|
||||
func cleanQuerry(query string) string {
|
||||
query = strings.ToLower(query)
|
||||
query = strings.Replace(query, ":", " ", 1)
|
||||
query = strings.TrimSpace(query)
|
||||
|
||||
return query
|
||||
}
|
||||
|
||||
func globalSearch(repoList []string, name, tag string, olu common.OciLayoutUtils, log log.Logger) (
|
||||
[]*gql_generated.RepoSummary, []*gql_generated.ImageSummary, []*gql_generated.LayerSummary,
|
||||
func globalSearch(ctx context.Context, query string, repoDB repodb.RepoDB, requestedPage *gql_generated.PageInput,
|
||||
log log.Logger,
|
||||
) ([]*gql_generated.RepoSummary, []*gql_generated.ImageSummary, []*gql_generated.LayerSummary, error,
|
||||
) {
|
||||
repos := []*gql_generated.RepoSummary{}
|
||||
images := []*gql_generated.ImageSummary{}
|
||||
layers := []*gql_generated.LayerSummary{}
|
||||
|
||||
for _, repo := range repoList {
|
||||
repo := repo
|
||||
|
||||
// map used for dedupe if 2 images reference the same blob
|
||||
repoBlob2Size := make(map[string]int64, 10)
|
||||
|
||||
// made up of all manifests, configs and image layers
|
||||
repoSize := int64(0)
|
||||
|
||||
lastUpdatedTag, err := olu.GetRepoLastUpdated(repo)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("can't find latest updated tag for repo: %s", repo)
|
||||
if requestedPage == nil {
|
||||
requestedPage = &gql_generated.PageInput{}
|
||||
}
|
||||
|
||||
manifests, err := olu.GetImageManifests(repo)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("can't get manifests for repo: %s", repo)
|
||||
|
||||
continue
|
||||
if searchingForRepos(query) {
|
||||
limit := 0
|
||||
if requestedPage.Limit != nil {
|
||||
limit = *requestedPage.Limit
|
||||
}
|
||||
|
||||
var lastUpdatedImageSummary gql_generated.ImageSummary
|
||||
|
||||
repoPlatforms := make([]*gql_generated.OsArch, 0, len(manifests))
|
||||
repoVendors := make([]*string, 0, len(manifests))
|
||||
|
||||
for i, manifest := range manifests {
|
||||
imageLayersSize := int64(0)
|
||||
|
||||
manifestTag, ok := manifest.Annotations[ispec.AnnotationRefName]
|
||||
if !ok {
|
||||
log.Error().Msg("reference not found for this manifest")
|
||||
|
||||
continue
|
||||
offset := 0
|
||||
if requestedPage.Offset != nil {
|
||||
offset = *requestedPage.Offset
|
||||
}
|
||||
|
||||
imageBlobManifest, err := olu.GetImageBlobManifest(repo, manifests[i].Digest)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("can't read manifest for repo %s %s", repo, manifestTag)
|
||||
|
||||
continue
|
||||
sortBy := gql_generated.SortCriteriaRelevance
|
||||
if requestedPage.SortBy != nil {
|
||||
sortBy = *requestedPage.SortBy
|
||||
}
|
||||
|
||||
manifestSize := olu.GetImageManifestSize(repo, manifests[i].Digest)
|
||||
configSize := imageBlobManifest.Config.Size
|
||||
|
||||
repoBlob2Size[manifests[i].Digest.String()] = manifestSize
|
||||
repoBlob2Size[imageBlobManifest.Config.Digest.Hex] = configSize
|
||||
|
||||
for _, layer := range imageBlobManifest.Layers {
|
||||
layer := layer
|
||||
layerDigest := layer.Digest.String()
|
||||
layerSizeStr := strconv.Itoa(int(layer.Size))
|
||||
repoBlob2Size[layer.Digest.String()] = layer.Size
|
||||
imageLayersSize += layer.Size
|
||||
|
||||
// if we have a tag we won't match a layer
|
||||
if tag != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if index := strings.Index(layerDigest, name); index != -1 {
|
||||
layers = append(layers, &gql_generated.LayerSummary{
|
||||
Digest: &layerDigest,
|
||||
Size: &layerSizeStr,
|
||||
Score: &index,
|
||||
reposMeta, manifestMetaMap, err := repoDB.SearchRepos(ctx, query, repodb.PageInput{
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
SortBy: repodb.SortCriteria(sortBy),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
imageSize := imageLayersSize + manifestSize + configSize
|
||||
|
||||
index := strings.Index(repo, name)
|
||||
matchesTag := strings.HasPrefix(manifestTag, tag)
|
||||
|
||||
if index != -1 {
|
||||
imageConfigInfo, err := olu.GetImageConfigInfo(repo, manifests[i].Digest)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("can't retrieve config info for the image %s %s", repo, manifestTag)
|
||||
return []*gql_generated.RepoSummary{}, []*gql_generated.ImageSummary{}, []*gql_generated.LayerSummary{}, err
|
||||
}
|
||||
|
||||
for _, repoMeta := range reposMeta {
|
||||
repoSummary := RepoMeta2RepoSummary(ctx, repoMeta, manifestMetaMap)
|
||||
|
||||
*repoSummary.Score = calculateImageMatchingScore(repoMeta.Name, strings.Index(repoMeta.Name, query))
|
||||
repos = append(repos, repoSummary)
|
||||
}
|
||||
} else { // search for images
|
||||
limit := 0
|
||||
if requestedPage.Limit != nil {
|
||||
limit = *requestedPage.Limit
|
||||
}
|
||||
|
||||
offset := 0
|
||||
if requestedPage.Offset != nil {
|
||||
offset = *requestedPage.Offset
|
||||
}
|
||||
|
||||
sortBy := gql_generated.SortCriteriaRelevance
|
||||
if requestedPage.SortBy != nil {
|
||||
sortBy = *requestedPage.SortBy
|
||||
}
|
||||
reposMeta, manifestMetaMap, err := repoDB.SearchTags(ctx, query, repodb.PageInput{
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
SortBy: repodb.SortCriteria(sortBy),
|
||||
})
|
||||
if err != nil {
|
||||
return []*gql_generated.RepoSummary{}, []*gql_generated.ImageSummary{}, []*gql_generated.LayerSummary{}, err
|
||||
}
|
||||
|
||||
for _, repoMeta := range reposMeta {
|
||||
imageSummaries := RepoMeta2ImageSummaries(ctx, repoMeta, manifestMetaMap)
|
||||
|
||||
images = append(images, imageSummaries...)
|
||||
}
|
||||
}
|
||||
|
||||
return repos, images, layers, nil
|
||||
}
|
||||
|
||||
func RepoMeta2ImageSummaries(ctx context.Context, repoMeta repodb.RepoMetadata,
|
||||
manifestMetaMap map[string]repodb.ManifestMetadata,
|
||||
) []*gql_generated.ImageSummary {
|
||||
imageSummaries := make([]*gql_generated.ImageSummary, 0, len(repoMeta.Tags))
|
||||
|
||||
for tag, manifestDigest := range repoMeta.Tags {
|
||||
var manifestContent ispec.Manifest
|
||||
|
||||
err := json.Unmarshal(manifestMetaMap[manifestDigest].ManifestBlob, &manifestContent)
|
||||
if err != nil {
|
||||
graphql.AddError(ctx, gqlerror.Errorf("can't unmarshal manifest blob for image: %s:%s, "+
|
||||
"manifest digest: %s, error: %s", repoMeta.Name, tag, manifestDigest, err.Error()))
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
size := strconv.Itoa(int(imageSize))
|
||||
isSigned := olu.CheckManifestSignature(repo, manifests[i].Digest)
|
||||
var configContent ispec.Image
|
||||
|
||||
// update matching score
|
||||
score := calculateImageMatchingScore(repo, index, matchesTag)
|
||||
err = json.Unmarshal(manifestMetaMap[manifestDigest].ConfigBlob, &configContent)
|
||||
if err != nil {
|
||||
graphql.AddError(ctx, gqlerror.Errorf("can't unmarshal config blob for image: %s:%s, "+
|
||||
"manifest digest: %s, error: %s", repoMeta.Name, tag, manifestDigest, err.Error()))
|
||||
|
||||
vendor := olu.GetImageVendor(imageConfigInfo)
|
||||
lastUpdated := olu.GetImageLastUpdated(imageConfigInfo)
|
||||
os, arch := olu.GetImagePlatform(imageConfigInfo)
|
||||
osArch := &gql_generated.OsArch{
|
||||
Os: &os,
|
||||
Arch: &arch,
|
||||
continue
|
||||
}
|
||||
|
||||
repoPlatforms = append(repoPlatforms, osArch)
|
||||
repoVendors = append(repoVendors, &vendor)
|
||||
imgSize := int64(0)
|
||||
imgSize += manifestContent.Config.Size
|
||||
imgSize += int64(len(manifestMetaMap[manifestDigest].ManifestBlob))
|
||||
|
||||
for _, layer := range manifestContent.Layers {
|
||||
imgSize += layer.Size
|
||||
}
|
||||
|
||||
var (
|
||||
repoName = repoMeta.Name
|
||||
tag = tag
|
||||
manifestDigest = manifestDigest
|
||||
configDigest = manifestContent.Config.Digest.String()
|
||||
imageLastUpdated = getImageLastUpdated(configContent)
|
||||
isSigned = imageHasSignatures(manifestMetaMap[manifestDigest].Signatures)
|
||||
imageSize = strconv.FormatInt(imgSize, 10)
|
||||
os = configContent.OS
|
||||
arch = configContent.Architecture
|
||||
osArch = gql_generated.OsArch{Os: &os, Arch: &arch}
|
||||
downloadCount = manifestMetaMap[manifestDigest].DownloadCount
|
||||
)
|
||||
|
||||
annotations := common.GetAnnotations(manifestContent.Annotations, configContent.Config.Labels)
|
||||
|
||||
imageSummary := gql_generated.ImageSummary{
|
||||
RepoName: &repo,
|
||||
Tag: &manifestTag,
|
||||
LastUpdated: &lastUpdated,
|
||||
RepoName: &repoName,
|
||||
Tag: &tag,
|
||||
Digest: &manifestDigest,
|
||||
ConfigDigest: &configDigest,
|
||||
LastUpdated: imageLastUpdated,
|
||||
IsSigned: &isSigned,
|
||||
Size: &imageSize,
|
||||
Platform: &osArch,
|
||||
Vendor: &annotations.Vendor,
|
||||
DownloadCount: &downloadCount,
|
||||
Layers: getLayersSummary(manifestContent),
|
||||
Description: &annotations.Description,
|
||||
Title: &annotations.Title,
|
||||
Documentation: &annotations.Documentation,
|
||||
Licenses: &annotations.Licenses,
|
||||
Labels: &annotations.Labels,
|
||||
Source: &annotations.Source,
|
||||
}
|
||||
|
||||
imageSummaries = append(imageSummaries, &imageSummary)
|
||||
}
|
||||
|
||||
return imageSummaries
|
||||
}
|
||||
|
||||
func getLayersSummary(manifestContent ispec.Manifest) []*gql_generated.LayerSummary {
|
||||
layers := make([]*gql_generated.LayerSummary, 0, len(manifestContent.Layers))
|
||||
|
||||
for _, layer := range manifestContent.Layers {
|
||||
size := strconv.FormatInt(layer.Size, 10)
|
||||
digest := layer.Digest.String()
|
||||
|
||||
layers = append(layers, &gql_generated.LayerSummary{
|
||||
Size: &size,
|
||||
Platform: osArch,
|
||||
Vendor: &vendor,
|
||||
Score: &score,
|
||||
Digest: &digest,
|
||||
})
|
||||
}
|
||||
|
||||
if manifests[i].Digest.String() == lastUpdatedTag.Digest {
|
||||
lastUpdatedImageSummary = imageSummary
|
||||
return layers
|
||||
}
|
||||
|
||||
func RepoMeta2RepoSummary(ctx context.Context, repoMeta repodb.RepoMetadata,
|
||||
manifestMetaMap map[string]repodb.ManifestMetadata,
|
||||
) *gql_generated.RepoSummary {
|
||||
var (
|
||||
repoLastUpdatedTimestamp = time.Time{}
|
||||
repoPlatformsSet = map[string]*gql_generated.OsArch{}
|
||||
repoVendorsSet = map[string]bool{}
|
||||
lastUpdatedImageSummary *gql_generated.ImageSummary
|
||||
repoStarCount = repoMeta.Stars
|
||||
isBookmarked = false
|
||||
isStarred = false
|
||||
repoDownloadCount = 0
|
||||
repoName = repoMeta.Name
|
||||
|
||||
// map used to keep track of all blobs of a repo without duplicates
|
||||
// some images may have the same layers
|
||||
repoBlob2Size = make(map[string]int64, 10)
|
||||
|
||||
// made up of all manifests, configs and image layers
|
||||
size = int64(0)
|
||||
)
|
||||
|
||||
for tag, manifestDigest := range repoMeta.Tags {
|
||||
var manifestContent ispec.Manifest
|
||||
|
||||
err := json.Unmarshal(manifestMetaMap[manifestDigest].ManifestBlob, &manifestContent)
|
||||
if err != nil {
|
||||
graphql.AddError(ctx, gqlerror.Errorf("can't unmarshal manifest blob for image: %s:%s, manifest digest: %s, "+
|
||||
"error: %s", repoMeta.Name, tag, manifestDigest, err.Error()))
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
images = append(images, &imageSummary)
|
||||
}
|
||||
var configContent ispec.Image
|
||||
|
||||
err = json.Unmarshal(manifestMetaMap[manifestDigest].ConfigBlob, &configContent)
|
||||
if err != nil {
|
||||
graphql.AddError(ctx, gqlerror.Errorf("can't unmarshal config blob for image: %s:%s, manifest digest: %s, error: %s",
|
||||
repoMeta.Name, tag, manifestDigest, err.Error()))
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
for blob := range repoBlob2Size {
|
||||
repoSize += repoBlob2Size[blob]
|
||||
var (
|
||||
tag = tag
|
||||
isSigned = len(manifestMetaMap[manifestDigest].Signatures) > 0
|
||||
configDigest = manifestContent.Config.Digest.String()
|
||||
configSize = manifestContent.Config.Size
|
||||
opSys = configContent.OS
|
||||
arch = configContent.Architecture
|
||||
osArch = gql_generated.OsArch{Os: &opSys, Arch: &arch}
|
||||
|
||||
imageLastUpdated = getImageLastUpdated(configContent)
|
||||
|
||||
size = updateRepoBlobsMap(
|
||||
manifestDigest, int64(len(manifestMetaMap[manifestDigest].ManifestBlob)),
|
||||
configDigest, configSize,
|
||||
manifestContent.Layers,
|
||||
repoBlob2Size)
|
||||
imageSize = strconv.FormatInt(size, 10)
|
||||
downloadCount = manifestMetaMap[manifestDigest].DownloadCount
|
||||
manifestDigest = manifestDigest
|
||||
)
|
||||
|
||||
annotations := common.GetAnnotations(manifestContent.Annotations, configContent.Config.Labels)
|
||||
|
||||
imageSummary := gql_generated.ImageSummary{
|
||||
RepoName: &repoName,
|
||||
Tag: &tag,
|
||||
Digest: &manifestDigest,
|
||||
ConfigDigest: &configDigest,
|
||||
LastUpdated: imageLastUpdated,
|
||||
IsSigned: &isSigned,
|
||||
Size: &imageSize,
|
||||
Platform: &osArch,
|
||||
Vendor: &annotations.Vendor,
|
||||
DownloadCount: &downloadCount,
|
||||
Layers: getLayersSummary(manifestContent),
|
||||
Description: &annotations.Description,
|
||||
Title: &annotations.Title,
|
||||
Documentation: &annotations.Documentation,
|
||||
Licenses: &annotations.Licenses,
|
||||
Labels: &annotations.Labels,
|
||||
Source: &annotations.Source,
|
||||
}
|
||||
|
||||
if index := strings.Index(repo, name); index != -1 {
|
||||
repoSize := strconv.FormatInt(repoSize, 10)
|
||||
if annotations.Vendor != "" {
|
||||
repoVendorsSet[annotations.Vendor] = true
|
||||
}
|
||||
|
||||
repos = append(repos, &gql_generated.RepoSummary{
|
||||
Name: &repo,
|
||||
LastUpdated: &lastUpdatedTag.Timestamp,
|
||||
if opSys != "" || arch != "" {
|
||||
osArchString := strings.TrimSpace(fmt.Sprintf("%s %s", opSys, arch))
|
||||
repoPlatformsSet[osArchString] = &gql_generated.OsArch{Os: &opSys, Arch: &arch}
|
||||
}
|
||||
|
||||
if repoLastUpdatedTimestamp.Equal(time.Time{}) {
|
||||
// initialize with first time value
|
||||
if imageLastUpdated != nil {
|
||||
repoLastUpdatedTimestamp = *imageLastUpdated
|
||||
}
|
||||
|
||||
lastUpdatedImageSummary = &imageSummary
|
||||
} else if imageLastUpdated != nil && repoLastUpdatedTimestamp.After(*imageLastUpdated) {
|
||||
repoLastUpdatedTimestamp = *imageLastUpdated
|
||||
lastUpdatedImageSummary = &imageSummary
|
||||
}
|
||||
|
||||
repoDownloadCount += manifestMetaMap[manifestDigest].DownloadCount
|
||||
}
|
||||
|
||||
// calculate repo size: sum of all manifest, config and layer blob sizes
|
||||
for _, blobSize := range repoBlob2Size {
|
||||
size += blobSize
|
||||
}
|
||||
|
||||
repoSize := strconv.FormatInt(size, 10)
|
||||
score := 0
|
||||
|
||||
repoPlatforms := make([]*gql_generated.OsArch, 0, len(repoPlatformsSet))
|
||||
for _, osArch := range repoPlatformsSet {
|
||||
repoPlatforms = append(repoPlatforms, osArch)
|
||||
}
|
||||
|
||||
repoVendors := make([]*string, 0, len(repoVendorsSet))
|
||||
|
||||
for vendor := range repoVendorsSet {
|
||||
vendor := vendor
|
||||
repoVendors = append(repoVendors, &vendor)
|
||||
}
|
||||
|
||||
return &gql_generated.RepoSummary{
|
||||
Name: &repoName,
|
||||
LastUpdated: &repoLastUpdatedTimestamp,
|
||||
Size: &repoSize,
|
||||
Platforms: repoPlatforms,
|
||||
Vendors: repoVendors,
|
||||
Score: &index,
|
||||
NewestImage: &lastUpdatedImageSummary,
|
||||
})
|
||||
Score: &score,
|
||||
NewestImage: lastUpdatedImageSummary,
|
||||
DownloadCount: &repoDownloadCount,
|
||||
StarCount: &repoStarCount,
|
||||
IsBookmarked: &isBookmarked,
|
||||
IsStarred: &isStarred,
|
||||
}
|
||||
}
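// Note: Score is left at 0 here; the caller in globalSearch overwrites it with
// calculateImageMatchingScore based on where the query matches the repository name,
// so RepoMeta2RepoSummary itself carries no ranking logic.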
|
||||
|
||||
func imageHasSignatures(signatures map[string][]string) bool {
|
||||
// (sigType, signatures)
|
||||
for _, sigs := range signatures {
|
||||
if len(sigs) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(repos, func(i, j int) bool {
|
||||
return *repos[i].Score < *repos[j].Score
|
||||
})
|
||||
return false
|
||||
}
|
||||
|
||||
sort.Slice(images, func(i, j int) bool {
|
||||
return *images[i].Score < *images[j].Score
|
||||
})
|
||||
func searchingForRepos(query string) bool {
|
||||
return !strings.Contains(query, ":")
|
||||
}
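// For example, with the queries used in the tests below:
//
//	searchingForRepos("repo1")       // true  -> globalSearch calls repoDB.SearchRepos
//	searchingForRepos("repo1:1.0.1") // false -> globalSearch calls repoDB.SearchTags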
|
||||
|
||||
sort.Slice(layers, func(i, j int) bool {
|
||||
return *layers[i].Score < *layers[j].Score
|
||||
})
|
||||
// updateRepoBlobsMap adds all the image blobs and their respective size to the repo blobs map
|
||||
// and returns the total size of the image.
|
||||
func updateRepoBlobsMap(manifestDigest string, manifestSize int64, configDigest string, configSize int64,
|
||||
layers []ispec.Descriptor, repoBlob2Size map[string]int64,
|
||||
) int64 {
|
||||
imgSize := int64(0)
|
||||
|
||||
return repos, images, layers
|
||||
// add config size
|
||||
imgSize += configSize
|
||||
repoBlob2Size[configDigest] = configSize
|
||||
|
||||
// add manifest size
|
||||
imgSize += manifestSize
|
||||
repoBlob2Size[manifestDigest] = manifestSize
|
||||
|
||||
// add layers size
|
||||
for _, layer := range layers {
|
||||
repoBlob2Size[layer.Digest.String()] = layer.Size
|
||||
imgSize += layer.Size
|
||||
}
|
||||
|
||||
return imgSize
|
||||
}
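// A minimal sketch of the dedup behaviour (hypothetical digests, for illustration only):
// a layer shared by two tags is added to repoBlob2Size once, so it is counted once in the
// repo size even though it contributes to each image size.
//
//	blobs := map[string]int64{}
//	shared := ispec.Descriptor{Digest: "sha256:aaa", Size: 10}
//	_ = updateRepoBlobsMap("sha256:m1", 2, "sha256:c1", 3, []ispec.Descriptor{shared}, blobs) // image size 15
//	_ = updateRepoBlobsMap("sha256:m2", 2, "sha256:c2", 3, []ispec.Descriptor{shared}, blobs) // image size 15
//	// blobs now holds 5 entries, so summing it gives a repo size of 20, not 30.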
|
||||
|
||||
func getImageLastUpdated(configContent ispec.Image) *time.Time {
|
||||
var lastUpdated *time.Time
|
||||
|
||||
if configContent.Created != nil {
|
||||
lastUpdated = configContent.Created
|
||||
}
|
||||
|
||||
for _, update := range configContent.History {
|
||||
if update.Created != nil {
|
||||
lastUpdated = update.Created
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return lastUpdated
|
||||
}
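// In effect this favours the build history: when both the config Created field and a
// timestamped history entry are present, the first timestamped history entry is returned,
// and config.Created is only used when the history carries no timestamps at all.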
|
||||
|
||||
// calculateImageMatchingScore iterates from the index of the matched string in the
|
||||
|
@ -412,7 +635,7 @@ func globalSearch(repoList []string, name, tag string, olu common.OciLayoutUtils
|
|||
// query: image
|
||||
// repos: repo/test/myimage
|
||||
// Score will be 2.
|
||||
func calculateImageMatchingScore(artefactName string, index int, matchesTag bool) int {
|
||||
func calculateImageMatchingScore(artefactName string, index int) int {
|
||||
score := 0
|
||||
|
||||
for index >= 1 {
|
||||
|
@ -423,10 +646,6 @@ func calculateImageMatchingScore(artefactName string, index int, matchesTag bool
|
|||
score++
|
||||
}
|
||||
|
||||
if !matchesTag {
|
||||
score += 10
|
||||
}
|
||||
|
||||
return score
|
||||
}
|
||||
|
||||
|
|
|
@ -2,21 +2,23 @@ package search //nolint
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
godigest "github.com/opencontainers/go-digest"
|
||||
"github.com/99designs/gqlgen/graphql"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/rs/zerolog"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
"zotregistry.io/zot/pkg/extensions/monitoring"
|
||||
"zotregistry.io/zot/pkg/extensions/search/common"
|
||||
"zotregistry.io/zot/pkg/extensions/search/gql_generated"
|
||||
"zotregistry.io/zot/pkg/log"
|
||||
localCtx "zotregistry.io/zot/pkg/requestcontext"
|
||||
"zotregistry.io/zot/pkg/storage"
|
||||
"zotregistry.io/zot/pkg/storage/repodb"
|
||||
"zotregistry.io/zot/pkg/test/mocks"
|
||||
)
|
||||
|
||||
|
@ -24,156 +26,351 @@ var ErrTestError = errors.New("TestError")
|
|||
|
||||
func TestGlobalSearch(t *testing.T) {
|
||||
Convey("globalSearch", t, func() {
|
||||
Convey("GetRepoLastUpdated fail", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetRepoLastUpdatedFn: func(repo string) (common.TagInfo, error) {
|
||||
return common.TagInfo{}, ErrTestError
|
||||
const query = "repo1"
|
||||
Convey("RepoDB SearchRepos error", func() {
|
||||
mockSearchDB := mocks.RepoDBMock{
|
||||
SearchReposFn: func(ctx context.Context, searchText string, requestedPage repodb.PageInput,
|
||||
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
|
||||
return make([]repodb.RepoMetadata, 0), make(map[string]repodb.ManifestMetadata), ErrTestError
|
||||
},
|
||||
}
|
||||
|
||||
globalSearch([]string{"repo1"}, "name", "tag", mockOlum, log.NewLogger("debug", ""))
|
||||
responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter,
|
||||
graphql.DefaultRecover)
|
||||
repos, images, layers, err := globalSearch(responseContext, query, mockSearchDB, &gql_generated.PageInput{},
|
||||
log.NewLogger("debug", ""))
|
||||
So(err, ShouldNotBeNil)
|
||||
So(images, ShouldBeEmpty)
|
||||
So(layers, ShouldBeEmpty)
|
||||
So(repos, ShouldBeEmpty)
|
||||
})
|
||||
|
||||
Convey("GetImageTagsWithTimestamp fail", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageTagsWithTimestampFn: func(repo string) ([]common.TagInfo, error) {
|
||||
return []common.TagInfo{}, ErrTestError
|
||||
Convey("RepoDB SearchRepo is successful", func() {
|
||||
mockSearchDB := mocks.RepoDBMock{
|
||||
SearchReposFn: func(ctx context.Context, searchText string, requestedPage repodb.PageInput,
|
||||
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
|
||||
repos := []repodb.RepoMetadata{
|
||||
{
|
||||
Name: "repo1",
|
||||
Tags: map[string]string{
|
||||
"1.0.1": "digestTag1.0.1",
|
||||
"1.0.2": "digestTag1.0.2",
|
||||
},
|
||||
Signatures: []string{"testSignature"},
|
||||
Stars: 100,
|
||||
Description: "Descriptions repo1",
|
||||
LogoPath: "test/logoPath",
|
||||
},
|
||||
}
|
||||
|
||||
globalSearch([]string{"repo1"}, "name", "tag", mockOlum, log.NewLogger("debug", ""))
|
||||
createTime := time.Now()
|
||||
configBlob1, err := json.Marshal(ispec.Image{
|
||||
Config: ispec.ImageConfig{
|
||||
Labels: map[string]string{
|
||||
ispec.AnnotationVendor: "TestVendor1",
|
||||
},
|
||||
},
|
||||
Created: &createTime,
|
||||
})
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
configBlob2, err := json.Marshal(ispec.Image{
|
||||
Config: ispec.ImageConfig{
|
||||
Labels: map[string]string{
|
||||
ispec.AnnotationVendor: "TestVendor2",
|
||||
},
|
||||
},
|
||||
})
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
manifestBlob, err := json.Marshal(ispec.Manifest{})
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
manifestMetas := map[string]repodb.ManifestMetadata{
|
||||
"digestTag1.0.1": {
|
||||
ManifestBlob: manifestBlob,
|
||||
ConfigBlob: configBlob1,
|
||||
DownloadCount: 100,
|
||||
Signatures: make(map[string][]string),
|
||||
Dependencies: make([]string, 0),
|
||||
Dependants: make([]string, 0),
|
||||
BlobsSize: 0,
|
||||
BlobCount: 0,
|
||||
},
|
||||
"digestTag1.0.2": {
|
||||
ManifestBlob: manifestBlob,
|
||||
ConfigBlob: configBlob2,
|
||||
DownloadCount: 100,
|
||||
Signatures: make(map[string][]string),
|
||||
Dependencies: make([]string, 0),
|
||||
Dependants: make([]string, 0),
|
||||
BlobsSize: 0,
|
||||
BlobCount: 0,
|
||||
},
|
||||
}
|
||||
|
||||
return repos, manifestMetas, nil
|
||||
},
|
||||
}
|
||||
|
||||
const query = "repo1"
|
||||
limit := 1
|
||||
ofset := 0
|
||||
sortCriteria := gql_generated.SortCriteriaAlphabeticAsc
|
||||
pageInput := gql_generated.PageInput{
|
||||
Limit: &limit,
|
||||
Offset: &ofset,
|
||||
SortBy: &sortCriteria,
|
||||
}
|
||||
|
||||
responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter,
|
||||
graphql.DefaultRecover)
|
||||
repos, images, layers, err := globalSearch(responseContext, query, mockSearchDB, &pageInput,
|
||||
log.NewLogger("debug", ""))
|
||||
So(err, ShouldBeNil)
|
||||
So(images, ShouldBeEmpty)
|
||||
So(layers, ShouldBeEmpty)
|
||||
So(repos, ShouldNotBeEmpty)
|
||||
So(len(repos[0].Vendors), ShouldEqual, 2)
|
||||
})
|
||||
|
||||
Convey("GetImageManifests fail", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageManifestsFn: func(name string) ([]ispec.Descriptor, error) {
|
||||
return []ispec.Descriptor{}, ErrTestError
|
||||
Convey("RepoDB SearchRepo Bad manifest refferenced", func() {
|
||||
mockSearchDB := mocks.RepoDBMock{
|
||||
SearchReposFn: func(ctx context.Context, searchText string, requestedPage repodb.PageInput,
|
||||
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
|
||||
repos := []repodb.RepoMetadata{
|
||||
{
|
||||
Name: "repo1",
|
||||
Tags: map[string]string{
|
||||
"1.0.1": "digestTag1.0.1",
|
||||
},
|
||||
Signatures: []string{"testSignature"},
|
||||
Stars: 100,
|
||||
Description: "Descriptions repo1",
|
||||
LogoPath: "test/logoPath",
|
||||
},
|
||||
}
|
||||
|
||||
globalSearch([]string{"repo1"}, "name", "tag", mockOlum, log.NewLogger("debug", ""))
|
||||
configBlob, err := json.Marshal(ispec.Image{})
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
manifestMetas := map[string]repodb.ManifestMetadata{
|
||||
"digestTag1.0.1": {
|
||||
ManifestBlob: []byte("bad manifest blob"),
|
||||
ConfigBlob: configBlob,
|
||||
DownloadCount: 100,
|
||||
Signatures: make(map[string][]string),
|
||||
Dependencies: make([]string, 0),
|
||||
Dependants: make([]string, 0),
|
||||
BlobsSize: 0,
|
||||
BlobCount: 0,
|
||||
},
|
||||
}
|
||||
|
||||
return repos, manifestMetas, nil
|
||||
},
|
||||
}
|
||||
|
||||
query := "repo1"
|
||||
limit := 1
|
||||
ofset := 0
|
||||
sortCriteria := gql_generated.SortCriteriaAlphabeticAsc
|
||||
pageInput := gql_generated.PageInput{
|
||||
Limit: &limit,
|
||||
Offset: &ofset,
|
||||
SortBy: &sortCriteria,
|
||||
}
|
||||
|
||||
responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter,
|
||||
graphql.DefaultRecover)
|
||||
|
||||
repos, images, layers, err := globalSearch(responseContext, query, mockSearchDB, &pageInput,
|
||||
log.NewLogger("debug", ""))
|
||||
So(err, ShouldBeNil)
|
||||
So(images, ShouldBeEmpty)
|
||||
So(layers, ShouldBeEmpty)
|
||||
So(repos, ShouldNotBeEmpty)
|
||||
|
||||
query = "repo1:1.0.1"
|
||||
|
||||
responseContext = graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter,
|
||||
graphql.DefaultRecover)
|
||||
repos, images, layers, err = globalSearch(responseContext, query, mockSearchDB, &pageInput,
|
||||
log.NewLogger("debug", ""))
|
||||
So(err, ShouldBeNil)
|
||||
So(images, ShouldBeEmpty)
|
||||
So(layers, ShouldBeEmpty)
|
||||
So(repos, ShouldBeEmpty)
|
||||
})
|
||||
|
||||
Convey("Manifests given, bad image blob manifest", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageManifestsFn: func(name string) ([]ispec.Descriptor, error) {
|
||||
return []ispec.Descriptor{
|
||||
Convey("RepoDB SearchRepo good manifest refferenced and bad config blob", func() {
|
||||
mockSearchDB := mocks.RepoDBMock{
|
||||
SearchReposFn: func(ctx context.Context, searchText string, requestedPage repodb.PageInput,
|
||||
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
|
||||
repos := []repodb.RepoMetadata{
|
||||
{
|
||||
Digest: "digest",
|
||||
Size: -1,
|
||||
Annotations: map[string]string{
|
||||
ispec.AnnotationRefName: "this is a bad format",
|
||||
Name: "repo1",
|
||||
Tags: map[string]string{
|
||||
"1.0.1": "digestTag1.0.1",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
return v1.Manifest{}, ErrTestError
|
||||
Signatures: []string{"testSignature"},
|
||||
Stars: 100,
|
||||
Description: "Descriptions repo1",
|
||||
LogoPath: "test/logoPath",
|
||||
},
|
||||
}
|
||||
globalSearch([]string{"repo1"}, "name", "tag", mockOlum, log.NewLogger("debug", ""))
|
||||
|
||||
manifestBlob, err := json.Marshal(ispec.Manifest{})
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
manifestMetas := map[string]repodb.ManifestMetadata{
|
||||
"digestTag1.0.1": {
|
||||
ManifestBlob: manifestBlob,
|
||||
ConfigBlob: []byte("bad config blob"),
|
||||
DownloadCount: 100,
|
||||
Signatures: make(map[string][]string),
|
||||
Dependencies: make([]string, 0),
|
||||
Dependants: make([]string, 0),
|
||||
BlobsSize: 0,
|
||||
BlobCount: 0,
|
||||
},
|
||||
}
|
||||
|
||||
return repos, manifestMetas, nil
|
||||
},
|
||||
}
|
||||
|
||||
query := "repo1"
|
||||
limit := 1
|
||||
ofset := 0
|
||||
sortCriteria := gql_generated.SortCriteriaAlphabeticAsc
|
||||
pageInput := gql_generated.PageInput{
|
||||
Limit: &limit,
|
||||
Offset: &ofset,
|
||||
SortBy: &sortCriteria,
|
||||
}
|
||||
|
||||
responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter,
|
||||
graphql.DefaultRecover)
|
||||
repos, images, layers, err := globalSearch(responseContext, query, mockSearchDB, &pageInput,
|
||||
log.NewLogger("debug", ""))
|
||||
So(err, ShouldBeNil)
|
||||
So(images, ShouldBeEmpty)
|
||||
So(layers, ShouldBeEmpty)
|
||||
So(repos, ShouldNotBeEmpty)
|
||||
|
||||
query = "repo1:1.0.1"
|
||||
responseContext = graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter,
|
||||
graphql.DefaultRecover)
|
||||
repos, images, layers, err = globalSearch(responseContext, query, mockSearchDB, &pageInput,
|
||||
log.NewLogger("debug", ""))
|
||||
So(err, ShouldBeNil)
|
||||
So(images, ShouldBeEmpty)
|
||||
So(layers, ShouldBeEmpty)
|
||||
So(repos, ShouldBeEmpty)
|
||||
})
|
||||
|
||||
Convey("Manifests given, no manifest tag", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageManifestsFn: func(name string) ([]ispec.Descriptor, error) {
|
||||
return []ispec.Descriptor{
|
||||
{
|
||||
Digest: "digest",
|
||||
Size: -1,
|
||||
},
|
||||
}, nil
|
||||
Convey("RepoDB SearchTags gives error", func() {
|
||||
mockSearchDB := mocks.RepoDBMock{
|
||||
SearchTagsFn: func(ctx context.Context, searchText string, requestedPage repodb.PageInput,
|
||||
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
|
||||
return make([]repodb.RepoMetadata, 0), make(map[string]repodb.ManifestMetadata), ErrTestError
|
||||
},
|
||||
}
|
||||
const query = "repo1:1.0.1"
|
||||
|
||||
globalSearch([]string{"repo1"}, "test", "tag", mockOlum, log.NewLogger("debug", ""))
|
||||
responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter,
|
||||
graphql.DefaultRecover)
|
||||
repos, images, layers, err := globalSearch(responseContext, query, mockSearchDB, &gql_generated.PageInput{},
|
||||
log.NewLogger("debug", ""))
|
||||
So(err, ShouldNotBeNil)
|
||||
So(images, ShouldBeEmpty)
|
||||
So(layers, ShouldBeEmpty)
|
||||
So(repos, ShouldBeEmpty)
|
||||
})
|
||||
|
||||
Convey("Global search success, no tag", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetRepoLastUpdatedFn: func(repo string) (common.TagInfo, error) {
|
||||
return common.TagInfo{
|
||||
Digest: "sha256:855b1556a45637abf05c63407437f6f305b4627c4361fb965a78e5731999c0c7",
|
||||
}, nil
|
||||
},
|
||||
GetImageManifestsFn: func(name string) ([]ispec.Descriptor, error) {
|
||||
return []ispec.Descriptor{
|
||||
Convey("RepoDB SearchTags is successful", func() {
|
||||
mockSearchDB := mocks.RepoDBMock{
|
||||
SearchTagsFn: func(ctx context.Context, searchText string, requestedPage repodb.PageInput,
|
||||
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
|
||||
repos := []repodb.RepoMetadata{
|
||||
{
|
||||
Digest: "sha256:855b1556a45637abf05c63407437f6f305b4627c4361fb965a78e5731999c0c7",
|
||||
Size: -1,
|
||||
Annotations: map[string]string{
|
||||
ispec.AnnotationRefName: "this is a bad format",
|
||||
Name: "repo1",
|
||||
Tags: map[string]string{
|
||||
"1.0.1": "digestTag1.0.1",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
GetImageBlobManifestFn: func(imageDir string, digest godigest.Digest) (v1.Manifest, error) {
|
||||
return v1.Manifest{
|
||||
Layers: []v1.Descriptor{
|
||||
{
|
||||
Size: 0,
|
||||
Digest: v1.Hash{},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
Signatures: []string{"testSignature"},
|
||||
Stars: 100,
|
||||
Description: "Descriptions repo1",
|
||||
LogoPath: "test/logoPath",
|
||||
},
|
||||
}
|
||||
globalSearch([]string{"repo1/name"}, "name", "tag", mockOlum, log.NewLogger("debug", ""))
|
||||
})
|
||||
|
||||
Convey("Manifests given, bad image config info", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetImageManifestsFn: func(name string) ([]ispec.Descriptor, error) {
|
||||
return []ispec.Descriptor{
|
||||
{
|
||||
Digest: "digest",
|
||||
Size: -1,
|
||||
Annotations: map[string]string{
|
||||
ispec.AnnotationRefName: "this is a bad format",
|
||||
configBlob1, err := json.Marshal(ispec.Image{
|
||||
Config: ispec.ImageConfig{
|
||||
Labels: map[string]string{
|
||||
ispec.AnnotationVendor: "TestVendor1",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
GetImageConfigInfoFn: func(repo string, manifestDigest godigest.Digest) (ispec.Image, error) {
|
||||
return ispec.Image{}, ErrTestError
|
||||
},
|
||||
}
|
||||
globalSearch([]string{"repo1/name"}, "name", "tag", mockOlum, log.NewLogger("debug", ""))
|
||||
})
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("Tag given, no layer match", func() {
|
||||
mockOlum := mocks.OciLayoutUtilsMock{
|
||||
GetExpandedRepoInfoFn: func(name string) (common.RepoInfo, error) {
|
||||
return common.RepoInfo{
|
||||
Images: []common.Image{
|
||||
{
|
||||
Tag: "latest",
|
||||
Layers: []common.Layer{
|
||||
{
|
||||
Size: "100",
|
||||
Digest: "sha256:855b1556a45637abf05c63407437f6f305b4627c4361fb965a78e5731999c0c7",
|
||||
configBlob2, err := json.Marshal(ispec.Image{
|
||||
Config: ispec.ImageConfig{
|
||||
Labels: map[string]string{
|
||||
ispec.AnnotationVendor: "TestVendor2",
|
||||
},
|
||||
},
|
||||
})
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
manifestBlob, err := json.Marshal(ispec.Manifest{})
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
manifestMetas := map[string]repodb.ManifestMetadata{
|
||||
"digestTag1.0.1": {
|
||||
ManifestBlob: manifestBlob,
|
||||
ConfigBlob: configBlob1,
|
||||
DownloadCount: 100,
|
||||
Signatures: make(map[string][]string),
|
||||
Dependencies: make([]string, 0),
|
||||
Dependants: make([]string, 0),
|
||||
BlobsSize: 0,
|
||||
BlobCount: 0,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
},
|
||||
GetImageManifestSizeFn: func(repo string, manifestDigest godigest.Digest) int64 {
|
||||
return 100
|
||||
},
|
||||
GetImageConfigSizeFn: func(repo string, manifestDigest godigest.Digest) int64 {
|
||||
return 100
|
||||
},
|
||||
GetImageTagsWithTimestampFn: func(repo string) ([]common.TagInfo, error) {
|
||||
return []common.TagInfo{
|
||||
{
|
||||
Name: "test",
|
||||
Digest: "test",
|
||||
},
|
||||
}, nil
|
||||
"digestTag1.0.2": {
|
||||
ManifestBlob: manifestBlob,
|
||||
ConfigBlob: configBlob2,
|
||||
DownloadCount: 100,
|
||||
Signatures: make(map[string][]string),
|
||||
Dependencies: make([]string, 0),
|
||||
Dependants: make([]string, 0),
|
||||
BlobsSize: 0,
|
||||
BlobCount: 0,
|
||||
},
|
||||
}
|
||||
globalSearch([]string{"repo1"}, "name", "tag", mockOlum, log.NewLogger("debug", ""))
|
||||
|
||||
return repos, manifestMetas, nil
|
||||
},
|
||||
}
|
||||
|
||||
const query = "repo1:1.0.1"
|
||||
limit := 1
|
||||
offset := 0
|
||||
sortCriteria := gql_generated.SortCriteriaAlphabeticAsc
|
||||
pageInput := gql_generated.PageInput{
|
||||
Limit: &limit,
|
||||
Offset: &offset,
|
||||
SortBy: &sortCriteria,
|
||||
}
|
||||
|
||||
responseContext := graphql.WithResponseContext(context.Background(), graphql.DefaultErrorPresenter,
|
||||
graphql.DefaultRecover)
|
||||
repos, images, layers, err := globalSearch(responseContext, query, mockSearchDB, &pageInput,
|
||||
log.NewLogger("debug", ""))
|
||||
So(err, ShouldBeNil)
|
||||
So(images, ShouldNotBeEmpty)
|
||||
So(layers, ShouldBeEmpty)
|
||||
So(repos, ShouldBeEmpty)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
@ -205,31 +402,27 @@ func TestMatching(t *testing.T) {
|
|||
|
||||
Convey("Perfect Matching", t, func() {
|
||||
query := "alpine"
|
||||
score := calculateImageMatchingScore("alpine", strings.Index("alpine", query), true)
|
||||
score := calculateImageMatchingScore("alpine", strings.Index("alpine", query))
|
||||
So(score, ShouldEqual, 0)
|
||||
})
|
||||
|
||||
Convey("Partial Matching", t, func() {
|
||||
query := "pine"
|
||||
score := calculateImageMatchingScore("alpine", strings.Index("alpine", query), true)
|
||||
score := calculateImageMatchingScore("alpine", strings.Index("alpine", query))
|
||||
So(score, ShouldEqual, 2)
|
||||
})
|
||||
|
||||
Convey("Complex Partial Matching", t, func() {
|
||||
query := "pine"
|
||||
score := calculateImageMatchingScore("repo/test/alpine", strings.Index("alpine", query), true)
|
||||
score := calculateImageMatchingScore("repo/test/alpine", strings.Index("alpine", query))
|
||||
So(score, ShouldEqual, 2)
|
||||
|
||||
query = "pine"
|
||||
score = calculateImageMatchingScore("repo/alpine/test", strings.Index("alpine", query), true)
|
||||
score = calculateImageMatchingScore("repo/alpine/test", strings.Index("alpine", query))
|
||||
So(score, ShouldEqual, 2)
|
||||
|
||||
query = "pine"
|
||||
score = calculateImageMatchingScore("alpine/repo/test", strings.Index("alpine", query), true)
|
||||
score = calculateImageMatchingScore("alpine/repo/test", strings.Index("alpine", query))
|
||||
So(score, ShouldEqual, 2)
|
||||
|
||||
query = "pine"
|
||||
score = calculateImageMatchingScore("alpine/repo/test", strings.Index("alpine", query), false)
|
||||
So(score, ShouldEqual, 12)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -27,6 +27,7 @@ type RepoInfo {
|
|||
# Search results in all repos/images/layers
|
||||
# There will be other more structures for more detailed information
|
||||
type GlobalSearchResult {
|
||||
Page: PageInfo
|
||||
Images: [ImageSummary]
|
||||
Repos: [RepoSummary]
|
||||
Layers: [LayerSummary]
|
||||
|
@ -48,8 +49,11 @@ type ImageSummary {
|
|||
DownloadCount: Int
|
||||
Layers: [LayerSummary]
|
||||
Description: String
|
||||
Licenses: String
|
||||
Licenses: String # The value of the annotation if present, 'unknown' otherwise.
|
||||
Labels: String
|
||||
Title: String
|
||||
Source: String
|
||||
Documentation: String
|
||||
}
|
||||
|
||||
# Brief on a specific repo to be used in queries returning a list of repos
|
||||
|
@ -60,10 +64,11 @@ type RepoSummary {
|
|||
Platforms: [OsArch]
|
||||
Vendors: [String]
|
||||
Score: Int
|
||||
NewestImage: ImageSummary
|
||||
NewestImage: ImageSummary # Newest based on created timestamp
|
||||
DownloadCount: Int
|
||||
StarCount: Int
|
||||
IsBookmarked: Boolean
|
||||
IsStarred: Boolean
|
||||
}
|
||||
|
||||
# Currently the same as LayerInfo, we can refactor later
|
||||
|
@ -79,6 +84,29 @@ type OsArch {
|
|||
Arch: String
|
||||
}
|
||||
|
||||
enum SortCriteria {
|
||||
RELEVANCE
|
||||
UPDATE_TIME
|
||||
ALPHABETIC_ASC
|
||||
ALPHABETIC_DSC
|
||||
STARS
|
||||
DOWNLOADS
|
||||
}
|
||||
|
||||
type PageInfo {
|
||||
ObjectCount: Int!
|
||||
PreviousPage: Int
|
||||
NextPage: Int
|
||||
Pages: Int
|
||||
}
|
||||
|
||||
# Pagination parameters
|
||||
input PageInput {
|
||||
limit: Int
|
||||
offset: Int
|
||||
sortBy: SortCriteria
|
||||
}
|
||||
|
||||
type Query {
|
||||
CVEListForImage(image: String!): CVEResultForImage!
|
||||
ImageListForCVE(id: String!): [ImageSummary!]
|
||||
|
@ -87,6 +115,6 @@ type Query {
|
|||
RepoListWithNewestImage: [RepoSummary!]! # Newest based on created timestamp
|
||||
ImageList(repo: String!): [ImageSummary!]
|
||||
ExpandedRepoInfo(repo: String!): RepoInfo!
|
||||
GlobalSearch(query: String!): GlobalSearchResult!
|
||||
GlobalSearch(query: String!, requestedPage: PageInput): GlobalSearchResult! # Return all images/repos/layers which match the query
|
||||
DerivedImageList(image: String!): [ImageSummary!]
|
||||
}
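
For orientation, a minimal client-side sketch of the extended GlobalSearch(query, requestedPage) call follows. It is not part of this change: the HTTP endpoint path is an assumption (adjust it to wherever the search extension's GraphQL handler is mounted), and only fields visible in the schema above are selected.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Paginated global search: first result only, sorted alphabetically.
    gqlQuery := `{
      GlobalSearch(query: "repo1:1.0.1", requestedPage: {limit: 1, offset: 0, sortBy: ALPHABETIC_ASC}) {
        Page { ObjectCount }
        Repos { Score DownloadCount StarCount }
        Images { Description DownloadCount }
      }
    }`

    body, err := json.Marshal(map[string]string{"query": gqlQuery})
    if err != nil {
        panic(err)
    }

    // Assumed endpoint; the search extension decides where GraphQL is actually served.
    resp, err := http.Post("http://127.0.0.1:8080/v2/_zot/ext/search", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var result map[string]interface{}
    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
        panic(err)
    }

    fmt.Println(result)
}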
|
||||
|
|
|
@ -436,39 +436,16 @@ func (r *queryResolver) ExpandedRepoInfo(ctx context.Context, repo string) (*gql
|
|||
}
|
||||
|
||||
// GlobalSearch is the resolver for the GlobalSearch field.
|
||||
func (r *queryResolver) GlobalSearch(ctx context.Context, query string) (*gql_generated.GlobalSearchResult, error) {
|
||||
func (r *queryResolver) GlobalSearch(ctx context.Context, query string, requestedPage *gql_generated.PageInput) (*gql_generated.GlobalSearchResult, error) {
|
||||
query = cleanQuerry(query)
|
||||
defaultStore := r.storeController.DefaultStore
|
||||
olu := common.NewBaseOciLayoutUtils(r.storeController, r.log)
|
||||
|
||||
var name, tag string
|
||||
|
||||
_, err := fmt.Sscanf(query, "%s %s", &name, &tag)
|
||||
if err != nil {
|
||||
name = query
|
||||
}
|
||||
|
||||
repoList, err := defaultStore.GetRepositories()
|
||||
if err != nil {
|
||||
r.log.Error().Err(err).Msg("unable to search repositories")
|
||||
|
||||
return &gql_generated.GlobalSearchResult{}, err
|
||||
}
|
||||
|
||||
availableRepos, err := userAvailableRepos(ctx, repoList)
|
||||
if err != nil {
|
||||
r.log.Error().Err(err).Msg("unable to filter user available repositories")
|
||||
|
||||
return &gql_generated.GlobalSearchResult{}, err
|
||||
}
|
||||
|
||||
repos, images, layers := globalSearch(availableRepos, name, tag, olu, r.log)
|
||||
repos, images, layers, err := globalSearch(ctx, query, r.repoDB, requestedPage, r.log)
|
||||
|
||||
return &gql_generated.GlobalSearchResult{
|
||||
Images: images,
|
||||
Repos: repos,
|
||||
Layers: layers,
|
||||
}, nil
|
||||
}, err
|
||||
}
|
||||
|
||||
// DependencyListForImage is the resolver for the DependencyListForImage field.
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
|
@ -12,6 +13,12 @@ import (
|
|||
|
||||
const defaultPerms = 0o0600
|
||||
|
||||
// nolint:gochecknoglobals
|
||||
var loggerSetTimeFormat sync.Once
|
||||
|
||||
// nolint:gochecknoglobals
|
||||
var auditLoggerSetTimeFormat sync.Once
|
||||
|
||||
// Logger extends zerolog's Logger.
|
||||
type Logger struct {
|
||||
zerolog.Logger
|
||||
|
@ -22,7 +29,9 @@ func (l Logger) Println(v ...interface{}) {
|
|||
}
|
||||
|
||||
func NewLogger(level, output string) Logger {
|
||||
loggerSetTimeFormat.Do(func() {
|
||||
zerolog.TimeFieldFormat = time.RFC3339Nano
|
||||
})
|
||||
|
||||
lvl, err := zerolog.ParseLevel(level)
|
||||
if err != nil {
|
||||
|
@ -47,7 +56,9 @@ func NewLogger(level, output string) Logger {
|
|||
}
|
||||
|
||||
func NewAuditLogger(level, audit string) *Logger {
|
||||
auditLoggerSetTimeFormat.Do(func() {
|
||||
zerolog.TimeFieldFormat = time.RFC3339Nano
|
||||
})
|
||||
|
||||
lvl, err := zerolog.ParseLevel(level)
|
||||
if err != nil {
|
||||
|
|
pkg/storage/repodb/boltdb_wrapper.go (new file, 832 lines)
|
@ -0,0 +1,832 @@
|
|||
package repodb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
glob "github.com/bmatcuk/doublestar/v4"
|
||||
ispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rs/zerolog"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
zerr "zotregistry.io/zot/errors"
|
||||
"zotregistry.io/zot/pkg/log"
|
||||
localCtx "zotregistry.io/zot/pkg/requestcontext"
|
||||
)
|
||||
|
||||
var ErrBadCtxFormat = errors.New("type assertion failed")
|
||||
|
||||
type BoltDBWrapper struct {
|
||||
db *bolt.DB
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func NewBotDBWrapper(params BoltDBParameters) (*BoltDBWrapper, error) {
|
||||
const perms = 0o600
|
||||
|
||||
boltDB, err := bolt.Open(path.Join(params.RootDir, "repo.db"), perms, &bolt.Options{Timeout: time.Second * 10})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = boltDB.Update(func(transaction *bolt.Tx) error {
|
||||
_, err := transaction.CreateBucketIfNotExists([]byte(ManifestMetadataBucket))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = transaction.CreateBucketIfNotExists([]byte(RepoMetadataBucket))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BoltDBWrapper{
|
||||
db: boltDB,
|
||||
log: log.Logger{Logger: zerolog.New(os.Stdout)},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) SetManifestMeta(manifestDigest string, manifestMeta ManifestMetadata) error {
|
||||
// Q: should we check for correct input?
|
||||
if manifestMeta.Signatures == nil {
|
||||
manifestMeta.Signatures = map[string][]string{}
|
||||
}
|
||||
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(ManifestMetadataBucket))
|
||||
|
||||
mmBlob, err := json.Marshal(manifestMeta)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "repodb: error while calculating blob for manifest with digest %s", manifestDigest)
|
||||
}
|
||||
|
||||
err = buck.Put([]byte(manifestDigest), mmBlob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "repodb: error while setting manifest meta for digest %s", manifestDigest)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) GetManifestMeta(manifestDigest string) (ManifestMetadata, error) {
|
||||
var manifestMetadata ManifestMetadata
|
||||
|
||||
err := bdw.db.View(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(ManifestMetadataBucket))
|
||||
|
||||
mmBlob := buck.Get([]byte(manifestDigest))
|
||||
|
||||
if len(mmBlob) == 0 {
|
||||
return zerr.ErrManifestMetaNotFound
|
||||
}
|
||||
|
||||
err := json.Unmarshal(mmBlob, &manifestMetadata)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "repodb: error while unmarshaling manifest meta for digest %s", manifestDigest)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return manifestMetadata, err
|
||||
}
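
The two methods above are easiest to read together with a caller. Below is a minimal sketch, not taken from this change, that opens the store with the constructor as named here (NewBotDBWrapper) and round-trips one ManifestMetadata entry; the digest and directory are placeholder values.

package main

import (
    "fmt"

    "zotregistry.io/zot/pkg/storage/repodb"
)

func main() {
    // Open (or create) repo.db under the given root directory.
    rdb, err := repodb.NewBotDBWrapper(repodb.BoltDBParameters{RootDir: "/tmp/zot-repodb"})
    if err != nil {
        panic(err)
    }

    digest := "sha256:855b1556a45637abf05c63407437f6f305b4627c4361fb965a78e5731999c0c7"

    // Store metadata for the manifest...
    err = rdb.SetManifestMeta(digest, repodb.ManifestMetadata{
        ManifestBlob:  []byte("{}"),
        ConfigBlob:    []byte("{}"),
        DownloadCount: 1,
    })
    if err != nil {
        panic(err)
    }

    // ...and read it back by digest.
    meta, err := rdb.GetManifestMeta(digest)
    if err != nil {
        panic(err)
    }

    fmt.Println(meta.DownloadCount) // 1
}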
|
||||
|
||||
func (bdw BoltDBWrapper) SetRepoTag(repo string, tag string, manifestDigest string) error {
|
||||
if err := validateRepoTagInput(repo, tag, manifestDigest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(RepoMetadataBucket))
|
||||
|
||||
repoMetaBlob := buck.Get([]byte(repo))
|
||||
|
||||
// object not found
|
||||
if len(repoMetaBlob) == 0 {
|
||||
// create a new object
|
||||
repoMeta := RepoMetadata{
|
||||
Name: repo,
|
||||
Tags: map[string]string{
|
||||
tag: manifestDigest,
|
||||
},
|
||||
}
|
||||
|
||||
repoMetaBlob, err := json.Marshal(repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buck.Put([]byte(repo), repoMetaBlob)
|
||||
}
|
||||
|
||||
// object found
|
||||
var repoMeta RepoMetadata
|
||||
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
repoMeta.Tags[tag] = manifestDigest
|
||||
|
||||
repoMetaBlob, err = json.Marshal(repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buck.Put([]byte(repo), repoMetaBlob)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func validateRepoTagInput(repo, tag, manifestDigest string) error {
|
||||
if repo == "" {
|
||||
return errors.New("repodb: repo name can't be empty string")
|
||||
}
|
||||
|
||||
if tag == "" {
|
||||
return errors.New("repodb: tag can't be empty string")
|
||||
}
|
||||
|
||||
if manifestDigest == "" {
|
||||
return errors.New("repodb: manifest digest can't be empty string")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) GetRepoMeta(repo string) (RepoMetadata, error) {
|
||||
var repoMeta RepoMetadata
|
||||
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(RepoMetadataBucket))
|
||||
|
||||
repoMetaBlob := buck.Get([]byte(repo))
|
||||
|
||||
// object not found
|
||||
if repoMetaBlob == nil {
|
||||
return zerr.ErrRepoMetaNotFound
|
||||
}
|
||||
|
||||
// object found
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return repoMeta, err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) DeleteRepoTag(repo string, tag string) error {
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(RepoMetadataBucket))
|
||||
|
||||
repoMetaBlob := buck.Get([]byte(repo))
|
||||
|
||||
// object not found
|
||||
if repoMetaBlob == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// object found
|
||||
var repoMeta RepoMetadata
|
||||
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
delete(repoMeta.Tags, tag)
|
||||
|
||||
if len(repoMeta.Tags) == 0 {
|
||||
return buck.Delete([]byte(repo))
|
||||
}
|
||||
|
||||
repoMetaBlob, err = json.Marshal(repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buck.Put([]byte(repo), repoMetaBlob)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) IncrementRepoStars(repo string) error {
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(RepoMetadataBucket))
|
||||
|
||||
repoMetaBlob := buck.Get([]byte(repo))
|
||||
if repoMetaBlob == nil {
|
||||
return zerr.ErrRepoMetaNotFound
|
||||
}
|
||||
|
||||
var repoMeta RepoMetadata
|
||||
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
repoMeta.Stars++
|
||||
|
||||
repoMetaBlob, err = json.Marshal(repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buck.Put([]byte(repo), repoMetaBlob)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) DecrementRepoStars(repo string) error {
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(RepoMetadataBucket))
|
||||
|
||||
repoMetaBlob := buck.Get([]byte(repo))
|
||||
if repoMetaBlob == nil {
|
||||
return zerr.ErrRepoMetaNotFound
|
||||
}
|
||||
|
||||
var repoMeta RepoMetadata
|
||||
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if repoMeta.Stars > 0 {
|
||||
repoMeta.Stars--
|
||||
}
|
||||
|
||||
repoMetaBlob, err = json.Marshal(repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buck.Put([]byte(repo), repoMetaBlob)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) GetRepoStars(repo string) (int, error) {
|
||||
stars := 0
|
||||
|
||||
err := bdw.db.View(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(RepoMetadataBucket))
|
||||
|
||||
repoMetaBlob := buck.Get([]byte(repo))
|
||||
if repoMetaBlob == nil {
|
||||
return zerr.ErrRepoMetaNotFound
|
||||
}
|
||||
|
||||
var repoMeta RepoMetadata
|
||||
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stars = repoMeta.Stars
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return stars, err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) SetRepoDescription(repo, description string) error {
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(RepoMetadataBucket))
|
||||
|
||||
repoMetaBlob := buck.Get([]byte(repo))
|
||||
if repoMetaBlob == nil {
|
||||
return zerr.ErrRepoMetaNotFound
|
||||
}
|
||||
|
||||
var repoMeta RepoMetadata
|
||||
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
repoMeta.Description = description
|
||||
|
||||
repoMetaBlob, err = json.Marshal(repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buck.Put([]byte(repo), repoMetaBlob)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) SetRepoLogo(repo string, logoPath string) error {
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(RepoMetadataBucket))
|
||||
|
||||
repoMetaBlob := buck.Get([]byte(repo))
|
||||
if repoMetaBlob == nil {
|
||||
return zerr.ErrRepoMetaNotFound
|
||||
}
|
||||
|
||||
var repoMeta RepoMetadata
|
||||
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
repoMeta.LogoPath = logoPath
|
||||
|
||||
repoMetaBlob, err = json.Marshal(repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buck.Put([]byte(repo), repoMetaBlob)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) GetMultipleRepoMeta(ctx context.Context, filter func(repoMeta RepoMetadata) bool,
|
||||
requestedPage PageInput,
|
||||
) ([]RepoMetadata, error) {
|
||||
var (
|
||||
foundRepos = make([]RepoMetadata, 0)
|
||||
paginator PageFinder
|
||||
)
|
||||
|
||||
paginator, err := NewBaseRepoPageFinder(requestedPage.Limit, requestedPage.Offset, requestedPage.SortBy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = bdw.db.View(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(RepoMetadataBucket))
|
||||
|
||||
cursor := buck.Cursor()
|
||||
|
||||
for repoName, repoMetaBlob := cursor.First(); repoName != nil; repoName, repoMetaBlob = cursor.Next() {
|
||||
if ok, err := repoIsUserAvailable(ctx, string(repoName)); !ok || err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
repoMeta := RepoMetadata{}
|
||||
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if filter(repoMeta) {
|
||||
paginator.Add(DetailedRepoMeta{
|
||||
RepoMeta: repoMeta,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
foundRepos = paginator.Page()
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return foundRepos, err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) IncrementManifestDownloads(manifestDigest string) error {
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(ManifestMetadataBucket))
|
||||
|
||||
manifestMetaBlob := buck.Get([]byte(manifestDigest))
|
||||
if manifestMetaBlob == nil {
|
||||
return zerr.ErrManifestMetaNotFound
|
||||
}
|
||||
|
||||
var manifestMeta ManifestMetadata
|
||||
|
||||
err := json.Unmarshal(manifestMetaBlob, &manifestMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
manifestMeta.DownloadCount++
|
||||
|
||||
manifestMetaBlob, err = json.Marshal(manifestMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buck.Put([]byte(manifestDigest), manifestMetaBlob)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) AddManifestSignature(manifestDigest string, sigMeta SignatureMetadata) error {
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(ManifestMetadataBucket))
|
||||
|
||||
manifestMetaBlob := buck.Get([]byte(manifestDigest))
|
||||
if manifestMetaBlob == nil {
|
||||
return zerr.ErrManifestMetaNotFound
|
||||
}
|
||||
|
||||
var manifestMeta ManifestMetadata
|
||||
|
||||
err := json.Unmarshal(manifestMetaBlob, &manifestMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
manifestMeta.Signatures[sigMeta.SignatureType] = append(manifestMeta.Signatures[sigMeta.SignatureType],
|
||||
sigMeta.SignatureDigest)
|
||||
|
||||
manifestMetaBlob, err = json.Marshal(manifestMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buck.Put([]byte(manifestDigest), manifestMetaBlob)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) DeleteSignature(manifestDigest string, sigMeta SignatureMetadata) error {
|
||||
err := bdw.db.Update(func(tx *bolt.Tx) error {
|
||||
buck := tx.Bucket([]byte(ManifestMetadataBucket))
|
||||
|
||||
manifestMetaBlob := buck.Get([]byte(manifestDigest))
|
||||
if manifestMetaBlob == nil {
|
||||
return zerr.ErrManifestMetaNotFound
|
||||
}
|
||||
|
||||
var manifestMeta ManifestMetadata
|
||||
|
||||
err := json.Unmarshal(manifestMetaBlob, &manifestMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sigType := sigMeta.SignatureType
|
||||
|
||||
for i, v := range manifestMeta.Signatures[sigType] {
|
||||
if v == sigMeta.SignatureDigest {
|
||||
signaturesCount := len(manifestMeta.Signatures[sigType])
|
||||
|
||||
if signaturesCount < 1 {
|
||||
manifestMeta.Signatures[sigType] = []string{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// put element to be deleted at the end of the array
|
||||
manifestMeta.Signatures[sigType][i] = manifestMeta.Signatures[sigType][signaturesCount-1]
|
||||
|
||||
// trim the last element
|
||||
manifestMeta.Signatures[sigType] = manifestMeta.Signatures[sigType][:signaturesCount-1]
|
||||
|
||||
manifestMetaBlob, err = json.Marshal(manifestMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return buck.Put([]byte(manifestDigest), manifestMetaBlob)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) SearchRepos(ctx context.Context, searchText string, requestedPage PageInput,
|
||||
) ([]RepoMetadata, map[string]ManifestMetadata, error) {
|
||||
var (
|
||||
foundRepos = make([]RepoMetadata, 0)
|
||||
foundManifestMetadataMap = make(map[string]ManifestMetadata)
|
||||
paginator PageFinder
|
||||
)
|
||||
|
||||
paginator, err := NewBaseRepoPageFinder(requestedPage.Limit, requestedPage.Offset, requestedPage.SortBy)
|
||||
if err != nil {
|
||||
return []RepoMetadata{}, map[string]ManifestMetadata{}, err
|
||||
}
|
||||
|
||||
err = bdw.db.View(func(tx *bolt.Tx) error {
|
||||
var (
|
||||
manifestMetadataMap = make(map[string]ManifestMetadata)
|
||||
repoBuck = tx.Bucket([]byte(RepoMetadataBucket))
|
||||
manifestBuck = tx.Bucket([]byte(ManifestMetadataBucket))
|
||||
)
|
||||
|
||||
cursor := repoBuck.Cursor()
|
||||
|
||||
for repoName, repoMetaBlob := cursor.First(); repoName != nil; repoName, repoMetaBlob = cursor.Next() {
|
||||
if ok, err := repoIsUserAvailable(ctx, string(repoName)); !ok || err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
repoMeta := RepoMetadata{}
|
||||
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if score := strings.Index(string(repoName), searchText); score != -1 {
|
||||
var (
|
||||
// sorting specific values that need to be calculated based on all manifests from the repo
|
||||
repoDownloads = 0
|
||||
repoLastUpdated time.Time
|
||||
)
|
||||
|
||||
for _, manifestDigest := range repoMeta.Tags {
|
||||
if _, manifestExists := manifestMetadataMap[manifestDigest]; manifestExists {
|
||||
continue
|
||||
}
|
||||
|
||||
manifestMetaBlob := manifestBuck.Get([]byte(manifestDigest))
|
||||
if manifestMetaBlob == nil {
|
||||
return zerr.ErrManifestMetaNotFound
|
||||
}
|
||||
|
||||
var manifestMeta ManifestMetadata
|
||||
|
||||
err := json.Unmarshal(manifestMetaBlob, &manifestMeta)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "repodb: error while unmarshaling manifest metadata for digest %s", manifestDigest)
|
||||
}
|
||||
|
||||
repoDownloads += manifestMeta.DownloadCount
|
||||
|
||||
imageLastUpdated, err := getImageLastUpdatedTimestamp(manifestMeta.ConfigBlob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "repodb: error while unmarshaling image config referenced by digest %s", manifestDigest)
|
||||
}
|
||||
|
||||
if repoLastUpdated.Before(imageLastUpdated) {
|
||||
repoLastUpdated = imageLastUpdated
|
||||
}
|
||||
|
||||
manifestMetadataMap[manifestDigest] = manifestMeta
|
||||
}
|
||||
|
||||
paginator.Add(DetailedRepoMeta{
|
||||
RepoMeta: repoMeta,
|
||||
Score: score,
|
||||
Downloads: repoDownloads,
|
||||
UpdateTime: repoLastUpdated,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
foundRepos = paginator.Page()
|
||||
|
||||
// keep just the manifestMeta we need
|
||||
for _, repoMeta := range foundRepos {
|
||||
for _, manifestDigest := range repoMeta.Tags {
|
||||
foundManifestMetadataMap[manifestDigest] = manifestMetadataMap[manifestDigest]
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return foundRepos, foundManifestMetadataMap, err
|
||||
}
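
A short caller sketch may help with the shape of the result: SearchRepos returns the repo page plus only the manifest metadata referenced by that page. This is illustrative only; searchAlpineRepos is a hypothetical helper, rdb is an already-opened BoltDBWrapper, and the context, fmt and repodb imports are assumed. With no access-control data on the context, repoIsUserAvailable lets every repo through.

// searchAlpineRepos is a hypothetical helper, not part of this change.
func searchAlpineRepos(rdb *repodb.BoltDBWrapper) error {
    // Two repos per page, sorted by total download count (descending).
    page := repodb.PageInput{Limit: 2, Offset: 0, SortBy: repodb.Downloads}

    repos, manifestMetas, err := rdb.SearchRepos(context.Background(), "alpine", page)
    if err != nil {
        return err
    }

    for _, repoMeta := range repos {
        fmt.Println(repoMeta.Name, "stars:", repoMeta.Stars, "tags:", len(repoMeta.Tags))
    }

    fmt.Println("manifests referenced by this page:", len(manifestMetas))

    return nil
}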
|
||||
|
||||
func getImageLastUpdatedTimestamp(configBlob []byte) (time.Time, error) {
|
||||
var (
|
||||
configContent ispec.Image
|
||||
timeStamp time.Time
|
||||
)
|
||||
|
||||
err := json.Unmarshal(configBlob, &configContent)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
if len(configContent.History) != 0 {
|
||||
timeStamp = *configContent.History[0].Created
|
||||
} else {
|
||||
timeStamp = time.Time{}
|
||||
}
|
||||
|
||||
return timeStamp, nil
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) SearchTags(ctx context.Context, searchText string, requestedPage PageInput,
|
||||
) ([]RepoMetadata, map[string]ManifestMetadata, error) {
|
||||
var (
|
||||
foundRepos = make([]RepoMetadata, 0)
|
||||
foundManifestMetadataMap = make(map[string]ManifestMetadata)
|
||||
|
||||
paginator PageFinder
|
||||
)
|
||||
|
||||
paginator, err := NewBaseImagePageFinder(requestedPage.Limit, requestedPage.Offset, requestedPage.SortBy)
|
||||
if err != nil {
|
||||
return []RepoMetadata{}, map[string]ManifestMetadata{}, err
|
||||
}
|
||||
|
||||
searchRepo, searchTag, err := getRepoTag(searchText)
|
||||
if err != nil {
|
||||
return []RepoMetadata{}, map[string]ManifestMetadata{},
|
||||
errors.Wrap(err, "repodb: error while parsing search text, invalid format")
|
||||
}
|
||||
|
||||
err = bdw.db.View(func(tx *bolt.Tx) error {
|
||||
var (
|
||||
manifestMetadataMap = make(map[string]ManifestMetadata)
|
||||
repoBuck = tx.Bucket([]byte(RepoMetadataBucket))
|
||||
manifestBuck = tx.Bucket([]byte(ManifestMetadataBucket))
|
||||
cursor = repoBuck.Cursor()
|
||||
)
|
||||
|
||||
repoName, repoMetaBlob := cursor.Seek([]byte(searchRepo))
|
||||
|
||||
for ; repoName != nil; repoName, repoMetaBlob = cursor.Next() {
|
||||
if ok, err := repoIsUserAvailable(ctx, string(repoName)); !ok || err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
repoMeta := RepoMetadata{}
|
||||
|
||||
err := json.Unmarshal(repoMetaBlob, &repoMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if string(repoName) == searchRepo {
|
||||
matchedTags := make(map[string]string)
|
||||
// take all manifestMetas
|
||||
for tag, manifestDigest := range repoMeta.Tags {
|
||||
if !strings.HasPrefix(tag, searchTag) {
|
||||
continue
|
||||
}
|
||||
|
||||
matchedTags[tag] = manifestDigest
|
||||
|
||||
// in case tags reference the same manifest we don't download from DB multiple times
|
||||
if manifestMeta, manifestExists := manifestMetadataMap[manifestDigest]; manifestExists {
|
||||
manifestMetadataMap[manifestDigest] = manifestMeta
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
manifestMetaBlob := manifestBuck.Get([]byte(manifestDigest))
|
||||
if manifestMetaBlob == nil {
|
||||
return zerr.ErrManifestMetaNotFound
|
||||
}
|
||||
|
||||
var manifestMeta ManifestMetadata
|
||||
|
||||
err := json.Unmarshal(manifestMetaBlob, &manifestMeta)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "repodb: error while unmarshaling manifest metadata for digest %s", manifestDigest)
|
||||
}
|
||||
|
||||
manifestMetadataMap[manifestDigest] = manifestMeta
|
||||
}
|
||||
|
||||
repoMeta.Tags = matchedTags
|
||||
|
||||
paginator.Add(DetailedRepoMeta{
|
||||
RepoMeta: repoMeta,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
foundRepos = paginator.Page()
|
||||
|
||||
// keep just the manifestMeta we need
|
||||
for _, repoMeta := range foundRepos {
|
||||
for _, manifestDigest := range repoMeta.Tags {
|
||||
foundManifestMetadataMap[manifestDigest] = manifestMetadataMap[manifestDigest]
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return foundRepos, foundManifestMetadataMap, err
|
||||
}
|
||||
|
||||
func getRepoTag(searchText string) (string, string, error) {
|
||||
const repoTagCount = 2
|
||||
|
||||
splitSlice := strings.Split(searchText, ":")
|
||||
|
||||
if len(splitSlice) != repoTagCount {
|
||||
return "", "", errors.New("invalid format for tag search, not following repo:tag")
|
||||
}
|
||||
|
||||
repo := strings.TrimSpace(splitSlice[0])
|
||||
tag := strings.TrimSpace(splitSlice[1])
|
||||
|
||||
return repo, tag, nil
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) SearchDigests(ctx context.Context, searchText string, requestedPage PageInput,
|
||||
) ([]RepoMetadata, map[string]ManifestMetadata, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) SearchLayers(ctx context.Context, searchText string, requestedPage PageInput,
|
||||
) ([]RepoMetadata, map[string]ManifestMetadata, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) SearchForAscendantImages(ctx context.Context, searchText string, requestedPage PageInput,
|
||||
) ([]RepoMetadata, map[string]ManifestMetadata, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (bdw BoltDBWrapper) SearchForDescendantImages(ctx context.Context, searchText string, requestedPage PageInput,
|
||||
) ([]RepoMetadata, map[string]ManifestMetadata, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
type BoltDBParameters struct {
|
||||
RootDir string
|
||||
}
|
||||
|
||||
type BoltDBWrapperFactory struct{}
|
||||
|
||||
func (bwf BoltDBWrapperFactory) Create(parameters interface{}) (RepoDB, error) {
|
||||
properParameters, ok := parameters.(BoltDBParameters)
|
||||
if !ok {
|
||||
panic("Failed type assertion")
|
||||
}
|
||||
|
||||
return NewBotDBWrapper(properParameters)
|
||||
}
|
||||
|
||||
func repoIsUserAvailable(ctx context.Context, repoName string) (bool, error) {
|
||||
authzCtxKey := localCtx.GetContextKey()
|
||||
|
||||
if authCtx := ctx.Value(authzCtxKey); authCtx != nil {
|
||||
acCtx, ok := authCtx.(localCtx.AccessControlContext)
|
||||
if !ok {
|
||||
err := ErrBadCtxFormat
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
if acCtx.IsAdmin || matchesRepo(acCtx.GlobPatterns, repoName) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// matchesRepo reports whether the user has rights on 'repository', based on the longest matching glob pattern.
|
||||
func matchesRepo(globPatterns map[string]bool, repository string) bool {
|
||||
var longestMatchedPattern string
|
||||
|
||||
// because of the longest path matching rule, we need to check all patterns from config
|
||||
for pattern := range globPatterns {
|
||||
matched, err := glob.Match(pattern, repository)
|
||||
if err == nil {
|
||||
if matched && len(pattern) > len(longestMatchedPattern) {
|
||||
longestMatchedPattern = pattern
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
allowed := globPatterns[longestMatchedPattern]
|
||||
|
||||
return allowed
|
||||
}
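
Since the longest-path rule is what decides between overlapping patterns, a standalone illustration may be useful. This sketch is not from the change; it re-runs the same rule with the doublestar package imported above and two made-up patterns.

package main

import (
    "fmt"

    glob "github.com/bmatcuk/doublestar/v4"
)

func main() {
    // true grants access to repos matching the pattern, false denies it.
    patterns := map[string]bool{
        "**":          true,  // everything is allowed by default...
        "internal/**": false, // ...except the internal namespace
    }

    repository := "internal/tools/zot-test"

    longestMatchedPattern := ""
    allowed := false

    for pattern, allow := range patterns {
        matched, err := glob.Match(pattern, repository)
        if err == nil && matched && len(pattern) > len(longestMatchedPattern) {
            longestMatchedPattern = pattern
            allowed = allow
        }
    }

    // The deny rule wins because "internal/**" is the longest matching pattern.
    fmt.Println(longestMatchedPattern, allowed) // internal/** false
}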
|
pkg/storage/repodb/boltdb_wrapper_test.go (new file, 1114 lines; diff suppressed because it is too large)
pkg/storage/repodb/common.go (new file, 61 lines)
|
@ -0,0 +1,61 @@
|
|||
package repodb
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type DetailedRepoMeta struct {
|
||||
RepoMeta RepoMetadata
|
||||
Score int
|
||||
Downloads int
|
||||
UpdateTime time.Time
|
||||
}
|
||||
|
||||
func SortFunctions() map[SortCriteria]func(pageBuffer []DetailedRepoMeta) func(i, j int) bool {
|
||||
return map[SortCriteria]func(pageBuffer []DetailedRepoMeta) func(i, j int) bool{
|
||||
AlphabeticAsc: SortByAlphabeticAsc,
|
||||
AlphabeticDsc: SortByAlphabeticDsc,
|
||||
Relevance: SortByRelevance,
|
||||
UpdateTime: SortByUpdateTime,
|
||||
Stars: SortByStars,
|
||||
Downloads: SortByDownloads,
|
||||
}
|
||||
}
|
||||
|
||||
func SortByAlphabeticAsc(pageBuffer []DetailedRepoMeta) func(i, j int) bool {
|
||||
return func(i, j int) bool {
|
||||
return pageBuffer[i].RepoMeta.Name < pageBuffer[j].RepoMeta.Name
|
||||
}
|
||||
}
|
||||
|
||||
func SortByAlphabeticDsc(pageBuffer []DetailedRepoMeta) func(i, j int) bool {
|
||||
return func(i, j int) bool {
|
||||
return pageBuffer[i].RepoMeta.Name > pageBuffer[j].RepoMeta.Name
|
||||
}
|
||||
}
|
||||
|
||||
func SortByRelevance(pageBuffer []DetailedRepoMeta) func(i, j int) bool {
|
||||
return func(i, j int) bool {
|
||||
return pageBuffer[i].Score < pageBuffer[j].Score
|
||||
}
|
||||
}
|
||||
|
||||
// SortByUpdateTime sorting descending by time.
|
||||
func SortByUpdateTime(pageBuffer []DetailedRepoMeta) func(i, j int) bool {
|
||||
return func(i, j int) bool {
|
||||
return pageBuffer[i].UpdateTime.After(pageBuffer[j].UpdateTime)
|
||||
}
|
||||
}
|
||||
|
||||
func SortByStars(pageBuffer []DetailedRepoMeta) func(i, j int) bool {
|
||||
return func(i, j int) bool {
|
||||
return pageBuffer[i].RepoMeta.Stars > pageBuffer[j].RepoMeta.Stars
|
||||
}
|
||||
}
|
||||
|
||||
// SortByDownloads returns a comparison function for descendant sorting by downloads.
|
||||
func SortByDownloads(pageBuffer []DetailedRepoMeta) func(i, j int) bool {
|
||||
return func(i, j int) bool {
|
||||
return pageBuffer[i].Downloads > pageBuffer[j].Downloads
|
||||
}
|
||||
}
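
These comparison builders are designed to be handed straight to sort.Slice. A minimal sketch, not part of this change, sorting a buffer by download count:

package main

import (
    "fmt"
    "sort"

    "zotregistry.io/zot/pkg/storage/repodb"
)

func main() {
    buffer := []repodb.DetailedRepoMeta{
        {RepoMeta: repodb.RepoMetadata{Name: "repo1"}, Downloads: 10},
        {RepoMeta: repodb.RepoMetadata{Name: "repo2"}, Downloads: 50},
        {RepoMeta: repodb.RepoMetadata{Name: "repo3"}, Downloads: 25},
    }

    // Pick the comparison function by criteria and hand it to sort.Slice.
    sort.Slice(buffer, repodb.SortFunctions()[repodb.Downloads](buffer))

    for _, drm := range buffer {
        fmt.Println(drm.RepoMeta.Name, drm.Downloads) // repo2 50, repo3 25, repo1 10
    }
}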
|
pkg/storage/repodb/pagination.go (new file, 239 lines)
|
@ -0,0 +1,239 @@
|
|||
package repodb
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrLimitIsNegative = errors.New("pageturner: limit has negative value")
|
||||
ErrOffsetIsNegative = errors.New("pageturner: offset has negative value")
|
||||
ErrSortCriteriaNotSupported = errors.New("pageturner: the sort criteria is not supported")
|
||||
)
|
||||
|
||||
// PageFinder permits keeping a pool of objects using Add
|
||||
// and returning a specific page.
|
||||
type PageFinder interface {
|
||||
// Add adds an element to the pool used to compute the page
|
||||
Add(detailedRepoMeta DetailedRepoMeta)
|
||||
Page() []RepoMetadata
|
||||
Reset()
|
||||
}
|
||||
|
||||
// RepoPageFinder implements PageFinder. It manages RepoMeta objects and calculates the page
|
||||
// using the given limit, offset and sortBy option.
|
||||
type RepoPageFinder struct {
|
||||
limit int
|
||||
offset int
|
||||
sortBy SortCriteria
|
||||
pageBuffer []DetailedRepoMeta
|
||||
}
|
||||
|
||||
func NewBaseRepoPageFinder(limit, offset int, sortBy SortCriteria) (*RepoPageFinder, error) {
|
||||
if sortBy == "" {
|
||||
sortBy = AlphabeticAsc
|
||||
}
|
||||
|
||||
if limit < 0 {
|
||||
return nil, ErrLimitIsNegative
|
||||
}
|
||||
|
||||
if offset < 0 {
|
||||
return nil, ErrOffsetIsNegative
|
||||
}
|
||||
|
||||
if _, found := SortFunctions()[sortBy]; !found {
|
||||
return nil, errors.Wrapf(ErrSortCriteriaNotSupported, "sorting repos by '%s' is not supported", sortBy)
|
||||
}
|
||||
|
||||
return &RepoPageFinder{
|
||||
limit: limit,
|
||||
offset: offset,
|
||||
sortBy: sortBy,
|
||||
pageBuffer: make([]DetailedRepoMeta, 0, limit),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bpt *RepoPageFinder) Reset() {
|
||||
bpt.pageBuffer = []DetailedRepoMeta{}
|
||||
}
|
||||
|
||||
func (bpt *RepoPageFinder) Add(namedRepoMeta DetailedRepoMeta) {
|
||||
bpt.pageBuffer = append(bpt.pageBuffer, namedRepoMeta)
|
||||
}
|
||||
|
||||
func (bpt *RepoPageFinder) Page() []RepoMetadata {
|
||||
if len(bpt.pageBuffer) == 0 {
|
||||
return []RepoMetadata{}
|
||||
}
|
||||
|
||||
sort.Slice(bpt.pageBuffer, SortFunctions()[bpt.sortBy](bpt.pageBuffer))
|
||||
|
||||
start := bpt.offset
|
||||
end := bpt.offset + bpt.limit
|
||||
|
||||
// we'll return an empty array when the offset is greater than the number of elements
|
||||
if start >= len(bpt.pageBuffer) {
|
||||
start = len(bpt.pageBuffer)
|
||||
end = start
|
||||
}
|
||||
|
||||
detailedReposPage := bpt.pageBuffer[start:end]
|
||||
|
||||
if start == 0 && end == 0 {
|
||||
detailedReposPage = bpt.pageBuffer
|
||||
}
|
||||
|
||||
repos := make([]RepoMetadata, 0, len(detailedReposPage))
|
||||
|
||||
for _, drm := range detailedReposPage {
|
||||
repos = append(repos, drm.RepoMeta)
|
||||
}
|
||||
|
||||
return repos
|
||||
}
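
A short usage sketch, not part of this change: with limit 2 and offset 2 the finder returns the second alphabetical page of whatever was added.

package main

import (
    "fmt"

    "zotregistry.io/zot/pkg/storage/repodb"
)

func main() {
    finder, err := repodb.NewBaseRepoPageFinder(2, 2, repodb.AlphabeticAsc)
    if err != nil {
        panic(err)
    }

    for _, name := range []string{"zot", "alpine", "busybox", "ubuntu", "debian"} {
        finder.Add(repodb.DetailedRepoMeta{RepoMeta: repodb.RepoMetadata{Name: name}})
    }

    // Sorted order is alpine, busybox, debian, ubuntu, zot; offset 2 plus
    // limit 2 selects the second page.
    for _, repoMeta := range finder.Page() {
        fmt.Println(repoMeta.Name) // debian, then ubuntu
    }
}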
|
||||
|
||||
type ImagePageFinder struct {
|
||||
limit int
|
||||
offset int
|
||||
sortBy SortCriteria
|
||||
pageBuffer []DetailedRepoMeta
|
||||
}
|
||||
|
||||
func NewBaseImagePageFinder(limit, offset int, sortBy SortCriteria) (*ImagePageFinder, error) {
|
||||
if sortBy == "" {
|
||||
sortBy = AlphabeticAsc
|
||||
}
|
||||
|
||||
if limit < 0 {
|
||||
return nil, ErrLimitIsNegative
|
||||
}
|
||||
|
||||
if offset < 0 {
|
||||
return nil, ErrOffsetIsNegative
|
||||
}
|
||||
|
||||
if _, found := SortFunctions()[sortBy]; !found {
|
||||
return nil, errors.Wrapf(ErrSortCriteriaNotSupported, "sorting repos by '%s' is not supported", sortBy)
|
||||
}
|
||||
|
||||
return &ImagePageFinder{
|
||||
limit: limit,
|
||||
offset: offset,
|
||||
sortBy: sortBy,
|
||||
pageBuffer: make([]DetailedRepoMeta, 0, limit),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bpt *ImagePageFinder) Reset() {
|
||||
bpt.pageBuffer = []DetailedRepoMeta{}
|
||||
}
|
||||
|
||||
func (bpt *ImagePageFinder) Add(namedRepoMeta DetailedRepoMeta) {
|
||||
bpt.pageBuffer = append(bpt.pageBuffer, namedRepoMeta)
|
||||
}
|
||||
|
||||
func (bpt *ImagePageFinder) Page() []RepoMetadata {
|
||||
if len(bpt.pageBuffer) == 0 {
|
||||
return []RepoMetadata{}
|
||||
}
|
||||
|
||||
sort.Slice(bpt.pageBuffer, SortFunctions()[bpt.sortBy](bpt.pageBuffer))
|
||||
|
||||
repoStartIndex := 0
|
||||
tagStartIndex := 0
|
||||
remainingOffset := bpt.offset
|
||||
remainingLimit := bpt.limit
|
||||
|
||||
// bring cursor to position
|
||||
for _, drm := range bpt.pageBuffer {
|
||||
if remainingOffset < len(drm.RepoMeta.Tags) {
|
||||
tagStartIndex = remainingOffset
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
remainingOffset -= len(drm.RepoMeta.Tags)
|
||||
repoStartIndex++
|
||||
}
|
||||
|
||||
// offset is larger than the number of tags
|
||||
if repoStartIndex >= len(bpt.pageBuffer) {
|
||||
return []RepoMetadata{}
|
||||
}
|
||||
|
||||
repos := make([]RepoMetadata, 0)
|
||||
|
||||
// finish any partial repo tags (when tagStartIndex is not 0)
|
||||
|
||||
partialTags := map[string]string{}
|
||||
repoMeta := bpt.pageBuffer[repoStartIndex].RepoMeta
|
||||
|
||||
keys := make([]string, 0, len(repoMeta.Tags))
|
||||
for k := range repoMeta.Tags {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
for i := tagStartIndex; i < len(keys); i++ {
|
||||
tag := keys[i]
|
||||
|
||||
partialTags[tag] = repoMeta.Tags[tag]
|
||||
remainingLimit--
|
||||
|
||||
if remainingLimit == 0 {
|
||||
repoMeta.Tags = partialTags
|
||||
repos = append(repos, repoMeta)
|
||||
|
||||
return repos
|
||||
}
|
||||
}
|
||||
|
||||
repoMeta.Tags = partialTags
|
||||
repos = append(repos, repoMeta)
|
||||
repoStartIndex++
|
||||
|
||||
// continue with the remaining repos
|
||||
for i := repoStartIndex; i < len(bpt.pageBuffer); i++ {
|
||||
repoMeta := bpt.pageBuffer[i].RepoMeta
|
||||
|
||||
if len(repoMeta.Tags) > remainingLimit {
|
||||
partialTags := map[string]string{}
|
||||
|
||||
keys := make([]string, 0, len(repoMeta.Tags))
|
||||
for k := range repoMeta.Tags {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, tag := range keys {
|
||||
partialTags[tag] = repoMeta.Tags[tag]
|
||||
remainingLimit--
|
||||
|
||||
if remainingLimit == 0 {
|
||||
repoMeta.Tags = partialTags
|
||||
repos = append(repos, repoMeta)
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return repos
|
||||
}
|
||||
|
||||
// add the whole repo
|
||||
repos = append(repos, repoMeta)
|
||||
remainingLimit -= len(repoMeta.Tags)
|
||||
|
||||
if remainingLimit == 0 {
|
||||
return repos
|
||||
}
|
||||
}
|
||||
|
||||
// we arrive here when the limit is bigger than the number of tags
|
||||
|
||||
return repos
|
||||
}
|
pkg/storage/repodb/pagination_test.go (new file, 140 lines)
|
@ -0,0 +1,140 @@
|
|||
package repodb_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
"zotregistry.io/zot/pkg/storage/repodb"
|
||||
)
|
||||
|
||||
func TestPagination(t *testing.T) {
|
||||
Convey("Repo Pagination", t, func() {
|
||||
Convey("reset", func() {
|
||||
paginator, err := repodb.NewBaseRepoPageFinder(1, 0, repodb.AlphabeticAsc)
|
||||
So(err, ShouldBeNil)
|
||||
So(paginator, ShouldNotBeNil)
|
||||
|
||||
paginator.Add(repodb.DetailedRepoMeta{})
|
||||
paginator.Add(repodb.DetailedRepoMeta{})
|
||||
paginator.Add(repodb.DetailedRepoMeta{})
|
||||
|
||||
paginator.Reset()
|
||||
|
||||
So(paginator.Page(), ShouldBeEmpty)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Image Pagination", t, func() {
|
||||
Convey("create new paginator errors", func() {
|
||||
paginator, err := repodb.NewBaseImagePageFinder(-1, 10, repodb.AlphabeticAsc)
|
||||
So(paginator, ShouldBeNil)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
paginator, err = repodb.NewBaseImagePageFinder(2, -1, repodb.AlphabeticAsc)
|
||||
So(paginator, ShouldBeNil)
|
||||
So(err, ShouldNotBeNil)
|
||||
|
||||
paginator, err = repodb.NewBaseImagePageFinder(2, 1, "wrong sorting criteria")
|
||||
So(paginator, ShouldBeNil)
|
||||
So(err, ShouldNotBeNil)
|
||||
})
|
||||
|
||||
Convey("Reset", func() {
|
||||
paginator, err := repodb.NewBaseImagePageFinder(1, 0, repodb.AlphabeticAsc)
|
||||
So(err, ShouldBeNil)
|
||||
So(paginator, ShouldNotBeNil)
|
||||
|
||||
paginator.Add(repodb.DetailedRepoMeta{})
|
||||
paginator.Add(repodb.DetailedRepoMeta{})
|
||||
paginator.Add(repodb.DetailedRepoMeta{})
|
||||
|
||||
paginator.Reset()
|
||||
|
||||
So(paginator.Page(), ShouldBeEmpty)
|
||||
})
|
||||
|
||||
Convey("Page", func() {
|
||||
Convey("limit < len(tags)", func() {
|
||||
paginator, err := repodb.NewBaseImagePageFinder(5, 2, repodb.AlphabeticAsc)
|
||||
So(err, ShouldBeNil)
|
||||
So(paginator, ShouldNotBeNil)
|
||||
|
||||
paginator.Add(repodb.DetailedRepoMeta{
|
||||
RepoMeta: repodb.RepoMetadata{
|
||||
Name: "repo1",
|
||||
Tags: map[string]string{
|
||||
"tag1": "dig1",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
paginator.Add(repodb.DetailedRepoMeta{
|
||||
RepoMeta: repodb.RepoMetadata{
|
||||
Name: "repo2",
|
||||
Tags: map[string]string{
|
||||
"Tag1": "dig1",
|
||||
"Tag2": "dig2",
|
||||
"Tag3": "dig3",
|
||||
"Tag4": "dig4",
|
||||
},
|
||||
},
|
||||
})
|
||||
paginator.Add(repodb.DetailedRepoMeta{
|
||||
RepoMeta: repodb.RepoMetadata{
|
||||
Name: "repo3",
|
||||
Tags: map[string]string{
|
||||
"Tag11": "dig11",
|
||||
"Tag12": "dig12",
|
||||
"Tag13": "dig13",
|
||||
"Tag14": "dig14",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
result := paginator.Page()
|
||||
So(result[0].Tags, ShouldContainKey, "Tag2")
|
||||
So(result[0].Tags, ShouldContainKey, "Tag3")
|
||||
So(result[0].Tags, ShouldContainKey, "Tag4")
|
||||
So(result[1].Tags, ShouldContainKey, "Tag11")
|
||||
So(result[1].Tags, ShouldContainKey, "Tag12")
|
||||
})
|
||||
|
||||
Convey("limit > len(tags)", func() {
|
||||
paginator, err := repodb.NewBaseImagePageFinder(3, 0, repodb.AlphabeticAsc)
|
||||
So(err, ShouldBeNil)
|
||||
So(paginator, ShouldNotBeNil)
|
||||
|
||||
paginator.Add(repodb.DetailedRepoMeta{
|
||||
RepoMeta: repodb.RepoMetadata{
|
||||
Name: "repo1",
|
||||
Tags: map[string]string{
|
||||
"tag1": "dig1",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
paginator.Add(repodb.DetailedRepoMeta{
|
||||
RepoMeta: repodb.RepoMetadata{
|
||||
Name: "repo2",
|
||||
Tags: map[string]string{
|
||||
"Tag1": "dig1",
|
||||
},
|
||||
},
|
||||
})
|
||||
paginator.Add(repodb.DetailedRepoMeta{
|
||||
RepoMeta: repodb.RepoMetadata{
|
||||
Name: "repo3",
|
||||
Tags: map[string]string{
|
||||
"Tag11": "dig11",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
result := paginator.Page()
|
||||
So(result[0].Tags, ShouldContainKey, "tag1")
|
||||
So(result[1].Tags, ShouldContainKey, "Tag1")
|
||||
So(result[2].Tags, ShouldContainKey, "Tag11")
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
pkg/storage/repodb/repodb.go (new file, 122 lines)
|
@ -0,0 +1,122 @@
|
|||
package repodb
|
||||
|
||||
import "context"
|
||||
|
||||
// MetadataDB bucket names.
|
||||
const (
|
||||
ManifestMetadataBucket = "ManifestMetadata"
|
||||
UserMetadataBucket = "UserMeta"
|
||||
RepoMetadataBucket = "RepoMetadata"
|
||||
)
|
||||
|
||||
type RepoDB interface {
|
||||
// SetRepoDescription sets the repo description
|
||||
SetRepoDescription(repo, description string) error
|
||||
|
||||
// IncrementRepoStars adds 1 to the star count of a repo
|
||||
IncrementRepoStars(repo string) error
|
||||
|
||||
// DecrementRepoStars subtracts 1 from the star count of a repo
|
||||
DecrementRepoStars(repo string) error
|
||||
|
||||
// GetRepoStars returns the total number of stars a repo has
|
||||
GetRepoStars(repo string) (int, error)
|
||||
|
||||
// SetRepoLogo sets the path of the repo logo image
|
||||
SetRepoLogo(repo string, logoPath string) error
|
||||
|
||||
// SetRepoTag sets the tag of a manifest in the tag list of a repo
|
||||
SetRepoTag(repo string, tag string, manifestDigest string) error
|
||||
|
||||
// DeleteRepoTag deletes the tag from the tag list of a repo
|
||||
DeleteRepoTag(repo string, tag string) error
|
||||
|
||||
// GetRepoMeta returns RepoMetadata of a repo from the database
|
||||
GetRepoMeta(repo string) (RepoMetadata, error)
|
||||
|
||||
// GetMultipleRepoMeta returns information about all repositories as map[string]RepoMetadata filtered by the filter
|
||||
// function
|
||||
GetMultipleRepoMeta(ctx context.Context, filter func(repoMeta RepoMetadata) bool, requestedPage PageInput) (
|
||||
[]RepoMetadata, error)
|
||||
|
||||
// GetManifestMeta returns ManifestMetadata for a given manifest from the database
|
||||
GetManifestMeta(manifestDigest string) (ManifestMetadata, error)
|
||||
|
||||
// SetManifestMeta sets ManifestMetadata for a given manifest in the database
|
||||
SetManifestMeta(manifestDigest string, mm ManifestMetadata) error
|
||||
|
||||
// IncrementManifestDownloads adds 1 to the download count of a manifest
|
||||
IncrementManifestDownloads(manifestDigest string) error
|
||||
|
||||
// AddManifestSignature adds signature metadata to a given manifest in the database
|
||||
AddManifestSignature(manifestDigest string, sm SignatureMetadata) error
|
||||
|
||||
// DeleteSignature deletes signature metadata for a given manifest from the database
|
||||
DeleteSignature(manifestDigest string, sm SignatureMetadata) error
|
||||
|
||||
// SearchRepos searches for repos given a search string
|
||||
SearchRepos(ctx context.Context, searchText string, requestedPage PageInput) (
|
||||
[]RepoMetadata, map[string]ManifestMetadata, error)
|
||||
|
||||
// SearchTags searches for images(repo:tag) given a search string
|
||||
SearchTags(ctx context.Context, searchText string, requestedPage PageInput) (
|
||||
[]RepoMetadata, map[string]ManifestMetadata, error)
|
||||
|
||||
// SearchDigests searches for digests given a search string
|
||||
SearchDigests(ctx context.Context, searchText string, requestedPage PageInput) (
|
||||
[]RepoMetadata, map[string]ManifestMetadata, error)
|
||||
|
||||
// SearchLayers searches for layers given a search string
|
||||
SearchLayers(ctx context.Context, searchText string, requestedPage PageInput) (
|
||||
[]RepoMetadata, map[string]ManifestMetadata, error)
|
||||
|
||||
// SearchForAscendantImages searches for ascendant images given a search string
|
||||
SearchForAscendantImages(ctx context.Context, searchText string, requestedPage PageInput) (
|
||||
[]RepoMetadata, map[string]ManifestMetadata, error)
|
||||
|
||||
// SearchForDescendantImages searches for descendant images given a search string
|
||||
SearchForDescendantImages(ctx context.Context, searchText string, requestedPage PageInput) (
|
||||
[]RepoMetadata, map[string]ManifestMetadata, error)
|
||||
}
|
||||
|
||||
type ManifestMetadata struct {
|
||||
ManifestBlob []byte
|
||||
ConfigBlob []byte
|
||||
DownloadCount int
|
||||
Signatures map[string][]string
|
||||
Dependencies []string
|
||||
Dependants []string
|
||||
BlobsSize int
|
||||
BlobCount int
|
||||
}
|
||||
|
||||
type RepoMetadata struct {
|
||||
Name string
|
||||
Tags map[string]string
|
||||
Signatures []string
|
||||
Stars int
|
||||
Description string
|
||||
LogoPath string
|
||||
}
|
||||
|
||||
type SignatureMetadata struct {
|
||||
SignatureType string
|
||||
SignatureDigest string
|
||||
}
|
||||
|
||||
type SortCriteria string
|
||||
|
||||
const (
|
||||
Relevance = SortCriteria("RELEVANCE")
|
||||
UpdateTime = SortCriteria("UPDATE_TIME")
|
||||
AlphabeticAsc = SortCriteria("ALPHABETIC_ASC")
|
||||
AlphabeticDsc = SortCriteria("ALPHABETIC_DSC")
|
||||
Stars = SortCriteria("STARS")
|
||||
Downloads = SortCriteria("DOWNLOADS")
|
||||
)
|
||||
|
||||
type PageInput struct {
|
||||
Limit int
|
||||
Offset int
|
||||
SortBy SortCriteria
|
||||
}
|
pkg/storage/repodb/repodbfactory/repodb_factory.go (new file, 25 lines)
|
@ -0,0 +1,25 @@
|
|||
package repodbfactory
|
||||
|
||||
import (
|
||||
"zotregistry.io/zot/errors"
|
||||
"zotregistry.io/zot/pkg/storage/repodb"
|
||||
)
|
||||
|
||||
type RepoDBDriverFactory interface {
|
||||
Create(parameters interface{}) (repodb.RepoDB, error)
|
||||
}
|
||||
|
||||
func repoDBFactories() map[string]RepoDBDriverFactory {
|
||||
return map[string]RepoDBDriverFactory{
|
||||
"boltdb": repodb.BoltDBWrapperFactory{},
|
||||
}
|
||||
}
|
||||
|
||||
func Create(name string, parameters interface{}) (repodb.RepoDB, error) {
|
||||
driverFactory, ok := repoDBFactories()[name]
|
||||
if !ok {
|
||||
return nil, errors.ErrBadConfig
|
||||
}
|
||||
|
||||
return driverFactory.Create(parameters)
|
||||
}
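
For completeness, a minimal sketch (not from this change) of resolving a RepoDB through the factory; "boltdb" is the only driver name registered above, and the root directory is a placeholder.

package main

import (
    "fmt"

    "zotregistry.io/zot/pkg/storage/repodb"
    "zotregistry.io/zot/pkg/storage/repodb/repodbfactory"
)

func main() {
    driver, err := repodbfactory.Create("boltdb", repodb.BoltDBParameters{RootDir: "/tmp/zot-repodb"})
    if err != nil {
        panic(err)
    }

    // On an empty database this prints 0 together with a repo-not-found error.
    stars, err := driver.GetRepoStars("repo1")
    fmt.Println(stars, err)
}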
|
|
@ -7,11 +7,13 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
|
@ -20,6 +22,9 @@ import (
|
|||
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/umoci"
|
||||
"github.com/phayes/freeport"
|
||||
"github.com/sigstore/cosign/cmd/cosign/cli/generate"
|
||||
"github.com/sigstore/cosign/cmd/cosign/cli/options"
|
||||
"github.com/sigstore/cosign/cmd/cosign/cli/sign"
|
||||
"gopkg.in/resty.v1"
|
||||
)
|
||||
|
||||
|
@ -368,7 +373,7 @@ func UploadImage(img Image, baseURL, repo string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if ErrStatusCode(resp.StatusCode()) != http.StatusAccepted && ErrStatusCode(resp.StatusCode()) == -1 {
|
||||
if ErrStatusCode(resp.StatusCode()) != http.StatusAccepted || ErrStatusCode(resp.StatusCode()) == -1 {
|
||||
return ErrPostBlob
|
||||
}
|
||||
|
||||
|
@ -385,7 +390,7 @@ func UploadImage(img Image, baseURL, repo string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if ErrStatusCode(resp.StatusCode()) != http.StatusCreated && ErrStatusCode(resp.StatusCode()) == -1 {
|
||||
if ErrStatusCode(resp.StatusCode()) != http.StatusCreated || ErrStatusCode(resp.StatusCode()) == -1 {
|
||||
return ErrPostBlob
|
||||
}
|
||||
|
||||
|
@ -402,3 +407,164 @@ func UploadImage(img Image, baseURL, repo string) error {
|
|||
|
||||
return err
|
||||
}
|
||||
|
||||
func UploadImageWithBasicAuth(img Image, baseURL, repo, user, password string) error {
|
||||
for _, blob := range img.Layers {
|
||||
resp, err := resty.R().
|
||||
SetBasicAuth(user, password).
|
||||
Post(baseURL + "/v2/" + repo + "/blobs/uploads/")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode() != http.StatusAccepted {
|
||||
return ErrPostBlob
|
||||
}
|
||||
|
||||
loc := resp.Header().Get("Location")
|
||||
|
||||
digest := godigest.FromBytes(blob).String()
|
||||
|
||||
resp, err = resty.R().
|
||||
SetBasicAuth(user, password).
|
||||
SetHeader("Content-Length", fmt.Sprintf("%d", len(blob))).
|
||||
SetHeader("Content-Type", "application/octet-stream").
|
||||
SetQueryParam("digest", digest).
|
||||
SetBody(blob).
|
||||
Put(baseURL + loc)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode() != http.StatusCreated {
|
||||
return ErrPutBlob
|
||||
}
|
||||
}
|
||||
// upload config
|
||||
cblob, err := json.Marshal(img.Config)
|
||||
if err = Error(err); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cdigest := godigest.FromBytes(cblob)
|
||||
|
||||
resp, err := resty.R().
|
||||
SetBasicAuth(user, password).
|
||||
Post(baseURL + "/v2/" + repo + "/blobs/uploads/")
|
||||
if err = Error(err); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ErrStatusCode(resp.StatusCode()) != http.StatusAccepted || ErrStatusCode(resp.StatusCode()) == -1 {
|
||||
return ErrPostBlob
|
||||
}
|
||||
|
||||
loc := Location(baseURL, resp)
|
||||
|
||||
// uploading blob should get 201
|
||||
resp, err = resty.R().
|
||||
SetBasicAuth(user, password).
|
||||
SetHeader("Content-Length", fmt.Sprintf("%d", len(cblob))).
|
||||
SetHeader("Content-Type", "application/octet-stream").
|
||||
SetQueryParam("digest", cdigest.String()).
|
||||
SetBody(cblob).
|
||||
Put(loc)
|
||||
if err = Error(err); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ErrStatusCode(resp.StatusCode()) != http.StatusCreated || ErrStatusCode(resp.StatusCode()) == -1 {
|
||||
return ErrPostBlob
|
||||
}
|
||||
|
||||
// put manifest
|
||||
manifestBlob, err := json.Marshal(img.Manifest)
|
||||
if err = Error(err); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = resty.R().
|
||||
SetBasicAuth(user, password).
|
||||
SetHeader("Content-type", "application/vnd.oci.image.manifest.v1+json").
|
||||
SetBody(manifestBlob).
|
||||
Put(baseURL + "/v2/" + repo + "/manifests/" + img.Tag)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func SignImageUsingCosign(repoTag, port string) error {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() { _ = os.Chdir(cwd) }()
|
||||
|
||||
tdir, err := ioutil.TempDir("", "cosign")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer os.RemoveAll(tdir)
|
||||
|
||||
_ = os.Chdir(tdir)
|
||||
|
||||
// generate a keypair
|
||||
os.Setenv("COSIGN_PASSWORD", "")
|
||||
|
||||
err = generate.GenerateKeyPairCmd(context.TODO(), "", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imageURL := fmt.Sprintf("localhost:%s/%s", port, repoTag)
|
||||
|
||||
// sign the image
|
||||
return sign.SignCmd(&options.RootOptions{Verbose: true, Timeout: 1 * time.Minute},
|
||||
options.KeyOpts{KeyRef: path.Join(tdir, "cosign.key"), PassFunc: generate.GetPass},
|
||||
options.RegistryOptions{AllowInsecure: true},
|
||||
map[string]interface{}{"tag": "1.0"},
|
||||
[]string{imageURL},
|
||||
"", "", true, "", "", "", false, false, "", true)
|
||||
}
|
||||
|
||||
func SignImageUsingNotary(repoTag, port string) error {
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}

	defer func() { _ = os.Chdir(cwd) }()

	tdir, err := ioutil.TempDir("", "notation")
	if err != nil {
		return err
	}

	defer os.RemoveAll(tdir)

	_ = os.Chdir(tdir)

	_, err = exec.LookPath("notation")
	if err != nil {
		return err
	}

	os.Setenv("XDG_CONFIG_HOME", tdir)

	// generate a keypair
	cmd := exec.Command("notation", "cert", "generate-test", "--trust", "notation-sign-test")

	err = cmd.Run()
	if err != nil {
		return err
	}

	// sign the image
	image := fmt.Sprintf("localhost:%s/%s", port, repoTag)

	cmd = exec.Command("notation", "sign", "--key", "notation-sign-test", "--plain-http", image)

	return cmd.Run()
}
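The notary helper shells out to the external notation CLI, so a caller may prefer to skip rather than fail when the binary is missing; a sketch under the same pkg/test package assumption as above, with the repo tag and port again being placeholders.

package example_test

import (
	"os/exec"
	"testing"

	"zotregistry.io/zot/pkg/test"
)

func TestNotationSignature(t *testing.T) {
	if _, err := exec.LookPath("notation"); err != nil {
		t.Skip("notation CLI not installed")
	}

	port := "8080" // assumed port of an already-running zot instance

	if err := test.SignImageUsingNotary("repo:1.0", port); err != nil {
		t.Fatal(err)
	}
}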
@ -6,6 +6,7 @@ package test_test
import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path"
	"testing"

@ -13,6 +14,7 @@ import (
	"github.com/opencontainers/go-digest"
	ispec "github.com/opencontainers/image-spec/specs-go/v1"
	. "github.com/smartystreets/goconvey/convey"
	"golang.org/x/crypto/bcrypt"
	"zotregistry.io/zot/pkg/api"
	"zotregistry.io/zot/pkg/api/config"
	"zotregistry.io/zot/pkg/test"
@ -235,6 +237,78 @@ func TestUploadImage(t *testing.T) {
		So(err, ShouldBeNil)
	})

	Convey("Upload image with authentication", t, func() {
		tempDir := t.TempDir()
		conf := config.New()
		port := test.GetFreePort()
		baseURL := test.GetBaseURL(port)

		user1 := "test"
		password1 := "test"
		testString1 := getCredString(user1, password1)
		htpasswdPath := test.MakeHtpasswdFileFromString(testString1)
		defer os.Remove(htpasswdPath)
		conf.HTTP.Auth = &config.AuthConfig{
			HTPasswd: config.AuthHTPasswd{
				Path: htpasswdPath,
			},
		}

		conf.HTTP.Port = port

		conf.AccessControl = &config.AccessControlConfig{
			Repositories: config.Repositories{
				"repo": config.PolicyGroup{
					Policies: []config.Policy{
						{
							Users:   []string{user1},
							Actions: []string{"read", "create"},
						},
					},
					DefaultPolicy: []string{},
				},
				"inaccessibleRepo": config.PolicyGroup{
					Policies: []config.Policy{
						{
							Users:   []string{user1},
							Actions: []string{"create"},
						},
					},
					DefaultPolicy: []string{},
				},
			},
			AdminPolicy: config.Policy{
				Users:   []string{},
				Actions: []string{},
			},
		}

		ctlr := api.NewController(conf)

		ctlr.Config.Storage.RootDirectory = tempDir

		go startServer(ctlr)
		defer stopServer(ctlr)
		test.WaitTillServerReady(baseURL)

		Convey("Request fail while pushing layer", func() {
			err := test.UploadImageWithBasicAuth(test.Image{Layers: [][]byte{{1, 2, 3}}}, "badURL", "", "", "")
			So(err, ShouldNotBeNil)
		})
		Convey("Request status is not StatusOk while pushing layer", func() {
			err := test.UploadImageWithBasicAuth(test.Image{Layers: [][]byte{{1, 2, 3}}}, baseURL, "repo", "", "")
			So(err, ShouldNotBeNil)
		})
		Convey("Request fail while pushing config", func() {
			err := test.UploadImageWithBasicAuth(test.Image{}, "badURL", "", "", "")
			So(err, ShouldNotBeNil)
		})
		Convey("Request status is not StatusOk while pushing config", func() {
			err := test.UploadImageWithBasicAuth(test.Image{}, baseURL, "repo", "", "")
			So(err, ShouldNotBeNil)
		})
	})

	Convey("Blob upload wrong response status code", t, func() {
		port := test.GetFreePort()
		baseURL := test.GetBaseURL(port)

@ -329,6 +403,17 @@ func TestUploadImage(t *testing.T) {
	})
}

func getCredString(username, password string) string {
	hash, err := bcrypt.GenerateFromPassword([]byte(password), 10)
	if err != nil {
		panic(err)
	}

	usernameAndHash := fmt.Sprintf("%s:%s", username, string(hash))

	return usernameAndHash
}

func TestInjectUploadImage(t *testing.T) {
	Convey("Inject failures for unreachable lines", t, func() {
		port := test.GetFreePort()
@ -393,6 +478,81 @@ func TestInjectUploadImage(t *testing.T) {
	})
}

func TestInjectUploadImageWithBasicAuth(t *testing.T) {
	Convey("Inject failures for unreachable lines", t, func() {
		port := test.GetFreePort()
		baseURL := test.GetBaseURL(port)

		tempDir := t.TempDir()
		conf := config.New()
		conf.HTTP.Port = port
		conf.Storage.RootDirectory = tempDir

		user := "user"
		password := "password"
		testString := getCredString(user, password)
		htpasswdPath := test.MakeHtpasswdFileFromString(testString)
		defer os.Remove(htpasswdPath)
		conf.HTTP.Auth = &config.AuthConfig{
			HTPasswd: config.AuthHTPasswd{
				Path: htpasswdPath,
			},
		}

		ctlr := api.NewController(conf)
		go startServer(ctlr)
		defer stopServer(ctlr)

		test.WaitTillServerReady(baseURL)

		layerBlob := []byte("test")
		layerPath := path.Join(tempDir, "test", ".uploads")

		if _, err := os.Stat(layerPath); os.IsNotExist(err) {
			err = os.MkdirAll(layerPath, 0o700)
			if err != nil {
				t.Fatal(err)
			}
		}

		img := test.Image{
			Layers: [][]byte{
				layerBlob,
			}, // invalid format that will result in an error
			Config: ispec.Image{},
		}

		Convey("first marshal", func() {
			injected := test.InjectFailure(0)
			if injected {
				err := test.UploadImageWithBasicAuth(img, baseURL, "test", "user", "password")
				So(err, ShouldNotBeNil)
			}
		})
		Convey("CreateBlobUpload POST call", func() {
			injected := test.InjectFailure(1)
			if injected {
				err := test.UploadImageWithBasicAuth(img, baseURL, "test", "user", "password")
				So(err, ShouldNotBeNil)
			}
		})
		Convey("UpdateBlobUpload PUT call", func() {
			injected := test.InjectFailure(3)
			if injected {
				err := test.UploadImageWithBasicAuth(img, baseURL, "test", "user", "password")
				So(err, ShouldNotBeNil)
			}
		})
		Convey("second marshal", func() {
			injected := test.InjectFailure(5)
			if injected {
				err := test.UploadImageWithBasicAuth(img, baseURL, "test", "user", "password")
				So(err, ShouldNotBeNil)
			}
		})
	})
}

func startServer(c *api.Controller) {
	// this blocks
	ctx := context.Background()
206
pkg/test/mocks/search_db_mock.go
Normal file

@ -0,0 +1,206 @@
package mocks

import (
	"context"

	"zotregistry.io/zot/pkg/storage/repodb"
)

type RepoDBMock struct {
	SetRepoDescriptionFn func(repo, description string) error
	IncrementRepoStarsFn func(repo string) error
	DecrementRepoStarsFn func(repo string) error
	GetRepoStarsFn       func(repo string) (int, error)
	SetRepoLogoFn        func(repo string, logoPath string) error
	SetRepoTagFn         func(repo string, tag string, manifestDigest string) error
	DeleteRepoTagFn      func(repo string, tag string) error
	GetRepoMetaFn        func(repo string) (repodb.RepoMetadata, error)
	GetMultipleRepoMetaFn func(ctx context.Context, filter func(repoMeta repodb.RepoMetadata) bool,
		requestedPage repodb.PageInput) ([]repodb.RepoMetadata, error)
	GetManifestMetaFn            func(manifestDigest string) (repodb.ManifestMetadata, error)
	SetManifestMetaFn            func(manifestDigest string, mm repodb.ManifestMetadata) error
	IncrementManifestDownloadsFn func(manifestDigest string) error
	AddManifestSignatureFn       func(manifestDigest string, sm repodb.SignatureMetadata) error
	DeleteSignatureFn            func(manifestDigest string, sm repodb.SignatureMetadata) error
	SearchReposFn func(ctx context.Context, searchText string, requestedPage repodb.PageInput) (
		[]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error)
	SearchTagsFn func(ctx context.Context, searchText string, requestedPage repodb.PageInput) (
		[]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error)
	SearchDigestsFn func(ctx context.Context, searchText string, requestedPage repodb.PageInput) (
		[]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error)
	SearchLayersFn func(ctx context.Context, searchText string, requestedPage repodb.PageInput) (
		[]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error)
	SearchForAscendantImagesFn func(ctx context.Context, searchText string, requestedPage repodb.PageInput) (
		[]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error)
	SearchForDescendantImagesFn func(ctx context.Context, searchText string, requestedPage repodb.PageInput) (
		[]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error)
}

func (sdm RepoDBMock) SetRepoDescription(repo, description string) error {
	if sdm.SetRepoDescriptionFn != nil {
		return sdm.SetRepoDescriptionFn(repo, description)
	}

	return nil
}

func (sdm RepoDBMock) IncrementRepoStars(repo string) error {
	if sdm.IncrementRepoStarsFn != nil {
		return sdm.IncrementRepoStarsFn(repo)
	}

	return nil
}

func (sdm RepoDBMock) DecrementRepoStars(repo string) error {
	if sdm.DecrementRepoStarsFn != nil {
		return sdm.DecrementRepoStarsFn(repo)
	}

	return nil
}

func (sdm RepoDBMock) GetRepoStars(repo string) (int, error) {
	if sdm.GetRepoStarsFn != nil {
		return sdm.GetRepoStarsFn(repo)
	}

	return 0, nil
}

func (sdm RepoDBMock) SetRepoLogo(repo string, logoPath string) error {
	if sdm.SetRepoLogoFn != nil {
		return sdm.SetRepoLogoFn(repo, logoPath)
	}

	return nil
}

func (sdm RepoDBMock) SetRepoTag(repo string, tag string, manifestDigest string) error {
	if sdm.SetRepoTagFn != nil {
		return sdm.SetRepoTagFn(repo, tag, manifestDigest)
	}

	return nil
}

func (sdm RepoDBMock) DeleteRepoTag(repo string, tag string) error {
	if sdm.DeleteRepoTagFn != nil {
		return sdm.DeleteRepoTagFn(repo, tag)
	}

	return nil
}

func (sdm RepoDBMock) GetRepoMeta(repo string) (repodb.RepoMetadata, error) {
	if sdm.GetRepoMetaFn != nil {
		return sdm.GetRepoMetaFn(repo)
	}

	return repodb.RepoMetadata{}, nil
}

func (sdm RepoDBMock) GetMultipleRepoMeta(ctx context.Context, filter func(repoMeta repodb.RepoMetadata) bool,
	requestedPage repodb.PageInput,
) ([]repodb.RepoMetadata, error) {
	if sdm.GetMultipleRepoMetaFn != nil {
		return sdm.GetMultipleRepoMetaFn(ctx, filter, requestedPage)
	}

	return []repodb.RepoMetadata{}, nil
}

func (sdm RepoDBMock) GetManifestMeta(manifestDigest string) (repodb.ManifestMetadata, error) {
	if sdm.GetManifestMetaFn != nil {
		return sdm.GetManifestMetaFn(manifestDigest)
	}

	return repodb.ManifestMetadata{}, nil
}

func (sdm RepoDBMock) SetManifestMeta(manifestDigest string, mm repodb.ManifestMetadata) error {
	if sdm.SetManifestMetaFn != nil {
		return sdm.SetManifestMetaFn(manifestDigest, mm)
	}

	return nil
}

func (sdm RepoDBMock) IncrementManifestDownloads(manifestDigest string) error {
	if sdm.IncrementManifestDownloadsFn != nil {
		return sdm.IncrementManifestDownloadsFn(manifestDigest)
	}

	return nil
}

func (sdm RepoDBMock) AddManifestSignature(manifestDigest string, sm repodb.SignatureMetadata) error {
	if sdm.AddManifestSignatureFn != nil {
		return sdm.AddManifestSignatureFn(manifestDigest, sm)
	}

	return nil
}

func (sdm RepoDBMock) DeleteSignature(manifestDigest string, sm repodb.SignatureMetadata) error {
	if sdm.DeleteSignatureFn != nil {
		return sdm.DeleteSignatureFn(manifestDigest, sm)
	}

	return nil
}

func (sdm RepoDBMock) SearchRepos(ctx context.Context, searchText string, requestedPage repodb.PageInput,
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
	if sdm.SearchReposFn != nil {
		return sdm.SearchReposFn(ctx, searchText, requestedPage)
	}

	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
}

func (sdm RepoDBMock) SearchTags(ctx context.Context, searchText string, requestedPage repodb.PageInput,
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
	if sdm.SearchTagsFn != nil {
		return sdm.SearchTagsFn(ctx, searchText, requestedPage)
	}

	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
}

func (sdm RepoDBMock) SearchDigests(ctx context.Context, searchText string, requestedPage repodb.PageInput,
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
	if sdm.SearchDigestsFn != nil {
		return sdm.SearchDigestsFn(ctx, searchText, requestedPage)
	}

	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
}

func (sdm RepoDBMock) SearchLayers(ctx context.Context, searchText string, requestedPage repodb.PageInput,
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
	if sdm.SearchLayersFn != nil {
		return sdm.SearchLayersFn(ctx, searchText, requestedPage)
	}

	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
}

func (sdm RepoDBMock) SearchForAscendantImages(ctx context.Context, searchText string, requestedPage repodb.PageInput,
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
	if sdm.SearchForAscendantImagesFn != nil {
		return sdm.SearchForAscendantImagesFn(ctx, searchText, requestedPage)
	}

	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
}

func (sdm RepoDBMock) SearchForDescendantImages(ctx context.Context, searchText string,
	requestedPage repodb.PageInput,
) ([]repodb.RepoMetadata, map[string]repodb.ManifestMetadata, error) {
	if sdm.SearchForDescendantImagesFn != nil {
		return sdm.SearchForDescendantImagesFn(ctx, searchText, requestedPage)
	}

	return []repodb.RepoMetadata{}, map[string]repodb.ManifestMetadata{}, nil
}
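A short sketch of the intended use of this mock: a test overrides only the functions it cares about, and every other method falls back to the zero-value returns defined above. The repo name, tag, digest, and error value below are illustrative placeholders.

package example_test

import (
	"errors"
	"testing"

	"zotregistry.io/zot/pkg/storage/repodb"
	"zotregistry.io/zot/pkg/test/mocks"
)

func TestWithRepoDBMock(t *testing.T) {
	mock := mocks.RepoDBMock{
		GetRepoMetaFn: func(repo string) (repodb.RepoMetadata, error) {
			return repodb.RepoMetadata{}, errors.New("injected failure")
		},
	}

	// only GetRepoMeta is overridden; other calls fall through to the
	// default zero-value behaviour, e.g. SetRepoTag still succeeds
	if err := mock.SetRepoTag("zot-test", "0.0.1", "digest"); err != nil {
		t.Fatal(err)
	}

	if _, err := mock.GetRepoMeta("zot-test"); err == nil {
		t.Fatal("expected the injected error")
	}
}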