// zot/pkg/storage/common/common.go
package storage
import (
"bytes"
"encoding/json"
"errors"
"io"
"math/rand"
"path"
"strings"
"time"
notreg "github.com/notaryproject/notation-go/registry"
godigest "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/schema"
imeta "github.com/opencontainers/image-spec/specs-go"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
oras "github.com/oras-project/artifacts-spec/specs-go/v1"
"github.com/rs/zerolog"
zerr "zotregistry.io/zot/errors"
zcommon "zotregistry.io/zot/pkg/common"
"zotregistry.io/zot/pkg/scheduler"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
storageTypes "zotregistry.io/zot/pkg/storage/types"
)
const (
manifestWithEmptyLayersErrMsg = "layers: Array must have at least 1 items"
cosignSignatureTagSuffix = "sig"
)
func GetTagsByIndex(index ispec.Index) []string {
tags := make([]string, 0)
for _, manifest := range index.Manifests {
v, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok {
tags = append(tags, v)
}
}
return tags
}
func GetManifestDescByReference(index ispec.Index, reference string) (ispec.Descriptor, bool) {
var manifestDesc ispec.Descriptor
for _, manifest := range index.Manifests {
if reference == manifest.Digest.String() {
return manifest, true
}
v, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok && v == reference {
return manifest, true
}
}
return manifestDesc, false
}
// ValidateManifest validates a manifest body of the given media type before it
// is stored: schema validation plus lightweight existence checks for the blobs
// it references.
func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaType string, body []byte,
log zerolog.Logger,
) (godigest.Digest, error) {
// validate the manifest
if !IsSupportedMediaType(mediaType) {
log.Debug().Interface("actual", mediaType).
Msg("bad manifest media type")
return "", zerr.ErrBadManifest
}
if len(body) == 0 {
log.Debug().Int("len", len(body)).Msg("invalid body length")
return "", zerr.ErrBadManifest
}
switch mediaType {
case ispec.MediaTypeImageManifest:
var manifest ispec.Manifest
// validate manifest
if err := ValidateManifestSchema(body); err != nil {
log.Error().Err(err).Msg("OCIv1 image manifest schema validation failed")
return "", zerr.ErrBadManifest
}
if err := json.Unmarshal(body, &manifest); err != nil {
log.Error().Err(err).Msg("unable to unmarshal JSON")
return "", zerr.ErrBadManifest
}
if manifest.Config.MediaType == ispec.MediaTypeImageConfig {
// validate layers - a lightweight check that each referenced blob is present
for _, layer := range manifest.Layers {
if IsNonDistributable(layer.MediaType) {
log.Debug().Str("digest", layer.Digest.String()).Str("mediaType", layer.MediaType).
Msg("skip checking non-distributable layer exists")
continue
}
ok, _, err := imgStore.StatBlob(repo, layer.Digest)
if !ok || err != nil {
log.Error().Err(err).Str("digest", layer.Digest.String()).Msg("missing layer blob")
return "", zerr.ErrBadManifest
}
}
}
case oras.MediaTypeArtifactManifest:
var m oras.Descriptor
if err := json.Unmarshal(body, &m); err != nil {
log.Error().Err(err).Msg("unable to unmarshal JSON")
return "", zerr.ErrBadManifest
}
case ispec.MediaTypeImageIndex:
// validate manifest
if err := ValidateImageIndexSchema(body); err != nil {
log.Error().Err(err).Msg("OCIv1 image index manifest schema validation failed")
return "", err
}
var indexManifest ispec.Index
if err := json.Unmarshal(body, &indexManifest); err != nil {
log.Error().Err(err).Msg("unable to unmarshal JSON")
return "", zerr.ErrBadManifest
}
for _, manifest := range indexManifest.Manifests {
if ok, _, err := imgStore.StatBlob(repo, manifest.Digest); !ok || err != nil {
log.Error().Err(err).Str("digest", manifest.Digest.String()).Msg("missing manifest blob")
return "", zerr.ErrBadManifest
}
}
}
return "", nil
}
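// Illustrative call site (a sketch only; imgStore, body, and log are assumed to
// be in scope at the caller, and the repo/reference values are made up):
//
//	if _, err := ValidateManifest(imgStore, "myrepo", "v1.0.0",
//		ispec.MediaTypeImageManifest, body, log); err != nil {
//		// reject the upload; err is typically zerr.ErrBadManifest
//	}
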
func GetAndValidateRequestDigest(body []byte, digestStr string, log zerolog.Logger) (godigest.Digest, error) {
bodyDigest := godigest.FromBytes(body)
d, err := godigest.Parse(digestStr)
if err == nil {
if d.String() != bodyDigest.String() {
log.Error().Str("actual", bodyDigest.String()).Str("expected", d.String()).
Msg("manifest digest is not valid")
return "", zerr.ErrBadManifest
}
}
return bodyDigest, err
}
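// Contract note: the digest of the body is always computed. A well-formed but
// mismatched digestStr yields zerr.ErrBadManifest; a malformed digestStr
// propagates the parse error alongside the computed digest. A sketch of use at
// the HTTP layer (treating r as an *http.Request is an assumption made for the
// illustration):
//
//	dgst, err := GetAndValidateRequestDigest(body, r.FormValue("digest"), log)
//	if err != nil {
//		// reject the request
//	}
//	_ = dgst // equals godigest.FromBytes(body)
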
/*
CheckIfIndexNeedsUpdate verifies whether an index needs to be updated given a new manifest descriptor.
It reports whether the index needs updating; when an existing tag is being repointed at new content,
it also returns the digest that tag previously referenced.
*/
func CheckIfIndexNeedsUpdate(index *ispec.Index, desc *ispec.Descriptor,
log zerolog.Logger,
) (bool, godigest.Digest, error) {
var oldDgst godigest.Digest
var reference string
tag, ok := desc.Annotations[ispec.AnnotationRefName]
if ok {
reference = tag
} else {
reference = desc.Digest.String()
}
updateIndex := true
for midx, manifest := range index.Manifests {
manifest := manifest
if reference == manifest.Digest.String() {
// nothing changed, so don't update
updateIndex = false
break
}
v, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok && v == reference {
if manifest.Digest.String() == desc.Digest.String() {
// nothing changed, so don't update
updateIndex = false
break
}
// manifest contents have changed for the same tag,
// so update index.json descriptor
log.Info().
Int64("old size", manifest.Size).
Int64("new size", desc.Size).
Str("old digest", manifest.Digest.String()).
Str("new digest", desc.Digest.String()).
Str("old mediaType", manifest.MediaType).
Str("new mediaType", desc.MediaType).
Msg("updating existing tag with new manifest contents")
// changing media-type is disallowed!
if manifest.MediaType != desc.MediaType {
err := zerr.ErrBadManifest
log.Error().Err(err).
Str("old mediaType", manifest.MediaType).
Str("new mediaType", desc.MediaType).Msg("cannot change media-type")
return false, "", err
}
oldDesc := *desc
desc = &manifest
oldDgst = manifest.Digest
desc.Size = oldDesc.Size
desc.Digest = oldDesc.Digest
index.Manifests = append(index.Manifests[:midx], index.Manifests[midx+1:]...)
break
}
}
return updateIndex, oldDgst, nil
}
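// Illustrative use when handling a tag push (a sketch; index and desc are the
// caller's parsed index.json and the descriptor of the incoming manifest):
//
//	update, oldDgst, err := CheckIfIndexNeedsUpdate(&index, &desc, log)
//	if err == nil && update {
//		index.Manifests = append(index.Manifests, desc)
//		// a non-empty oldDgst names the manifest the tag previously pointed
//		// at, which may now be eligible for garbage collection
//	}
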
// GetIndex returns the contents of index.json.
func GetIndex(imgStore storageTypes.ImageStore, repo string, log zerolog.Logger) (ispec.Index, error) {
var index ispec.Index
buf, err := imgStore.GetIndexContent(repo)
if err != nil {
return index, err
}
if err := json.Unmarshal(buf, &index); err != nil {
log.Error().Err(err).Str("dir", path.Join(imgStore.RootDir(), repo)).Msg("invalid JSON")
return index, zerr.ErrRepoBadVersion
}
return index, nil
}
// GetImageIndex returns a multiarch (image index) manifest.
func GetImageIndex(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, log zerolog.Logger,
) (ispec.Index, error) {
var imageIndex ispec.Index
if err := digest.Validate(); err != nil {
return imageIndex, err
}
buf, err := imgStore.GetBlobContent(repo, digest)
if err != nil {
return imageIndex, err
}
indexPath := path.Join(imgStore.RootDir(), repo, "blobs",
digest.Algorithm().String(), digest.Encoded())
if err := json.Unmarshal(buf, &imageIndex); err != nil {
log.Error().Err(err).Str("path", indexPath).Msg("invalid JSON")
return imageIndex, err
}
return imageIndex, nil
}
func GetImageManifest(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, log zerolog.Logger,
) (ispec.Manifest, error) {
var manifestContent ispec.Manifest
manifestBlob, err := imgStore.GetBlobContent(repo, digest)
if err != nil {
return manifestContent, err
}
manifestPath := path.Join(imgStore.RootDir(), repo, "blobs",
digest.Algorithm().String(), digest.Encoded())
if err := json.Unmarshal(manifestBlob, &manifestContent); err != nil {
log.Error().Err(err).Str("path", manifestPath).Msg("invalid JSON")
return manifestContent, err
}
return manifestContent, nil
}
func RemoveManifestDescByReference(index *ispec.Index, reference string, detectCollisions bool,
) (ispec.Descriptor, error) {
var removedManifest ispec.Descriptor
var found bool
foundCount := 0
var outIndex ispec.Index
for _, manifest := range index.Manifests {
tag, ok := manifest.Annotations[ispec.AnnotationRefName]
if ok && tag == reference {
removedManifest = manifest
found = true
foundCount++
continue
} else if reference == manifest.Digest.String() {
removedManifest = manifest
found = true
foundCount++
continue
}
outIndex.Manifests = append(outIndex.Manifests, manifest)
}
if foundCount > 1 && detectCollisions {
return ispec.Descriptor{}, zerr.ErrManifestConflict
} else if !found {
return ispec.Descriptor{}, zerr.ErrManifestNotFound
}
index.Manifests = outIndex.Manifests
return removedManifest, nil
}
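// Note: with detectCollisions set, an ambiguous reference (e.g. two descriptors
// carrying the same tag annotation) yields zerr.ErrManifestConflict and leaves
// the index unmodified.
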
/*
UpdateIndexWithPrunedImageManifests unmarshals an image index and prunes from
index.json those of its constituent manifests that have no name (tag) and do not
appear in any other image index; otherwise GC could never clean them.
*/
func UpdateIndexWithPrunedImageManifests(imgStore storageTypes.ImageStore, index *ispec.Index, repo string,
desc ispec.Descriptor, oldDgst godigest.Digest, log zerolog.Logger,
) error {
if (desc.MediaType == ispec.MediaTypeImageIndex) && (oldDgst != "") {
otherImgIndexes := []ispec.Descriptor{}
for _, manifest := range index.Manifests {
if manifest.MediaType == ispec.MediaTypeImageIndex {
otherImgIndexes = append(otherImgIndexes, manifest)
}
}
otherImgIndexes = append(otherImgIndexes, desc)
prunedManifests, err := PruneImageManifestsFromIndex(imgStore, repo, oldDgst, *index, otherImgIndexes, log)
if err != nil {
return err
}
index.Manifests = prunedManifests
}
return nil
}
/*
Before an image index manifest is pushed to a repo, its constituent manifests
are pushed first. So when updating or removing such an image index manifest, we
also need to determine whether other image index manifests refer to the same
constituent manifests, so that those can be garbage-collected correctly.
PruneImageManifestsFromIndex is a helper routine to achieve this.
*/
func PruneImageManifestsFromIndex(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, //nolint:gocyclo,lll
outIndex ispec.Index, otherImgIndexes []ispec.Descriptor, log zerolog.Logger,
) ([]ispec.Descriptor, error) {
dir := path.Join(imgStore.RootDir(), repo)
indexPath := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
buf, err := imgStore.GetBlobContent(repo, digest)
if err != nil {
return nil, err
}
var imgIndex ispec.Index
if err := json.Unmarshal(buf, &imgIndex); err != nil {
log.Error().Err(err).Str("path", indexPath).Msg("invalid JSON")
return nil, err
}
inUse := map[string]uint{}
for _, manifest := range imgIndex.Manifests {
inUse[manifest.Digest.Encoded()]++
}
for _, otherIndex := range otherImgIndexes {
oindex, err := GetImageIndex(imgStore, repo, otherIndex.Digest, log)
if err != nil {
return nil, err
}
for _, omanifest := range oindex.Manifests {
_, ok := inUse[omanifest.Digest.Encoded()]
if ok {
inUse[omanifest.Digest.Encoded()]++
}
}
}
prunedManifests := []ispec.Descriptor{}
// for all manifests in the index, keep those that either have a tag or are
// used in other image indexes; drop only those referenced solely by the pruned index
for _, outManifest := range outIndex.Manifests {
if outManifest.MediaType != ispec.MediaTypeImageManifest {
prunedManifests = append(prunedManifests, outManifest)
continue
}
_, ok := outManifest.Annotations[ispec.AnnotationRefName]
if ok {
prunedManifests = append(prunedManifests, outManifest)
continue
}
count, ok := inUse[outManifest.Digest.Encoded()]
if !ok {
prunedManifests = append(prunedManifests, outManifest)
continue
}
if count != 1 {
// this manifest is in use in other image indexes
prunedManifests = append(prunedManifests, outManifest)
continue
}
}
return prunedManifests, nil
}
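// Concretely: if the index being replaced references manifest M and some other
// image index (or the incoming one) also references M, then inUse[M] > 1 and M
// is kept in index.json; if only the replaced index references M, inUse[M] == 1
// and M is dropped so that garbage collection can reclaim it.
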
func isBlobReferencedInManifest(imgStore storageTypes.ImageStore, repo string,
bdigest, mdigest godigest.Digest, log zerolog.Logger,
) (bool, error) {
if bdigest == mdigest {
return true, nil
}
manifestContent, err := GetImageManifest(imgStore, repo, mdigest, log)
if err != nil {
log.Error().Err(err).Str("repo", repo).Str("digest", mdigest.String()).
Msg("gc: failed to read manifest image")
return false, err
}
if bdigest == manifestContent.Config.Digest {
return true, nil
}
for _, layer := range manifestContent.Layers {
if bdigest == layer.Digest {
return true, nil
}
}
return false, nil
}
func isBlobReferencedInImageIndex(imgStore storageTypes.ImageStore, repo string,
digest godigest.Digest, index ispec.Index, log zerolog.Logger,
) (bool, error) {
for _, desc := range index.Manifests {
var found bool
switch desc.MediaType {
case ispec.MediaTypeImageIndex:
/* this branch is not strictly needed: by the time it is hit, each manifest
inside this index has already been checked individually, since all manifests
are referenced in index.json */
indexImage, err := GetImageIndex(imgStore, repo, desc.Digest, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", desc.Digest.String()).
Msg("failed to read multiarch(index) image")
return false, err
}
found, _ = isBlobReferencedInImageIndex(imgStore, repo, digest, indexImage, log)
case ispec.MediaTypeImageManifest:
found, _ = isBlobReferencedInManifest(imgStore, repo, digest, desc.Digest, log)
}
if found {
return true, nil
}
}
return false, nil
}
func IsBlobReferenced(imgStore storageTypes.ImageStore, repo string,
digest godigest.Digest, log zerolog.Logger,
) (bool, error) {
dir := path.Join(imgStore.RootDir(), repo)
if !imgStore.DirExists(dir) {
return false, zerr.ErrRepoNotFound
}
index, err := GetIndex(imgStore, repo, log)
if err != nil {
return false, err
}
return isBlobReferencedInImageIndex(imgStore, repo, digest, index, log)
}
func ApplyLinter(imgStore storageTypes.ImageStore, linter Lint, repo string, descriptor ispec.Descriptor,
) (bool, error) {
pass := true
// we'll skip anything that's not an image manifest
if descriptor.MediaType != ispec.MediaTypeImageManifest {
return pass, nil
}
if linter != nil && !IsSignature(descriptor) {
// lint new index with new manifest before writing to disk
pass, err := linter.Lint(repo, descriptor.Digest, imgStore)
if err != nil {
return false, err
}
if !pass {
return false, zerr.ErrImageLintAnnotations
}
}
return pass, nil
}
func IsSignature(descriptor ispec.Descriptor) bool {
tag := descriptor.Annotations[ispec.AnnotationRefName]
switch descriptor.MediaType {
case ispec.MediaTypeImageManifest:
// is cosign signature
if strings.HasPrefix(tag, "sha256-") && strings.HasSuffix(tag, cosignSignatureTagSuffix) {
return true
}
// is notation signature
if descriptor.ArtifactType == notreg.ArtifactTypeNotation {
return true
}
default:
return false
}
return false
}
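// For reference, cosign stores a signature as an image manifest tagged after the
// digest of the image it signs, e.g.
//
//	sha256-7d865e959b2466918c9863afca942d0fb89d7c9ac0c99bafc3749504ded97730.sig
//
// which is exactly the shape the prefix/suffix checks above match.
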
// GetOrasReferrers returns the ORAS artifact manifests in the repo whose subject
// digest equals gdigest, optionally filtered by artifact type.
func GetOrasReferrers(imgStore storageTypes.ImageStore, repo string, gdigest godigest.Digest, artifactType string,
log zerolog.Logger,
) ([]oras.Descriptor, error) {
if err := gdigest.Validate(); err != nil {
return nil, err
}
dir := path.Join(imgStore.RootDir(), repo)
if !imgStore.DirExists(dir) {
return nil, zerr.ErrRepoNotFound
}
index, err := GetIndex(imgStore, repo, log)
if err != nil {
return nil, err
}
found := false
result := []oras.Descriptor{}
for _, manifest := range index.Manifests {
if manifest.MediaType != oras.MediaTypeArtifactManifest {
continue
}
artManifest, err := GetOrasManifestByDigest(imgStore, repo, manifest.Digest, log)
if err != nil {
return nil, err
}
if artManifest.Subject.Digest != gdigest {
continue
}
// filter by artifact type
if artifactType != "" && artManifest.ArtifactType != artifactType {
continue
}
result = append(result, oras.Descriptor{
MediaType: manifest.MediaType,
ArtifactType: artManifest.ArtifactType,
Digest: manifest.Digest,
Size: manifest.Size,
Annotations: manifest.Annotations,
})
found = true
}
if !found {
return nil, zerr.ErrManifestNotFound
}
return result, nil
}
// GetReferrers returns an OCI image index whose manifests describe every manifest
// in the repo with a subject digest equal to gdigest, optionally filtered by the
// given artifact types.
func GetReferrers(imgStore storageTypes.ImageStore, repo string, gdigest godigest.Digest, artifactTypes []string,
log zerolog.Logger,
) (ispec.Index, error) {
nilIndex := ispec.Index{}
if err := gdigest.Validate(); err != nil {
return nilIndex, err
}
dir := path.Join(imgStore.RootDir(), repo)
if !imgStore.DirExists(dir) {
return nilIndex, zerr.ErrRepoNotFound
}
index, err := GetIndex(imgStore, repo, log)
if err != nil {
return nilIndex, err
}
result := []ispec.Descriptor{}
for _, descriptor := range index.Manifests {
if descriptor.Digest == gdigest {
continue
}
buf, err := imgStore.GetBlobContent(repo, descriptor.Digest)
if err != nil {
log.Error().Err(err).Str("blob", imgStore.BlobPath(repo, descriptor.Digest)).Msg("failed to read manifest")
if errors.Is(err, zerr.ErrBlobNotFound) {
return nilIndex, zerr.ErrManifestNotFound
}
return nilIndex, err
}
switch descriptor.MediaType {
case ispec.MediaTypeImageManifest:
var manifestContent ispec.Manifest
if err := json.Unmarshal(buf, &manifestContent); err != nil {
log.Error().Err(err).Str("manifest digest", descriptor.Digest.String()).Msg("invalid JSON")
return nilIndex, err
}
if manifestContent.Subject == nil || manifestContent.Subject.Digest != gdigest {
continue
}
// filter by artifact type
manifestArtifactType := zcommon.GetManifestArtifactType(manifestContent)
if len(artifactTypes) > 0 && !zcommon.Contains(artifactTypes, manifestArtifactType) {
continue
}
result = append(result, ispec.Descriptor{
MediaType: descriptor.MediaType,
ArtifactType: manifestArtifactType,
Size: descriptor.Size,
Digest: descriptor.Digest,
Annotations: manifestContent.Annotations,
})
case ispec.MediaTypeImageIndex:
var indexContent ispec.Index
if err := json.Unmarshal(buf, &indexContent); err != nil {
log.Error().Err(err).Str("manifest digest", descriptor.Digest.String()).Msg("invalid JSON")
return nilIndex, err
}
if indexContent.Subject == nil || indexContent.Subject.Digest != gdigest {
continue
}
indexArtifactType := zcommon.GetIndexArtifactType(indexContent)
if len(artifactTypes) > 0 && !zcommon.Contains(artifactTypes, indexArtifactType) {
continue
}
result = append(result, ispec.Descriptor{
MediaType: descriptor.MediaType,
ArtifactType: indexArtifactType,
Size: descriptor.Size,
Digest: descriptor.Digest,
Annotations: indexContent.Annotations,
})
}
}
index = ispec.Index{
Versioned: imeta.Versioned{SchemaVersion: storageConstants.SchemaVersion},
MediaType: ispec.MediaTypeImageIndex,
Manifests: result,
Annotations: map[string]string{},
}
return index, nil
}
func GetOrasManifestByDigest(imgStore storageTypes.ImageStore, repo string, digest godigest.Digest, log zerolog.Logger,
) (oras.Manifest, error) {
var artManifest oras.Manifest
blobPath := imgStore.BlobPath(repo, digest)
buf, err := imgStore.GetBlobContent(repo, digest)
if err != nil {
log.Error().Err(err).Str("blob", blobPath).Msg("failed to read manifest")
if errors.Is(err, zerr.ErrBlobNotFound) {
return artManifest, zerr.ErrManifestNotFound
}
return artManifest, err
}
if err := json.Unmarshal(buf, &artManifest); err != nil {
log.Error().Err(err).Str("blob", blobPath).Msg("invalid JSON")
return artManifest, err
}
return artManifest, nil
}
func IsSupportedMediaType(mediaType string) bool {
return mediaType == ispec.MediaTypeImageIndex ||
mediaType == ispec.MediaTypeImageManifest ||
mediaType == oras.MediaTypeArtifactManifest
}
func IsNonDistributable(mediaType string) bool {
return mediaType == ispec.MediaTypeImageLayerNonDistributable || //nolint:staticcheck
mediaType == ispec.MediaTypeImageLayerNonDistributableGzip || //nolint:staticcheck
mediaType == ispec.MediaTypeImageLayerNonDistributableZstd //nolint:staticcheck
}
func ValidateManifestSchema(buf []byte) error {
if err := schema.ValidatorMediaTypeManifest.Validate(bytes.NewBuffer(buf)); err != nil {
if !IsEmptyLayersError(err) {
return zerr.ErrBadManifest
}
}
return nil
}
func ValidateImageIndexSchema(buf []byte) error {
if err := schema.ValidatorMediaTypeImageIndex.Validate(bytes.NewBuffer(buf)); err != nil {
return zerr.ErrBadManifest
}
return nil
}
func IsEmptyLayersError(err error) bool {
var validationErr schema.ValidationError
if errors.As(err, &validationErr) {
return len(validationErr.Errs) == 1 && strings.Contains(err.Error(), manifestWithEmptyLayersErrMsg)
}
return false
}
/*
DedupeTaskGenerator takes all blob paths found in the storage.imageStore and groups them by digest;
for each digest, depending on the dedupe setting, it either dedupes the blobs or restores deduped
blobs to their original (undeduped) state, by creating a task per digest and pushing it to the
task scheduler.
*/
type DedupeTaskGenerator struct {
ImgStore storageTypes.ImageStore
// storage dedupe value
Dedupe bool
// the digest currently being processed and its duplicate blob paths
digest godigest.Digest
duplicateBlobs []string
/* store processed digests, used for iterating duplicateBlobs one by one
and generating a task for each unprocessed one */
lastDigests []godigest.Digest
done bool
Log zerolog.Logger
}
func (gen *DedupeTaskGenerator) Next() (scheduler.Task, error) {
var err error
// get all blobs from storage.imageStore and group them by digest
gen.digest, gen.duplicateBlobs, err = gen.ImgStore.GetNextDigestWithBlobPaths(gen.lastDigests)
if err != nil {
gen.Log.Error().Err(err).Msg("dedupe rebuild: failed to get next digest")
return nil, err
}
// if no digests left, then mark the task generator as done
if gen.digest == "" {
gen.Log.Info().Msg("dedupe rebuild: finished")
gen.done = true
return nil, nil
}
// mark digest as processed before running its task
gen.lastDigests = append(gen.lastDigests, gen.digest)
// generate rebuild dedupe task for this digest
return newDedupeTask(gen.ImgStore, gen.digest, gen.Dedupe, gen.duplicateBlobs, gen.Log), nil
}
func (gen *DedupeTaskGenerator) IsDone() bool {
return gen.done
}
func (gen *DedupeTaskGenerator) IsReady() bool {
return true
}
func (gen *DedupeTaskGenerator) Reset() {
gen.lastDigests = []godigest.Digest{}
gen.duplicateBlobs = []string{}
gen.digest = ""
gen.done = false
}
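// Wiring sketch (illustrative only: taskScheduler, the SubmitGenerator method,
// and the interval/priority values are assumptions about the scheduler package,
// not guarantees made by this file):
//
//	gen := &DedupeTaskGenerator{ImgStore: imgStore, Dedupe: true, Log: log}
//	taskScheduler.SubmitGenerator(gen, 30*time.Minute, scheduler.MediumPriority)
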
type dedupeTask struct {
imgStore storageTypes.ImageStore
// digest shared by the duplicate blobs
digest godigest.Digest
// blob paths having that same digest
duplicateBlobs []string
dedupe bool
log zerolog.Logger
}
func newDedupeTask(imgStore storageTypes.ImageStore, digest godigest.Digest, dedupe bool,
duplicateBlobs []string, log zerolog.Logger,
) *dedupeTask {
return &dedupeTask{imgStore, digest, duplicateBlobs, dedupe, log}
}
func (dt *dedupeTask) DoWork() error {
// run task
err := dt.imgStore.RunDedupeForDigest(dt.digest, dt.dedupe, dt.duplicateBlobs)
if err != nil {
// log it
dt.log.Error().Err(err).Str("digest", dt.digest.String()).Msg("rebuild dedupe: failed to rebuild digest")
}
return err
}
/*
GCTaskGenerator takes all repositories found in the storage.imageStore and runs garbage
collection on each one, by creating a task per repository and pushing it to the task scheduler.
*/
type GCTaskGenerator struct {
ImgStore storageTypes.ImageStore
lastRepo string
nextRun time.Time
done bool
rand *rand.Rand
}
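// getRandomDelay returns a jitter of up to 30 seconds; spacing out GC tasks for
// consecutive repositories keeps a registry with many repositories from running
// all of its GC work back to back.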
func (gen *GCTaskGenerator) getRandomDelay() int {
maxDelay := 30
return gen.rand.Intn(maxDelay)
}
func (gen *GCTaskGenerator) Next() (scheduler.Task, error) {
if gen.lastRepo == "" && gen.nextRun.IsZero() {
gen.rand = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) //nolint: gosec
}
delay := gen.getRandomDelay()
gen.nextRun = time.Now().Add(time.Duration(delay) * time.Second)
repo, err := gen.ImgStore.GetNextRepository(gen.lastRepo)
if err != nil && !errors.Is(err, io.EOF) {
return nil, err
}
if repo == "" {
gen.done = true
return nil, nil
}
gen.lastRepo = repo
return NewGCTask(gen.ImgStore, repo), nil
}
func (gen *GCTaskGenerator) IsDone() bool {
return gen.done
}
func (gen *GCTaskGenerator) IsReady() bool {
return time.Now().After(gen.nextRun)
}
func (gen *GCTaskGenerator) Reset() {
gen.lastRepo = ""
gen.done = false
gen.nextRun = time.Time{}
}
type gcTask struct {
imgStore storageTypes.ImageStore
repo string
}
func NewGCTask(imgStore storageTypes.ImageStore, repo string,
) *gcTask {
return &gcTask{imgStore, repo}
}
func (gct *gcTask) DoWork() error {
// run task
return gct.imgStore.RunGCRepo(gct.repo)
}