0
Fork 0
mirror of https://github.com/project-zot/zot.git synced 2024-12-30 22:34:13 -05:00

fix(digests): do not mandate sha256 as the only algorithm used for hashing blobs (#2075)

Signed-off-by: Andrei Aaron <aaaron@luxoft.com>
This commit is contained in:
Andrei Aaron 2024-07-19 19:56:31 +03:00 committed by GitHub
parent 6421d8b49a
commit 26be383aae
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 530 additions and 129 deletions

View file

@ -11167,6 +11167,211 @@ func RunAuthorizationTests(t *testing.T, client *resty.Client, baseURL, user str
})
}
// TestSupportedDigestAlgorithms verifies that images whose manifests/blobs are
// hashed with non-canonical digest algorithms (SHA512, SHA384) can be pushed
// (by tag and by digest, single-arch and multi-arch) and that the
// Docker-Content-Digest header returned on pull uses the expected algorithm.
func TestSupportedDigestAlgorithms(t *testing.T) {
	port := test.GetFreePort()
	baseURL := test.GetBaseURL(port)
	conf := config.New()
	conf.HTTP.Port = port
	dir := t.TempDir()
	ctlr := api.NewController(conf)
	ctlr.Config.Storage.RootDirectory = dir
	// Dedupe and GC are disabled so the on-disk blob layout stays stable
	// while the digest-algorithm behavior is being checked.
	ctlr.Config.Storage.Dedupe = false
	ctlr.Config.Storage.GC = false
	cm := test.NewControllerManager(ctlr)
	cm.StartAndWait(port)
	defer cm.StopServer()
	Convey("Test SHA512 single-arch image", t, func() {
		image := CreateImageWithDigestAlgorithm(godigest.SHA512).
			RandomLayers(1, 10).DefaultConfig().Build()
		name := "algo-sha512"
		tag := "singlearch"
		err := UploadImage(image, baseURL, name, tag)
		So(err, ShouldBeNil)
		client := resty.New()
		// The server picks canonical digests when tags are pushed
		// See https://github.com/opencontainers/distribution-spec/issues/494
		// It would be nice to be able to push tags with other digest algorithms and verify those are returned
		// but there is no way to specify a client preference
		// so all we can do is verify the correct algorithm is returned
		expectedDigestStr := image.DigestForAlgorithm(godigest.Canonical).String()
		verifyReturnedManifestDigest(t, client, baseURL, name, tag, expectedDigestStr)
		// Pulling by digest should echo back the same (canonical) digest.
		verifyReturnedManifestDigest(t, client, baseURL, name, expectedDigestStr, expectedDigestStr)
	})
	Convey("Test SHA512 single-arch image pushed by digest", t, func() {
		image := CreateImageWithDigestAlgorithm(godigest.SHA512).
			RandomLayers(1, 11).DefaultConfig().Build()
		name := "algo-sha512-2"
		// Pushed by digest (no tag), so the SHA512 digest itself is the reference.
		err := UploadImage(image, baseURL, name, image.DigestStr())
		So(err, ShouldBeNil)
		client := resty.New()
		expectedDigestStr := image.DigestForAlgorithm(godigest.SHA512).String()
		verifyReturnedManifestDigest(t, client, baseURL, name, expectedDigestStr, expectedDigestStr)
	})
	Convey("Test SHA384 single-arch image", t, func() {
		image := CreateImageWithDigestAlgorithm(godigest.SHA384).
			RandomLayers(1, 10).DefaultConfig().Build()
		name := "algo-sha384"
		tag := "singlearch"
		err := UploadImage(image, baseURL, name, tag)
		So(err, ShouldBeNil)
		client := resty.New()
		// The server picks canonical digests when tags are pushed
		// See https://github.com/opencontainers/distribution-spec/issues/494
		// It would be nice to be able to push tags with other digest algorithms and verify those are returned
		// but there is no way to specify a client preference
		// so all we can do is verify the correct algorithm is returned
		expectedDigestStr := image.DigestForAlgorithm(godigest.Canonical).String()
		verifyReturnedManifestDigest(t, client, baseURL, name, tag, expectedDigestStr)
		verifyReturnedManifestDigest(t, client, baseURL, name, expectedDigestStr, expectedDigestStr)
	})
	Convey("Test SHA512 multi-arch image", t, func() {
		subImage1 := CreateImageWithDigestAlgorithm(godigest.SHA512).RandomLayers(1, 10).
			DefaultConfig().Build()
		subImage2 := CreateImageWithDigestAlgorithm(godigest.SHA512).RandomLayers(1, 10).
			DefaultConfig().Build()
		multiarch := CreateMultiarchWithDigestAlgorithm(godigest.SHA512).
			Images([]Image{subImage1, subImage2}).Build()
		name := "algo-sha512"
		tag := "multiarch"
		err := UploadMultiarchImage(multiarch, baseURL, name, tag)
		So(err, ShouldBeNil)
		client := resty.New()
		// The server picks canonical digests when tags are pushed
		// See https://github.com/opencontainers/distribution-spec/issues/494
		// It would be nice to be able to push tags with other digest algorithms and verify those are returned
		// but there is no way to specify a client preference
		// so all we can do is verify the correct algorithm is returned
		expectedDigestStr := multiarch.DigestForAlgorithm(godigest.Canonical).String()
		verifyReturnedManifestDigest(t, client, baseURL, name, tag, expectedDigestStr)
		verifyReturnedManifestDigest(t, client, baseURL, name, expectedDigestStr, expectedDigestStr)
		// While the expected multiarch (index) manifest digest uses the canonical
		// algorithm here (pushed by tag), the sub-image manifest digests can use
		// any algorithm when referenced directly by digest.
		verifyReturnedManifestDigest(t, client, baseURL, name,
			subImage1.ManifestDescriptor.Digest.String(), subImage1.ManifestDescriptor.Digest.String())
		verifyReturnedManifestDigest(t, client, baseURL, name,
			subImage2.ManifestDescriptor.Digest.String(), subImage2.ManifestDescriptor.Digest.String())
	})
	Convey("Test SHA512 multi-arch image pushed by digest", t, func() {
		subImage1 := CreateImageWithDigestAlgorithm(godigest.SHA512).RandomLayers(1, 10).
			DefaultConfig().Build()
		subImage2 := CreateImageWithDigestAlgorithm(godigest.SHA512).RandomLayers(1, 10).
			DefaultConfig().Build()
		multiarch := CreateMultiarchWithDigestAlgorithm(godigest.SHA512).
			Images([]Image{subImage1, subImage2}).Build()
		name := "algo-sha512-2"
		t.Log(multiarch.DigestStr())
		// Pushed by digest (no tag), so the SHA512 index digest is the reference.
		err := UploadMultiarchImage(multiarch, baseURL, name, multiarch.DigestStr())
		So(err, ShouldBeNil)
		client := resty.New()
		// Pushed by SHA512 digest, so the same SHA512 digest is expected back.
		expectedDigestStr := multiarch.DigestForAlgorithm(godigest.SHA512).String()
		verifyReturnedManifestDigest(t, client, baseURL, name, expectedDigestStr, expectedDigestStr)
		// The sub-image manifest digests can use any algorithm when referenced
		// directly by digest.
		verifyReturnedManifestDigest(t, client, baseURL, name,
			subImage1.ManifestDescriptor.Digest.String(), subImage1.ManifestDescriptor.Digest.String())
		verifyReturnedManifestDigest(t, client, baseURL, name,
			subImage2.ManifestDescriptor.Digest.String(), subImage2.ManifestDescriptor.Digest.String())
	})
	Convey("Test SHA384 multi-arch image", t, func() {
		subImage1 := CreateImageWithDigestAlgorithm(godigest.SHA384).RandomLayers(1, 10).
			DefaultConfig().Build()
		subImage2 := CreateImageWithDigestAlgorithm(godigest.SHA384).RandomLayers(1, 10).
			DefaultConfig().Build()
		multiarch := CreateMultiarchWithDigestAlgorithm(godigest.SHA384).
			Images([]Image{subImage1, subImage2}).Build()
		name := "algo-sha384"
		tag := "multiarch"
		err := UploadMultiarchImage(multiarch, baseURL, name, tag)
		So(err, ShouldBeNil)
		client := resty.New()
		// The server picks canonical digests when tags are pushed
		// See https://github.com/opencontainers/distribution-spec/issues/494
		// It would be nice to be able to push tags with other digest algorithms and verify those are returned
		// but there is no way to specify a client preference
		// so all we can do is verify the correct algorithm is returned
		expectedDigestStr := multiarch.DigestForAlgorithm(godigest.Canonical).String()
		verifyReturnedManifestDigest(t, client, baseURL, name, tag, expectedDigestStr)
		verifyReturnedManifestDigest(t, client, baseURL, name, expectedDigestStr, expectedDigestStr)
		// While the expected multiarch (index) manifest digest uses the canonical
		// algorithm here (pushed by tag), the sub-image manifest digests can use
		// any algorithm when referenced directly by digest.
		verifyReturnedManifestDigest(t, client, baseURL, name,
			subImage1.ManifestDescriptor.Digest.String(), subImage1.ManifestDescriptor.Digest.String())
		verifyReturnedManifestDigest(t, client, baseURL, name,
			subImage2.ManifestDescriptor.Digest.String(), subImage2.ManifestDescriptor.Digest.String())
	})
}
// verifyReturnedManifestDigest asserts that both a GET and a HEAD request for
// the given manifest reference succeed with 200 OK and that the server reports
// the expected digest in the Docker-Content-Digest response header.
func verifyReturnedManifestDigest(t *testing.T, client *resty.Client, baseURL, repoName,
	reference, expectedDigestStr string,
) {
	t.Helper()

	t.Logf("Verify Docker-Content-Digest returned for repo %s reference %s is %s",
		repoName, reference, expectedDigestStr)

	manifestURL := fmt.Sprintf("%s/v2/%s/manifests/%s", baseURL, repoName, reference)

	// Exercise both verbs the distribution spec requires to carry the header.
	for _, doRequest := range []func(string) (*resty.Response, error){
		client.R().Get,
		client.R().Head,
	} {
		resp, err := doRequest(manifestURL)
		So(err, ShouldBeNil)
		So(resp, ShouldNotBeNil)
		So(resp.StatusCode(), ShouldEqual, http.StatusOK)
		So(resp.Header().Get("Docker-Content-Digest"), ShouldEqual, expectedDigestStr)
	}
}
func getEmptyImageConfig() ([]byte, godigest.Digest) {
config := ispec.Image{}

View file

@ -63,19 +63,19 @@ func GetManifestDescByReference(index ispec.Index, reference string) (ispec.Desc
func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaType string, body []byte,
log zlog.Logger,
) (godigest.Digest, error) {
) error {
// validate the manifest
if !IsSupportedMediaType(mediaType) {
log.Debug().Interface("actual", mediaType).
Msg("bad manifest media type")
return "", zerr.ErrBadManifest
return zerr.ErrBadManifest
}
if len(body) == 0 {
log.Debug().Int("len", len(body)).Msg("invalid body length")
return "", zerr.ErrBadManifest
return zerr.ErrBadManifest
}
switch mediaType {
@ -86,13 +86,13 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
if err := ValidateManifestSchema(body); err != nil {
log.Error().Err(err).Msg("failed to validate OCIv1 image manifest schema")
return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error())
return zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error())
}
if err := json.Unmarshal(body, &manifest); err != nil {
log.Error().Err(err).Msg("failed to unmarshal JSON")
return "", zerr.ErrBadManifest
return zerr.ErrBadManifest
}
// validate blobs only for known media types
@ -104,7 +104,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
log.Error().Err(err).Str("digest", manifest.Config.Digest.String()).
Msg("failed to stat blob due to missing config blob")
return "", zerr.ErrBadManifest
return zerr.ErrBadManifest
}
// validate layers - a lightweight check if the blob is present
@ -121,7 +121,7 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
log.Error().Err(err).Str("digest", layer.Digest.String()).
Msg("failed to validate manifest due to missing layer blob")
return "", zerr.ErrBadManifest
return zerr.ErrBadManifest
}
}
}
@ -130,14 +130,14 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
if err := ValidateImageIndexSchema(body); err != nil {
log.Error().Err(err).Msg("failed to validate OCIv1 image index manifest schema")
return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error())
return zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error())
}
var indexManifest ispec.Index
if err := json.Unmarshal(body, &indexManifest); err != nil {
log.Error().Err(err).Msg("failed to unmarshal JSON")
return "", zerr.ErrBadManifest
return zerr.ErrBadManifest
}
for _, manifest := range indexManifest.Manifests {
@ -145,28 +145,37 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
log.Error().Err(err).Str("digest", manifest.Digest.String()).
Msg("failed to stat manifest due to missing manifest blob")
return "", zerr.ErrBadManifest
return zerr.ErrBadManifest
}
}
}
return "", nil
return nil
}
func GetAndValidateRequestDigest(body []byte, digestStr string, log zlog.Logger) (godigest.Digest, error) {
bodyDigest := godigest.FromBytes(body)
// Returns the canonical digest or the digest provided by the reference if any
// Per spec, the canonical digest would always be returned to the client in
// request headers, but that does not make sense if the client requested a different digest algorithm
// See https://github.com/opencontainers/distribution-spec/issues/494
func GetAndValidateRequestDigest(body []byte, reference string, log zlog.Logger) (
godigest.Digest, error,
) {
expectedDigest, err := godigest.Parse(reference)
if err != nil {
// This is a non-digest reference
return godigest.Canonical.FromBytes(body), err
}
d, err := godigest.Parse(digestStr)
if err == nil {
if d.String() != bodyDigest.String() {
log.Error().Str("actual", bodyDigest.String()).Str("expected", d.String()).
actualDigest := expectedDigest.Algorithm().FromBytes(body)
if expectedDigest.String() != actualDigest.String() {
log.Error().Str("actual", actualDigest.String()).Str("expected", expectedDigest.String()).
Msg("failed to validate manifest digest")
return "", zerr.ErrBadManifest
}
return actualDigest, zerr.ErrBadManifest
}
return bodyDigest, err
return actualDigest, nil
}
/*

View file

@ -52,6 +52,29 @@ func TestValidateManifest(t *testing.T) {
So(err, ShouldBeNil)
So(clen, ShouldEqual, len(cblob))
Convey("bad manifest mediatype", func() {
manifest := ispec.Manifest{}
body, err := json.Marshal(manifest)
So(err, ShouldBeNil)
_, _, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageConfig, body)
So(err, ShouldNotBeNil)
So(err, ShouldEqual, zerr.ErrBadManifest)
})
Convey("empty manifest with bad media type", func() {
_, _, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageConfig, []byte(""))
So(err, ShouldNotBeNil)
So(err, ShouldEqual, zerr.ErrBadManifest)
})
Convey("empty manifest with correct media type", func() {
_, _, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, []byte(""))
So(err, ShouldNotBeNil)
So(err, ShouldEqual, zerr.ErrBadManifest)
})
Convey("bad manifest schema version", func() {
manifest := ispec.Manifest{
Config: ispec.Descriptor{

View file

@ -581,11 +581,10 @@ func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duratio
gcBlobs := make([]godigest.Digest, 0)
for _, blob := range allBlobs {
digest := godigest.NewDigestFromEncoded(godigest.SHA256, blob)
for _, digest := range allBlobs {
if err = digest.Validate(); err != nil {
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", blob).
Msg("failed to parse digest")
log.Error().Err(err).Str("module", "gc").Str("repository", repo).
Str("digest", digest.String()).Msg("failed to parse digest")
return err
}
@ -593,8 +592,8 @@ func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duratio
if _, ok := refBlobs[digest.String()]; !ok {
canGC, err := isBlobOlderThan(gc.imgStore, repo, digest, delay, log)
if err != nil {
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", blob).
Msg("failed to determine GC delay")
log.Error().Err(err).Str("module", "gc").Str("repository", repo).
Str("digest", digest.String()).Msg("failed to determine GC delay")
return err
}

View file

@ -427,8 +427,8 @@ func TestGarbageCollectWithMockedImageStore(t *testing.T) {
GetIndexContentFn: func(repo string) ([]byte, error) {
return returnedIndexJSONBuf, nil
},
GetAllBlobsFn: func(repo string) ([]string, error) {
return []string{}, errGC
GetAllBlobsFn: func(repo string) ([]godigest.Digest, error) {
return []godigest.Digest{}, errGC
},
}

View file

@ -2,7 +2,6 @@ package imagestore
import (
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
@ -490,9 +489,9 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli
refIsDigest = false
}
dig, err := common.ValidateManifest(is, repo, reference, mediaType, body, is.log)
err = common.ValidateManifest(is, repo, reference, mediaType, body, is.log)
if err != nil {
return dig, "", err
return mDigest, "", err
}
index, err := common.GetIndex(is, repo, is.log)
@ -547,11 +546,11 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli
}
if !updateIndex {
return desc.Digest, subjectDigest, nil
return mDigest, subjectDigest, nil
}
// write manifest to "blobs"
dir := path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String())
dir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir, mDigest.Algorithm().String())
manifestPath := path.Join(dir, mDigest.Encoded())
if _, err = is.storeDriver.WriteFile(manifestPath, body); err != nil {
@ -584,7 +583,7 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli
return "", "", err
}
return desc.Digest, subjectDigest, nil
return mDigest, subjectDigest, nil
}
// DeleteImageManifest deletes the image manifest from the repository.
@ -671,7 +670,8 @@ func (is *ImageStore) deleteImageManifest(repo, reference string, detectCollisio
}
if toDelete {
p := path.Join(dir, "blobs", manifestDesc.Digest.Algorithm().String(), manifestDesc.Digest.Encoded())
p := path.Join(dir, ispec.ImageBlobsDir, manifestDesc.Digest.Algorithm().String(),
manifestDesc.Digest.Encoded())
err = is.storeDriver.Delete(p)
if err != nil {
@ -857,7 +857,7 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig
return err
}
srcDigest, err := getBlobDigest(is, src)
srcDigest, err := getBlobDigest(is, src, dstDigest.Algorithm())
if err != nil {
is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob")
@ -871,11 +871,11 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig
return zerr.ErrBadBlobDigest
}
dir := path.Join(is.rootDir, repo, "blobs", dstDigest.Algorithm().String())
dir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir, dstDigest.Algorithm().String())
err = is.storeDriver.EnsureDir(dir)
if err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("failed to create dir")
is.log.Error().Str("directory", dir).Err(err).Msg("failed to create dir")
return err
}
@ -924,7 +924,10 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi
uuid := u.String()
src := is.BlobUploadPath(repo, uuid)
digester := sha256.New()
dstDigestAlgorithm := dstDigest.Algorithm()
digester := dstDigestAlgorithm.Hash()
blobFile, err := is.storeDriver.Writer(src, false)
if err != nil {
@ -948,7 +951,7 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi
return "", -1, err
}
srcDigest := godigest.NewDigestFromEncoded(godigest.SHA256, fmt.Sprintf("%x", digester.Sum(nil)))
srcDigest := godigest.NewDigestFromEncoded(dstDigestAlgorithm, fmt.Sprintf("%x", digester.Sum(nil)))
if srcDigest != dstDigest {
is.log.Error().Str("srcDigest", srcDigest.String()).
Str("dstDigest", dstDigest.String()).Msg("actual digest not equal to expected digest")
@ -956,7 +959,7 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi
return "", -1, zerr.ErrBadBlobDigest
}
dir := path.Join(is.rootDir, repo, "blobs", dstDigest.Algorithm().String())
dir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir, dstDigestAlgorithm.String())
_ = is.storeDriver.EnsureDir(dir)
var lockLatency time.Time
@ -1111,7 +1114,7 @@ func (is *ImageStore) DeleteBlobUpload(repo, uuid string) error {
// BlobPath returns the repository path of a blob.
func (is *ImageStore) BlobPath(repo string, digest godigest.Digest) string {
return path.Join(is.rootDir, repo, "blobs", digest.Algorithm().String(), digest.Encoded())
return path.Join(is.rootDir, repo, ispec.ImageBlobsDir, digest.Algorithm().String(), digest.Encoded())
}
func (is *ImageStore) GetAllDedupeReposCandidates(digest godigest.Digest) ([]string, error) {
@ -1667,7 +1670,8 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error {
return nil
}
func getBlobDigest(imgStore *ImageStore, path string) (godigest.Digest, error) {
func getBlobDigest(imgStore *ImageStore, path string, digestAlgorithm godigest.Algorithm,
) (godigest.Digest, error) {
fileReader, err := imgStore.storeDriver.Reader(path, 0)
if err != nil {
return "", zerr.ErrUploadNotFound
@ -1675,7 +1679,7 @@ func getBlobDigest(imgStore *ImageStore, path string) (godigest.Digest, error) {
defer fileReader.Close()
digest, err := godigest.FromReader(fileReader)
digest, err := digestAlgorithm.FromReader(fileReader)
if err != nil {
return "", zerr.ErrBadBlobDigest
}
@ -1683,24 +1687,44 @@ func getBlobDigest(imgStore *ImageStore, path string) (godigest.Digest, error) {
return digest, nil
}
func (is *ImageStore) GetAllBlobs(repo string) ([]string, error) {
dir := path.Join(is.rootDir, repo, "blobs", "sha256")
func (is *ImageStore) GetAllBlobs(repo string) ([]godigest.Digest, error) {
blobsDir := path.Join(is.rootDir, repo, ispec.ImageBlobsDir)
files, err := is.storeDriver.List(dir)
ret := []godigest.Digest{}
algorithmPaths, err := is.storeDriver.List(blobsDir)
if err != nil {
if errors.As(err, &driver.PathNotFoundError{}) {
is.log.Debug().Msg("empty rootDir")
is.log.Debug().Str("directory", blobsDir).Msg("empty blobs directory")
return []string{}, nil
return ret, nil
}
return []string{}, err
return ret, err
}
ret := []string{}
for _, algorithmPath := range algorithmPaths {
algorithm := godigest.Algorithm(path.Base(algorithmPath))
for _, file := range files {
ret = append(ret, filepath.Base(file))
if !algorithm.Available() {
continue
}
digestPaths, err := is.storeDriver.List(algorithmPath)
if err != nil {
// algorithmPath was obtained by looking up under the blobs directory
// we are sure it already exists, so PathNotFoundError does not need to be checked
return []godigest.Digest{}, err
}
for _, file := range digestPaths {
digest := godigest.NewDigestFromEncoded(algorithm, filepath.Base(file))
ret = append(ret, digest)
}
}
if len(ret) == 0 {
is.log.Debug().Str("directory", blobsDir).Msg("empty blobs directory")
}
return ret, nil
@ -1729,14 +1753,24 @@ func (is *ImageStore) GetNextDigestWithBlobPaths(repos []string, lastDigests []g
if fileInfo.IsDir() {
// skip repositories not found in repos
repo := path.Base(fileInfo.Path())
if !zcommon.Contains(repos, repo) && repo != ispec.ImageBlobsDir {
candidateAlgorithm := godigest.Algorithm(repo)
if !zcommon.Contains(repos, repo) && repo != "blobs" && repo != "sha256" {
if !candidateAlgorithm.Available() {
return driver.ErrSkipDir
}
}
}
blobDigest := godigest.NewDigestFromEncoded("sha256", path.Base(fileInfo.Path()))
digestHash := path.Base(fileInfo.Path())
digestAlgorithm := godigest.Algorithm(path.Base(path.Dir(fileInfo.Path())))
blobDigest := godigest.NewDigestFromEncoded(digestAlgorithm, digestHash)
if err := blobDigest.Validate(); err != nil { //nolint: nilerr
is.log.Debug().Str("path", fileInfo.Path()).Str("digestHash", digestHash).
Str("digestAlgorithm", digestAlgorithm.String()).
Msg("digest validation failed when walking blob paths")
return nil //nolint: nilerr // ignore files which are not blobs
}

View file

@ -1913,7 +1913,8 @@ func TestGarbageCollectForImageStore(t *testing.T) {
So(err, ShouldBeNil)
manifestDigest := image.ManifestDescriptor.Digest
err = os.Remove(path.Join(dir, repoName, "blobs/sha256", manifestDigest.Encoded()))
err = os.Remove(path.Join(dir, repoName, "blobs",
manifestDigest.Algorithm().String(), manifestDigest.Encoded()))
if err != nil {
panic(err)
}
@ -2108,7 +2109,8 @@ func TestGarbageCollectImageUnknownManifest(t *testing.T) {
So(err, ShouldBeNil)
artifactDigest := godigest.FromBytes(artifactBuf)
err = os.WriteFile(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", artifactDigest.Encoded()),
err = os.WriteFile(path.Join(imgStore.RootDir(), repoName, "blobs",
artifactDigest.Algorithm().String(), artifactDigest.Encoded()),
artifactBuf, storageConstants.DefaultFilePerms)
So(err, ShouldBeNil)
@ -2125,7 +2127,8 @@ func TestGarbageCollectImageUnknownManifest(t *testing.T) {
So(err, ShouldBeNil)
referrerDigest := godigest.FromBytes(referrerBuf)
err = os.WriteFile(path.Join(imgStore.RootDir(), repoName, "blobs", "sha256", referrerDigest.Encoded()),
err = os.WriteFile(path.Join(imgStore.RootDir(), repoName, "blobs",
artifactDigest.Algorithm().String(), referrerDigest.Encoded()),
referrerBuf, storageConstants.DefaultFilePerms)
So(err, ShouldBeNil)

View file

@ -2205,7 +2205,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
},
@ -2221,7 +2221,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
Convey("Trigger GetContent error in restoreDedupedBlobs()", t, func() {
imgStore := createMockStorage(testDir, tdir, false, &StorageDriverMock{
StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) {
if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) {
return &FileInfoMock{
SizeFn: func() int64 {
return int64(0)
@ -2241,7 +2241,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
_ = walkFn(&FileInfoMock{
@ -2249,7 +2249,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/second/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
@ -2270,7 +2270,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
Convey("Trigger GetContent error in restoreDedupedBlobs()", t, func() {
imgStore := createMockStorage(testDir, tdir, false, &StorageDriverMock{
StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) {
if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) {
return &FileInfoMock{
SizeFn: func() int64 {
return int64(0)
@ -2290,7 +2290,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
_ = walkFn(&FileInfoMock{
@ -2298,7 +2298,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/second/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
@ -2319,7 +2319,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
Convey("Trigger Stat() error in restoreDedupedBlobs()", t, func() {
imgStore := createMockStorage(testDir, tdir, false, &StorageDriverMock{
StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) {
if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) {
return &FileInfoMock{
SizeFn: func() int64 {
return int64(10)
@ -2339,7 +2339,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
_ = walkFn(&FileInfoMock{
@ -2347,7 +2347,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/second/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
@ -2364,7 +2364,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
Convey("Trigger Stat() error in dedupeBlobs()", func() {
imgStore := createMockStorage(testDir, t.TempDir(), true, &StorageDriverMock{
StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) {
if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) {
return &FileInfoMock{
SizeFn: func() int64 {
return int64(10)
@ -2384,7 +2384,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
_ = walkFn(&FileInfoMock{
@ -2392,7 +2392,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/second/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
@ -2412,7 +2412,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
tdir := t.TempDir()
imgStore := createMockStorage(testDir, tdir, true, &StorageDriverMock{
StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) {
if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) {
return &FileInfoMock{
SizeFn: func() int64 {
return int64(0)
@ -2432,7 +2432,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
_ = walkFn(&FileInfoMock{
@ -2440,7 +2440,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/second/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
@ -2463,7 +2463,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
tdir := t.TempDir()
imgStore := createMockStorage(testDir, tdir, true, &StorageDriverMock{
StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) {
if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) {
return &FileInfoMock{
SizeFn: func() int64 {
return int64(0)
@ -2483,7 +2483,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
_ = walkFn(&FileInfoMock{
@ -2491,7 +2491,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/second/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
@ -2531,7 +2531,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
_ = walkFn(&FileInfoMock{
@ -2539,7 +2539,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/second/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
@ -2569,7 +2569,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
Convey("Trigger cache errors", t, func() {
storageDriverMockIfBranch := &StorageDriverMock{
StatFn: func(ctx context.Context, path string) (driver.FileInfo, error) {
if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) {
if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) {
return &FileInfoMock{
SizeFn: func() int64 {
return int64(0)
@ -2589,7 +2589,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
_ = walkFn(&FileInfoMock{
@ -2597,7 +2597,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/second/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
@ -2627,7 +2627,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
_ = walkFn(&FileInfoMock{
@ -2635,7 +2635,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PathFn: func() string {
return fmt.Sprintf("path/to/second/%s", validDigest.Encoded())
return fmt.Sprintf("path/to/second/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded())
},
})
@ -2668,7 +2668,7 @@ func TestRebuildDedupeMockStoreDriver(t *testing.T) {
return false
},
PutBlobFn: func(digest godigest.Digest, path string) error {
if path == fmt.Sprintf("path/to/%s", validDigest.Encoded()) {
if path == fmt.Sprintf("path/to/%s/%s", validDigest.Algorithm().String(), validDigest.Encoded()) {
return errCache
}

View file

@ -285,6 +285,22 @@ func TestStorageAPIs(t *testing.T) {
So(v, ShouldBeEmpty)
})
// An algorithm go-digest knows about but has no registered hasher for
// (md5 here) must be rejected before any data is written.
Convey("Full blob upload unavailable algorithm", func() {
	content := []byte("this blob will be hashed using an unavailable hashing algorithm")
	md5Digest := godigest.Digest("md5:8114c3f59ef9dcf737410e0f4b00a154")

	session, written, err := imgStore.FullBlobUpload("test", bytes.NewBuffer(content), md5Digest)
	So(err, ShouldEqual, godigest.ErrDigestUnsupported)
	So(written, ShouldEqual, -1)
	So(session, ShouldEqual, "")

	// The rejected upload must leave nothing behind: listing blobs still
	// succeeds and is empty even if per-algorithm paths were never created.
	blobDigests, err := imgStore.GetAllBlobs("test")
	So(err, ShouldBeNil)
	So(blobDigests, ShouldBeEmpty)
})
Convey("Full blob upload", func() {
body := []byte("this is a blob")
buf := bytes.NewBuffer(body)
@ -296,6 +312,51 @@ func TestStorageAPIs(t *testing.T) {
err = imgStore.VerifyBlobDigestValue("test", digest)
So(err, ShouldBeNil)
// Check the blob is returned and there are no errors
// if other paths for different algorithms are missing
digests, err := imgStore.GetAllBlobs("test")
So(err, ShouldBeNil)
So(digests, ShouldContain, digest)
So(len(digests), ShouldEqual, 1)
})
// sha512 is a supported non-canonical algorithm; uploads with it must work.
Convey("Full blob upload sha512", func() {
	content := []byte("this blob will be hashed using sha512")
	sha512Digest := godigest.SHA512.FromBytes(content)

	session, written, err := imgStore.FullBlobUpload("test", bytes.NewBuffer(content), sha512Digest)
	So(err, ShouldBeNil)
	So(written, ShouldEqual, len(content))
	So(session, ShouldNotBeEmpty)

	// Listing must succeed and include the new digest even when paths for
	// other algorithms are missing. imgStore is shared with the preceding
	// Convey blocks, so the running blob count here is 2.
	blobDigests, err := imgStore.GetAllBlobs("test")
	So(err, ShouldBeNil)
	So(blobDigests, ShouldContain, sha512Digest)
	So(len(blobDigests), ShouldEqual, 2)
})
// sha384 is a supported non-canonical algorithm; uploads with it must work.
Convey("Full blob upload sha384", func() {
	content := []byte("this blob will be hashed using sha384")
	sha384Digest := godigest.SHA384.FromBytes(content)

	session, written, err := imgStore.FullBlobUpload("test", bytes.NewBuffer(content), sha384Digest)
	So(err, ShouldBeNil)
	So(written, ShouldEqual, len(content))
	So(session, ShouldNotBeEmpty)

	// Listing must succeed and include the new digest even when paths for
	// other algorithms are missing. imgStore is shared with the preceding
	// Convey blocks, so the running blob count here is 3.
	blobDigests, err := imgStore.GetAllBlobs("test")
	So(err, ShouldBeNil)
	So(blobDigests, ShouldContain, sha384Digest)
	So(len(blobDigests), ShouldEqual, 3)
})
Convey("New blob upload", func() {

View file

@ -60,7 +60,7 @@ type ImageStore interface { //nolint:interfacebloat
RunDedupeBlobs(interval time.Duration, sch *scheduler.Scheduler)
RunDedupeForDigest(ctx context.Context, digest godigest.Digest, dedupe bool, duplicateBlobs []string) error
GetNextDigestWithBlobPaths(repos []string, lastDigests []godigest.Digest) (godigest.Digest, []string, error)
GetAllBlobs(repo string) ([]string, error)
GetAllBlobs(repo string) ([]godigest.Digest, error)
PopulateStorageMetrics(interval time.Duration, sch *scheduler.Scheduler)
VerifyBlobDigestValue(repo string, digest godigest.Digest) error
GetAllDedupeReposCandidates(digest godigest.Digest) ([]string, error)

View file

@ -91,6 +91,7 @@ type Image struct {
Manifest ispec.Manifest
Config ispec.Image
Layers [][]byte
digestAlgorithm godigest.Algorithm
ConfigDescriptor ispec.Descriptor
ManifestDescriptor ispec.Descriptor
@ -108,13 +109,28 @@ func (img *Image) Digest() godigest.Digest {
panic("unreachable: ispec.Manifest should always be marshable")
}
return godigest.FromBytes(blob)
digestAlgorithm := img.digestAlgorithm
if digestAlgorithm == "" {
digestAlgorithm = godigest.Canonical
}
return digestAlgorithm.FromBytes(blob)
}
// DigestStr returns the image's manifest digest rendered as a string.
func (img *Image) DigestStr() string {
	manifestDigest := img.Digest()

	return manifestDigest.String()
}
// DigestForAlgorithm computes the manifest digest with the supplied
// algorithm, independent of the algorithm the image was built with.
func (img *Image) DigestForAlgorithm(digestAlgorithm godigest.Algorithm) godigest.Digest {
	manifestBlob, err := json.Marshal(img.Manifest)
	if err != nil {
		panic("unreachable: ispec.Manifest should always be marshable")
	}

	return digestAlgorithm.FromBytes(manifestBlob)
}
func (img *Image) Size() int {
size := img.ConfigDescriptor.Size + img.ManifestDescriptor.Size
@ -167,7 +183,15 @@ type Layer struct {
// specifying the layers of the image.
// CreateImageWith starts a new image builder that hashes all generated
// blobs (layers, config, manifest) with the canonical (sha256) algorithm.
// Defect fixed: a stale pre-image line (`return &BaseImageBuilder{}`) from
// the diff rendering preceded the real return, leaving unreachable code.
func CreateImageWith() LayerBuilder {
	// set default values here
	return &BaseImageBuilder{
		digestAlgorithm: godigest.Canonical,
	}
}
// CreateImageWithDigestAlgorithm starts a new image builder whose generated
// blobs are hashed with the caller-supplied digest algorithm.
func CreateImageWithDigestAlgorithm(digestAlgorithm godigest.Algorithm) LayerBuilder {
	builder := &BaseImageBuilder{digestAlgorithm: digestAlgorithm}

	return builder
}
func CreateDefaultImage() Image {
@ -223,6 +247,8 @@ type BaseImageBuilder struct {
annotations map[string]string
subject *ispec.Descriptor
artifactType string
digestAlgorithm godigest.Algorithm
}
func (ib *BaseImageBuilder) Layers(layers []Layer) ConfigBuilder {
@ -236,7 +262,7 @@ func (ib *BaseImageBuilder) LayerBlobs(layers [][]byte) ConfigBuilder {
ib.layers = append(ib.layers, Layer{
Blob: layer,
MediaType: ispec.MediaTypeImageLayerGzip,
Digest: godigest.FromBytes(layer),
Digest: ib.digestAlgorithm.FromBytes(layer),
})
}
@ -267,7 +293,7 @@ func (ib *BaseImageBuilder) RandomLayers(count, size int) ConfigBuilder {
ib.layers = append(ib.layers, Layer{
Blob: layer,
MediaType: ispec.MediaTypeImageLayerGzip,
Digest: godigest.FromBytes(layer),
Digest: ib.digestAlgorithm.FromBytes(layer),
})
}
@ -290,7 +316,7 @@ func (ib *BaseImageBuilder) VulnerableLayers() VulnerableConfigBuilder {
{
Blob: layer,
MediaType: ispec.MediaTypeImageLayerGzip,
Digest: godigest.FromBytes(layer),
Digest: ib.digestAlgorithm.FromBytes(layer),
},
}
@ -309,7 +335,7 @@ func (ib *BaseImageBuilder) ImageConfig(config ispec.Image) ManifestBuilder {
MediaType: ispec.MediaTypeImageConfig,
Size: int64(len(configBlob)),
Data: configBlob,
Digest: godigest.FromBytes(configBlob),
Digest: ib.digestAlgorithm.FromBytes(configBlob),
}
return ib
@ -351,7 +377,7 @@ func (ib *BaseImageBuilder) CustomConfigBlob(configBlob []byte, mediaType string
MediaType: mediaType,
Size: int64(len(configBlob)),
Data: configBlob,
Digest: godigest.FromBytes(configBlob),
Digest: ib.digestAlgorithm.FromBytes(configBlob),
}
return ib
@ -372,7 +398,7 @@ func (ib *BaseImageBuilder) RandomConfig() ManifestBuilder {
ib.configDescriptor = ispec.Descriptor{
MediaType: ispec.MediaTypeImageConfig,
Digest: godigest.FromBytes(configBlob),
Digest: ib.digestAlgorithm.FromBytes(configBlob),
Size: int64(len(configBlob)),
Data: configBlob,
}
@ -390,7 +416,7 @@ func (ib *BaseImageBuilder) DefaultVulnConfig() ManifestBuilder {
vulnConfigDescriptor := ispec.Descriptor{
MediaType: ispec.MediaTypeImageConfig,
Digest: godigest.FromBytes(configBlob),
Digest: ib.digestAlgorithm.FromBytes(configBlob),
Size: int64(len(configBlob)),
Data: configBlob,
}
@ -421,7 +447,7 @@ func (ib *BaseImageBuilder) VulnerableConfig(config ispec.Image) ManifestBuilder
vulnConfigDescriptor := ispec.Descriptor{
MediaType: ispec.MediaTypeImageConfig,
Digest: godigest.FromBytes(configBlob),
Digest: ib.digestAlgorithm.FromBytes(configBlob),
Size: int64(len(configBlob)),
Data: configBlob,
}
@ -446,7 +472,7 @@ func (ib *BaseImageBuilder) RandomVulnConfig() ManifestBuilder {
vulnConfigDescriptor := ispec.Descriptor{
MediaType: ispec.MediaTypeImageConfig,
Digest: godigest.FromBytes(configBlob),
Digest: ib.digestAlgorithm.FromBytes(configBlob),
Size: int64(len(configBlob)),
Data: configBlob,
}
@ -493,6 +519,7 @@ func (ib *BaseImageBuilder) Build() Image {
Subject: ib.subject,
Annotations: ib.annotations,
},
digestAlgorithm: ib.digestAlgorithm,
}
manifestBlob, err := json.Marshal(img.Manifest)
@ -502,7 +529,7 @@ func (ib *BaseImageBuilder) Build() Image {
img.ManifestDescriptor = ispec.Descriptor{
MediaType: ispec.MediaTypeImageManifest,
Digest: godigest.FromBytes(manifestBlob),
Digest: ib.digestAlgorithm.FromBytes(manifestBlob),
Size: int64(len(manifestBlob)),
Data: manifestBlob,
}

View file

@ -13,6 +13,7 @@ import (
type MultiarchImage struct {
Index ispec.Index
Images []Image
digestAlgorithm godigest.Algorithm
IndexDescriptor ispec.Descriptor
}
@ -23,13 +24,28 @@ func (mi *MultiarchImage) Digest() godigest.Digest {
panic("unreachable: ispec.Index should always be marshable")
}
return godigest.FromBytes(indexBlob)
digestAlgorithm := mi.digestAlgorithm
if digestAlgorithm == "" {
digestAlgorithm = godigest.Canonical
}
return digestAlgorithm.FromBytes(indexBlob)
}
// DigestStr returns the multi-arch index digest rendered as a string.
func (mi *MultiarchImage) DigestStr() string {
	indexDigest := mi.Digest()

	return indexDigest.String()
}
// DigestForAlgorithm computes the index digest with the supplied algorithm,
// independent of the algorithm the multi-arch image was built with.
func (mi *MultiarchImage) DigestForAlgorithm(digestAlgorithm godigest.Algorithm) godigest.Digest {
	indexBlob, err := json.Marshal(mi.Index)
	if err != nil {
		panic("unreachable: ispec.Index should always be marshable")
	}

	return digestAlgorithm.FromBytes(indexBlob)
}
func (mi MultiarchImage) AsImageMeta() mTypes.ImageMeta {
index := mi.Index
@ -61,7 +77,15 @@ type MultiarchBuilder interface {
}
// CreateMultiarchWith starts a new multi-arch image builder that hashes the
// index with the canonical (sha256) algorithm.
// Defect fixed: a stale pre-image line (`return &BaseMultiarchBuilder{}`)
// from the diff rendering preceded the real return, leaving unreachable code.
func CreateMultiarchWith() ImagesBuilder {
	return &BaseMultiarchBuilder{
		digestAlgorithm: godigest.Canonical,
	}
}
// CreateMultiarchWithDigestAlgorithm starts a new multi-arch image builder
// whose index is hashed with the caller-supplied digest algorithm.
func CreateMultiarchWithDigestAlgorithm(digestAlgorithm godigest.Algorithm) ImagesBuilder {
	builder := &BaseMultiarchBuilder{digestAlgorithm: digestAlgorithm}

	return builder
}
func CreateRandomMultiarch() MultiarchImage {
@ -89,6 +113,7 @@ type BaseMultiarchBuilder struct {
subject *ispec.Descriptor
artifactType string
annotations map[string]string
digestAlgorithm godigest.Algorithm
}
func (mb *BaseMultiarchBuilder) Images(images []Image) MultiarchBuilder {
@ -154,11 +179,12 @@ func (mb *BaseMultiarchBuilder) Build() MultiarchImage {
panic("unreachable: ispec.Index should always be marshable")
}
indexDigest := godigest.FromBytes(indexBlob)
indexDigest := mb.digestAlgorithm.FromBytes(indexBlob)
return MultiarchImage{
Index: index,
Images: mb.images,
digestAlgorithm: mb.digestAlgorithm,
IndexDescriptor: ispec.Descriptor{
MediaType: ispec.MediaTypeImageIndex,

View file

@ -21,6 +21,12 @@ var (
)
func UploadImage(img Image, baseURL, repo, ref string) error {
digestAlgorithm := img.digestAlgorithm
if digestAlgorithm == "" {
digestAlgorithm = godigest.Canonical
}
for _, blob := range img.Layers {
resp, err := resty.R().Post(baseURL + "/v2/" + repo + "/blobs/uploads/")
if err != nil {
@ -33,7 +39,7 @@ func UploadImage(img Image, baseURL, repo, ref string) error {
loc := resp.Header().Get("Location")
digest := godigest.FromBytes(blob).String()
digest := digestAlgorithm.FromBytes(blob).String()
resp, err = resty.R().
SetHeader("Content-Length", fmt.Sprintf("%d", len(blob))).
@ -63,7 +69,7 @@ func UploadImage(img Image, baseURL, repo, ref string) error {
}
}
cdigest := godigest.FromBytes(cblob)
cdigest := digestAlgorithm.FromBytes(cblob)
if img.Manifest.Config.MediaType == ispec.MediaTypeEmptyJSON ||
img.Manifest.Config.Digest == ispec.DescriptorEmptyJSON.Digest {
@ -117,14 +123,16 @@ func UploadImage(img Image, baseURL, repo, ref string) error {
return ErrPutBlob
}
if inject.ErrStatusCode(resp.StatusCode()) != http.StatusCreated {
return ErrPutBlob
}
return err
}
func UploadImageWithBasicAuth(img Image, baseURL, repo, ref, user, password string) error {
digestAlgorithm := img.digestAlgorithm
if digestAlgorithm == "" {
digestAlgorithm = godigest.Canonical
}
for _, blob := range img.Layers {
resp, err := resty.R().
SetBasicAuth(user, password).
@ -139,7 +147,7 @@ func UploadImageWithBasicAuth(img Image, baseURL, repo, ref, user, password stri
loc := resp.Header().Get("Location")
digest := godigest.FromBytes(blob).String()
digest := digestAlgorithm.FromBytes(blob).String()
resp, err = resty.R().
SetBasicAuth(user, password).
@ -163,7 +171,7 @@ func UploadImageWithBasicAuth(img Image, baseURL, repo, ref, user, password stri
return err
}
cdigest := godigest.FromBytes(cblob)
cdigest := digestAlgorithm.FromBytes(cblob)
if img.Manifest.Config.MediaType == ispec.MediaTypeEmptyJSON {
cblob = ispec.DescriptorEmptyJSON.Data

View file

@ -18,9 +18,15 @@ func WriteImageToFileSystem(image Image, repoName, ref string, storeController s
return err
}
digestAlgorithm := image.digestAlgorithm
if digestAlgorithm == "" {
digestAlgorithm = godigest.Canonical
}
for _, layerBlob := range image.Layers {
layerReader := bytes.NewReader(layerBlob)
layerDigest := godigest.FromBytes(layerBlob)
layerDigest := digestAlgorithm.FromBytes(layerBlob)
_, _, err = store.FullBlobUpload(repoName, layerReader, layerDigest)
if err != nil {
@ -34,7 +40,7 @@ func WriteImageToFileSystem(image Image, repoName, ref string, storeController s
}
configReader := bytes.NewReader(configBlob)
configDigest := godigest.FromBytes(configBlob)
configDigest := digestAlgorithm.FromBytes(configBlob)
_, _, err = store.FullBlobUpload(repoName, configReader, configDigest)
if err != nil {

View file

@ -51,7 +51,7 @@ type MockedImageStore struct {
RunDedupeForDigestFn func(ctx context.Context, digest godigest.Digest, dedupe bool,
duplicateBlobs []string) error
GetNextDigestWithBlobPathsFn func(repos []string, lastDigests []godigest.Digest) (godigest.Digest, []string, error)
GetAllBlobsFn func(repo string) ([]string, error)
GetAllBlobsFn func(repo string) ([]godigest.Digest, error)
CleanupRepoFn func(repo string, blobs []godigest.Digest, removeRepo bool) (int, error)
PutIndexContentFn func(repo string, index ispec.Index) error
PopulateStorageMetricsFn func(interval time.Duration, sch *scheduler.Scheduler)
@ -165,12 +165,12 @@ func (is MockedImageStore) GetImageTags(name string) ([]string, error) {
return []string{}, nil
}
func (is MockedImageStore) GetAllBlobs(repo string) ([]string, error) {
func (is MockedImageStore) GetAllBlobs(repo string) ([]godigest.Digest, error) {
if is.GetAllBlobsFn != nil {
return is.GetAllBlobsFn(repo)
}
return []string{}, nil
return []godigest.Digest{}, nil
}
func (is MockedImageStore) DeleteImageManifest(name string, reference string, detectCollision bool) error {