feat(api): repair corrupted blobs when pushed again (#1927)
CheckBlob() returns ErrBlobNotFound on corrupted blobs.

Closes #1922

Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>
Commit 7ab2032a21 (parent 1675f30d4a)
10 changed files with 467 additions and 51 deletions
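The behavioral change, in short: CheckBlob() now compares the on-disk blob size against the size recorded in the repository's manifest descriptors, and reports a mismatched (corrupted) blob as ErrBlobNotFound, so a client that simply pushes the blob again ends up overwriting the bad content. The sketch below illustrates that flow against the ImageStore interface touched by this commit; it is not part of the change itself, the helper name repushIfCorrupted is made up for illustration, and the import paths are assumed from the zot tree at this point in its history.

package example

import (
    "bytes"
    "errors"

    godigest "github.com/opencontainers/go-digest"

    zerr "zotregistry.io/zot/errors"
    storageTypes "zotregistry.io/zot/pkg/storage/types"
)

// repushIfCorrupted (hypothetical helper) re-uploads blobContent when the stored
// copy is missing or fails the new size check performed by CheckBlob().
func repushIfCorrupted(imgStore storageTypes.ImageStore, repo string, blobContent []byte) error {
    digest := godigest.FromBytes(blobContent)

    ok, _, err := imgStore.CheckBlob(repo, digest)
    if ok && err == nil {
        return nil // blob present and its size matches the descriptor
    }

    if err != nil && !errors.Is(err, zerr.ErrBlobNotFound) {
        return err // some other storage error
    }

    // Re-push: FullBlobUpload verifies the digest and replaces the corrupted file on disk.
    _, _, err = imgStore.FullBlobUpload(repo, bytes.NewReader(blobContent), digest)

    return err
}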
@@ -809,6 +809,94 @@ func GetOrasManifestByDigest(imgStore storageTypes.ImageStore, repo string, dige
     return artManifest, nil
 }
 
+// Get blob descriptor from its manifest contents; if the blob cannot be found it will return an error.
+func GetBlobDescriptorFromRepo(imgStore storageTypes.ImageStore, repo string, blobDigest godigest.Digest,
+    log zlog.Logger,
+) (ispec.Descriptor, error) {
+    index, err := GetIndex(imgStore, repo, log)
+    if err != nil {
+        return ispec.Descriptor{}, err
+    }
+
+    return GetBlobDescriptorFromIndex(imgStore, index, repo, blobDigest, log)
+}
+
+func GetBlobDescriptorFromIndex(imgStore storageTypes.ImageStore, index ispec.Index, repo string,
+    blobDigest godigest.Digest, log zlog.Logger,
+) (ispec.Descriptor, error) {
+    for _, desc := range index.Manifests {
+        if desc.Digest == blobDigest {
+            return desc, nil
+        }
+
+        switch desc.MediaType {
+        case ispec.MediaTypeImageManifest:
+            if foundDescriptor, err := getBlobDescriptorFromManifest(imgStore, repo, blobDigest, desc, log); err == nil {
+                return foundDescriptor, nil
+            }
+        case ispec.MediaTypeImageIndex:
+            indexImage, err := GetImageIndex(imgStore, repo, desc.Digest, log)
+            if err != nil {
+                return ispec.Descriptor{}, err
+            }
+
+            if foundDescriptor, err := GetBlobDescriptorFromIndex(imgStore, indexImage, repo, blobDigest, log); err == nil {
+                return foundDescriptor, nil
+            }
+        case oras.MediaTypeArtifactManifest:
+            if foundDescriptor, err := getBlobDescriptorFromORASManifest(imgStore, repo, blobDigest, desc, log); err == nil {
+                return foundDescriptor, nil
+            }
+        }
+    }
+
+    return ispec.Descriptor{}, zerr.ErrBlobNotFound
+}
+
+func getBlobDescriptorFromManifest(imgStore storageTypes.ImageStore, repo string, blobDigest godigest.Digest,
+    desc ispec.Descriptor, log zlog.Logger,
+) (ispec.Descriptor, error) {
+    manifest, err := GetImageManifest(imgStore, repo, desc.Digest, log)
+    if err != nil {
+        return ispec.Descriptor{}, err
+    }
+
+    if manifest.Config.Digest == blobDigest {
+        return manifest.Config, nil
+    }
+
+    for _, layer := range manifest.Layers {
+        if layer.Digest == blobDigest {
+            return layer, nil
+        }
+    }
+
+    return ispec.Descriptor{}, zerr.ErrBlobNotFound
+}
+
+func getBlobDescriptorFromORASManifest(imgStore storageTypes.ImageStore, repo string, blobDigest godigest.Digest,
+    desc ispec.Descriptor, log zlog.Logger,
+) (ispec.Descriptor, error) {
+    manifest, err := GetOrasManifestByDigest(imgStore, repo, desc.Digest, log)
+    if err != nil {
+        return ispec.Descriptor{}, err
+    }
+
+    for _, layer := range manifest.Blobs {
+        if layer.Digest == blobDigest {
+            return ispec.Descriptor{
+                MediaType:    layer.MediaType,
+                Size:         layer.Size,
+                Digest:       layer.Digest,
+                ArtifactType: layer.ArtifactType,
+                Annotations:  layer.Annotations,
+            }, nil
+        }
+    }
+
+    return ispec.Descriptor{}, zerr.ErrBlobNotFound
+}
+
 func IsSupportedMediaType(mediaType string) bool {
     return mediaType == ispec.MediaTypeImageIndex ||
         mediaType == ispec.MediaTypeImageManifest ||
@@ -19,6 +19,7 @@ import (
     "zotregistry.io/zot/pkg/storage"
     "zotregistry.io/zot/pkg/storage/cache"
     common "zotregistry.io/zot/pkg/storage/common"
+    "zotregistry.io/zot/pkg/storage/imagestore"
     "zotregistry.io/zot/pkg/storage/local"
     . "zotregistry.io/zot/pkg/test/image-utils"
     "zotregistry.io/zot/pkg/test/mocks"
@@ -420,6 +421,87 @@ func TestGetImageIndexErrors(t *testing.T) {
     })
 }
 
+func TestGetBlobDescriptorFromRepo(t *testing.T) {
+    log := log.Logger{Logger: zerolog.New(os.Stdout)}
+    metrics := monitoring.NewMetricsServer(false, log)
+
+    tdir := t.TempDir()
+    cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
+        RootDir:     tdir,
+        Name:        "cache",
+        UseRelPaths: true,
+    }, log)
+
+    driver := local.New(true)
+    imgStore := imagestore.NewImageStore(tdir, tdir, true,
+        true, log, metrics, nil, driver, cacheDriver)
+
+    repoName := "zot-test"
+
+    Convey("Test error paths", t, func() {
+        storeController := storage.StoreController{DefaultStore: imgStore}
+
+        image := CreateRandomMultiarch()
+
+        tag := "index"
+
+        err := WriteMultiArchImageToFileSystem(image, repoName, tag, storeController)
+        So(err, ShouldBeNil)
+
+        blob := image.Images[0].Layers[0]
+        blobDigest := godigest.FromBytes(blob)
+        blobSize := len(blob)
+
+        desc, err := common.GetBlobDescriptorFromIndex(imgStore, ispec.Index{Manifests: []ispec.Descriptor{
+            {
+                Digest:    image.Digest(),
+                MediaType: ispec.MediaTypeImageIndex,
+            },
+        }}, repoName, blobDigest, log)
+        So(err, ShouldBeNil)
+        So(desc.Digest, ShouldEqual, blobDigest)
+        So(desc.Size, ShouldEqual, blobSize)
+
+        desc, err = common.GetBlobDescriptorFromRepo(imgStore, repoName, blobDigest, log)
+        So(err, ShouldBeNil)
+        So(desc.Digest, ShouldEqual, blobDigest)
+        So(desc.Size, ShouldEqual, blobSize)
+
+        indexBlobPath := imgStore.BlobPath(repoName, image.Digest())
+        err = os.Chmod(indexBlobPath, 0o000)
+        So(err, ShouldBeNil)
+
+        defer func() {
+            err = os.Chmod(indexBlobPath, 0o644)
+            So(err, ShouldBeNil)
+        }()
+
+        _, err = common.GetBlobDescriptorFromIndex(imgStore, ispec.Index{Manifests: []ispec.Descriptor{
+            {
+                Digest:    image.Digest(),
+                MediaType: ispec.MediaTypeImageIndex,
+            },
+        }}, repoName, blobDigest, log)
+        So(err, ShouldNotBeNil)
+
+        manifestDigest := image.Images[0].Digest()
+        manifestBlobPath := imgStore.BlobPath(repoName, manifestDigest)
+        err = os.Chmod(manifestBlobPath, 0o000)
+        So(err, ShouldBeNil)
+
+        defer func() {
+            err = os.Chmod(manifestBlobPath, 0o644)
+            So(err, ShouldBeNil)
+        }()
+
+        _, err = common.GetBlobDescriptorFromRepo(imgStore, repoName, blobDigest, log)
+        So(err, ShouldNotBeNil)
+
+        _, err = common.GetBlobDescriptorFromRepo(imgStore, repoName, manifestDigest, log)
+        So(err, ShouldBeNil)
+    })
+}
+
 func TestIsSignature(t *testing.T) {
     Convey("Unknown media type", t, func(c C) {
         isSingature := common.IsSignature(ispec.Descriptor{
@@ -866,20 +866,11 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig
         return err
     }
 
-    fileReader, err := is.storeDriver.Reader(src, 0)
-    if err != nil {
-        is.log.Error().Err(err).Str("blob", src).Msg("failed to open file")
-
-        return zerr.ErrUploadNotFound
-    }
-
-    defer fileReader.Close()
-
-    srcDigest, err := godigest.FromReader(fileReader)
+    srcDigest, err := getBlobDigest(is, src)
     if err != nil {
         is.log.Error().Err(err).Str("blob", src).Msg("failed to open blob")
 
-        return zerr.ErrBadBlobDigest
+        return err
     }
 
     if srcDigest != dstDigest {
@@ -906,7 +897,7 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig
     defer is.Unlock(&lockLatency)
 
     if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
-        err = is.DedupeBlob(src, dstDigest, dst)
+        err = is.DedupeBlob(src, dstDigest, repo, dst)
         if err := inject.Error(err); err != nil {
             is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
                 Str("dst", dst).Msg("unable to dedupe blob")
@@ -985,7 +976,7 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi
     dst := is.BlobPath(repo, dstDigest)
 
     if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
-        if err := is.DedupeBlob(src, dstDigest, dst); err != nil {
+        if err := is.DedupeBlob(src, dstDigest, repo, dst); err != nil {
             is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
                 Str("dst", dst).Msg("unable to dedupe blob")
 
@@ -1003,7 +994,7 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi
     return uuid, int64(nbytes), nil
 }
 
-func (is *ImageStore) DedupeBlob(src string, dstDigest godigest.Digest, dst string) error {
+func (is *ImageStore) DedupeBlob(src string, dstDigest godigest.Digest, dstRepo string, dst string) error {
 retry:
     is.log.Debug().Str("src", src).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: enter")
 
@@ -1033,12 +1024,11 @@ retry:
     } else {
         // cache record exists, but due to GC and upgrades from older versions,
         // disk content and cache records may go out of sync
-
         if is.cache.UsesRelativePaths() {
             dstRecord = path.Join(is.rootDir, dstRecord)
         }
 
-        _, err := is.storeDriver.Stat(dstRecord)
+        blobInfo, err := is.storeDriver.Stat(dstRecord)
         if err != nil {
             is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to stat")
             // the actual blob on disk may have been removed by GC, so sync the cache
@@ -1066,6 +1056,22 @@ retry:
 
             return err
         }
+    } else {
+        // if it's the same file then it was already uploaded; check whether the blob is corrupted
+        if desc, err := common.GetBlobDescriptorFromRepo(is, dstRepo, dstDigest, is.log); err == nil {
+            // blob corrupted, replace content
+            if desc.Size != blobInfo.Size() {
+                if err := is.storeDriver.Move(src, dst); err != nil {
+                    is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("dedupe: unable to rename blob")
+
+                    return err
+                }
+
+                is.log.Debug().Str("src", src).Msg("dedupe: remove")
+
+                return nil
+            }
+        }
     }
 
     // remove temp blobupload
@@ -1134,9 +1140,20 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6
 
     binfo, err := is.storeDriver.Stat(blobPath)
     if err == nil && binfo.Size() > 0 {
-        is.log.Debug().Str("blob path", blobPath).Msg("blob path found")
-
-        return true, binfo.Size(), nil
+        // try to find the blob size in the blob descriptors
+        desc, err := common.GetBlobDescriptorFromRepo(is, repo, digest, is.log)
+        if err != nil || desc.Size == binfo.Size() {
+            // blob not found in descriptors, cannot compare, just return
+            is.log.Debug().Str("blob path", blobPath).Msg("blob path found")
+
+            return true, binfo.Size(), nil //nolint: nilerr
+        }
+
+        if desc.Size != binfo.Size() {
+            is.log.Debug().Str("blob path", blobPath).Msg("blob path found, but it's corrupted")
+
+            return false, -1, zerr.ErrBlobNotFound
+        }
     }
     // otherwise is a 'deduped' blob (empty file)
 
@@ -1634,6 +1651,22 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error {
     return nil
 }
 
+func getBlobDigest(imgStore *ImageStore, path string) (godigest.Digest, error) {
+    fileReader, err := imgStore.storeDriver.Reader(path, 0)
+    if err != nil {
+        return "", zerr.ErrUploadNotFound
+    }
+
+    defer fileReader.Close()
+
+    digest, err := godigest.FromReader(fileReader)
+    if err != nil {
+        return "", zerr.ErrBadBlobDigest
+    }
+
+    return digest, nil
+}
+
 func (is *ImageStore) GetAllBlobs(repo string) ([]string, error) {
     dir := path.Join(is.rootDir, repo, "blobs", "sha256")
 
@@ -818,7 +818,7 @@ func FuzzDedupeBlob(f *testing.F) {
             t.Error(err)
         }
 
-        err = imgStore.DedupeBlob(src, blobDigest, dst)
+        err = imgStore.DedupeBlob(src, blobDigest, "repoName", dst)
         if err != nil {
             t.Error(err)
         }
@@ -1514,7 +1514,7 @@ func TestDedupe(t *testing.T) {
     Convey("Dedupe", t, func(c C) {
         Convey("Nil ImageStore", func() {
             var is storageTypes.ImageStore
-            So(func() { _ = is.DedupeBlob("", "", "") }, ShouldPanic)
+            So(func() { _ = is.DedupeBlob("", "", "", "") }, ShouldPanic)
         })
 
         Convey("Valid ImageStore", func() {
@@ -1530,7 +1530,7 @@ func TestDedupe(t *testing.T) {
 
             il := local.NewImageStore(dir, true, true, log, metrics, nil, cacheDriver)
 
-            So(il.DedupeBlob("", "", ""), ShouldNotBeNil)
+            So(il.DedupeBlob("", "", "", ""), ShouldNotBeNil)
         })
     })
 }
@@ -3552,7 +3552,7 @@ func TestS3DedupeErr(t *testing.T) {
         digest := godigest.NewDigestFromEncoded(godigest.SHA256, "digest")
 
         // trigger unable to insert blob record
-        err := imgStore.DedupeBlob("", digest, "")
+        err := imgStore.DedupeBlob("", digest, "", "")
         So(err, ShouldNotBeNil)
 
         imgStore = createMockStorage(testDir, tdir, true, &StorageDriverMock{
|
||||||
})
|
})
|
||||||
|
|
||||||
// trigger unable to rename blob
|
// trigger unable to rename blob
|
||||||
err = imgStore.DedupeBlob("", digest, "dst")
|
err = imgStore.DedupeBlob("", digest, "", "dst")
|
||||||
So(err, ShouldNotBeNil)
|
So(err, ShouldNotBeNil)
|
||||||
|
|
||||||
// trigger retry
|
// trigger retry
|
||||||
err = imgStore.DedupeBlob("", digest, "dst")
|
err = imgStore.DedupeBlob("", digest, "", "dst")
|
||||||
So(err, ShouldNotBeNil)
|
So(err, ShouldNotBeNil)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@@ -3586,11 +3586,11 @@ func TestS3DedupeErr(t *testing.T) {
         })
 
         digest := godigest.NewDigestFromEncoded(godigest.SHA256, "digest")
-        err := imgStore.DedupeBlob("", digest, "dst")
+        err := imgStore.DedupeBlob("", digest, "", "dst")
         So(err, ShouldBeNil)
 
         // error will be triggered in driver.SameFile()
-        err = imgStore.DedupeBlob("", digest, "dst2")
+        err = imgStore.DedupeBlob("", digest, "", "dst2")
         So(err, ShouldBeNil)
     })
 
@@ -3606,10 +3606,10 @@ func TestS3DedupeErr(t *testing.T) {
         })
 
         digest := godigest.NewDigestFromEncoded(godigest.SHA256, "digest")
-        err := imgStore.DedupeBlob("", digest, "dst")
+        err := imgStore.DedupeBlob("", digest, "", "dst")
         So(err, ShouldBeNil)
 
-        err = imgStore.DedupeBlob("", digest, "dst2")
+        err = imgStore.DedupeBlob("", digest, "", "dst2")
         So(err, ShouldNotBeNil)
     })
 
@@ -3622,10 +3622,10 @@ func TestS3DedupeErr(t *testing.T) {
         })
 
         digest := godigest.NewDigestFromEncoded(godigest.SHA256, "digest")
-        err := imgStore.DedupeBlob("", digest, "dst")
+        err := imgStore.DedupeBlob("", digest, "", "dst")
         So(err, ShouldBeNil)
 
-        err = imgStore.DedupeBlob("", digest, "")
+        err = imgStore.DedupeBlob("", digest, "", "")
         So(err, ShouldNotBeNil)
     })
 
@@ -3641,10 +3641,10 @@ func TestS3DedupeErr(t *testing.T) {
         })
 
         digest := godigest.NewDigestFromEncoded(godigest.SHA256, "digest")
-        err := imgStore.DedupeBlob("", digest, "dst")
+        err := imgStore.DedupeBlob("", digest, "", "dst")
         So(err, ShouldBeNil)
 
-        err = imgStore.DedupeBlob("", digest, "dst")
+        err = imgStore.DedupeBlob("", digest, "", "dst")
         So(err, ShouldNotBeNil)
     })
 
@@ -3665,7 +3665,7 @@ func TestS3DedupeErr(t *testing.T) {
         digest := godigest.NewDigestFromEncoded(godigest.SHA256,
             "7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
 
-        err := imgStore.DedupeBlob("repo", digest, "dst")
+        err := imgStore.DedupeBlob("repo", digest, "", "dst")
         So(err, ShouldBeNil)
 
         _, _, err = imgStore.CheckBlob("repo", digest)
|
||||||
digest := godigest.NewDigestFromEncoded(godigest.SHA256,
|
digest := godigest.NewDigestFromEncoded(godigest.SHA256,
|
||||||
"7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
|
"7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
|
||||||
|
|
||||||
err := imgStore.DedupeBlob("repo", digest, "dst")
|
err := imgStore.DedupeBlob("repo", digest, "", "dst")
|
||||||
So(err, ShouldBeNil)
|
So(err, ShouldBeNil)
|
||||||
|
|
||||||
_, _, err = imgStore.CheckBlob("repo", digest)
|
_, _, err = imgStore.CheckBlob("repo", digest)
|
||||||
|
@ -3704,7 +3704,7 @@ func TestS3DedupeErr(t *testing.T) {
|
||||||
digest := godigest.NewDigestFromEncoded(godigest.SHA256,
|
digest := godigest.NewDigestFromEncoded(godigest.SHA256,
|
||||||
"7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
|
"7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
|
||||||
|
|
||||||
err := imgStore.DedupeBlob("repo", digest, "dst")
|
err := imgStore.DedupeBlob("repo", digest, "", "dst")
|
||||||
So(err, ShouldBeNil)
|
So(err, ShouldBeNil)
|
||||||
|
|
||||||
_, _, err = imgStore.CheckBlob("repo", digest)
|
_, _, err = imgStore.CheckBlob("repo", digest)
|
||||||
|
@ -3719,10 +3719,10 @@ func TestS3DedupeErr(t *testing.T) {
|
||||||
digest := godigest.NewDigestFromEncoded(godigest.SHA256,
|
digest := godigest.NewDigestFromEncoded(godigest.SHA256,
|
||||||
"7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
|
"7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
|
||||||
|
|
||||||
err := imgStore.DedupeBlob("/src/dst", digest, "/repo1/dst1")
|
err := imgStore.DedupeBlob("/src/dst", digest, "", "/repo1/dst1")
|
||||||
So(err, ShouldBeNil)
|
So(err, ShouldBeNil)
|
||||||
|
|
||||||
err = imgStore.DedupeBlob("/src/dst", digest, "/repo2/dst2")
|
err = imgStore.DedupeBlob("/src/dst", digest, "", "/repo2/dst2")
|
||||||
So(err, ShouldBeNil)
|
So(err, ShouldBeNil)
|
||||||
|
|
||||||
// copy cache db to the new imagestore
|
// copy cache db to the new imagestore
|
||||||
|
@ -3767,10 +3767,10 @@ func TestS3DedupeErr(t *testing.T) {
|
||||||
digest := godigest.NewDigestFromEncoded(godigest.SHA256,
|
digest := godigest.NewDigestFromEncoded(godigest.SHA256,
|
||||||
"7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
|
"7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
|
||||||
|
|
||||||
err := imgStore.DedupeBlob("/src/dst", digest, "/repo1/dst1")
|
err := imgStore.DedupeBlob("/src/dst", digest, "", "/repo1/dst1")
|
||||||
So(err, ShouldBeNil)
|
So(err, ShouldBeNil)
|
||||||
|
|
||||||
err = imgStore.DedupeBlob("/src/dst", digest, "/repo2/dst2")
|
err = imgStore.DedupeBlob("/src/dst", digest, "", "/repo2/dst2")
|
||||||
So(err, ShouldBeNil)
|
So(err, ShouldBeNil)
|
||||||
|
|
||||||
// copy cache db to the new imagestore
|
// copy cache db to the new imagestore
|
||||||
|
@ -3871,7 +3871,7 @@ func TestS3DedupeErr(t *testing.T) {
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
err := imgStore.DedupeBlob("repo", digest, blobPath)
|
err := imgStore.DedupeBlob("repo", digest, "", blobPath)
|
||||||
So(err, ShouldBeNil)
|
So(err, ShouldBeNil)
|
||||||
|
|
||||||
_, _, err = imgStore.CheckBlob("repo2", digest)
|
_, _, err = imgStore.CheckBlob("repo2", digest)
|
||||||
|
@ -3922,11 +3922,11 @@ func TestInjectDedupe(t *testing.T) {
|
||||||
return &FileInfoMock{}, errS3
|
return &FileInfoMock{}, errS3
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
err := imgStore.DedupeBlob("blob", "digest", "newblob")
|
err := imgStore.DedupeBlob("blob", "digest", "", "newblob")
|
||||||
So(err, ShouldBeNil)
|
So(err, ShouldBeNil)
|
||||||
|
|
||||||
injected := inject.InjectFailure(0)
|
injected := inject.InjectFailure(0)
|
||||||
err = imgStore.DedupeBlob("blob", "digest", "newblob")
|
err = imgStore.DedupeBlob("blob", "digest", "", "newblob")
|
||||||
if injected {
|
if injected {
|
||||||
So(err, ShouldNotBeNil)
|
So(err, ShouldNotBeNil)
|
||||||
} else {
|
} else {
|
||||||
|
@@ -3934,7 +3934,7 @@ func TestInjectDedupe(t *testing.T) {
         }
 
         injected = inject.InjectFailure(1)
-        err = imgStore.DedupeBlob("blob", "digest", "newblob")
+        err = imgStore.DedupeBlob("blob", "digest", "", "newblob")
         if injected {
             So(err, ShouldNotBeNil)
         } else {
@@ -1156,6 +1156,219 @@ func TestDeleteBlobsInUse(t *testing.T) {
         }
     }
 
+func TestReuploadCorruptedBlob(t *testing.T) {
+    for _, testcase := range testCases {
+        testcase := testcase
+        t.Run(testcase.testCaseName, func(t *testing.T) {
+            var imgStore storageTypes.ImageStore
+            var testDir, tdir string
+            var store driver.StorageDriver
+            var driver storageTypes.Driver
+
+            log := log.Logger{Logger: zerolog.New(os.Stdout)}
+            metrics := monitoring.NewMetricsServer(false, log)
+
+            if testcase.storageType == storageConstants.S3StorageDriverName {
+                tskip.SkipS3(t)
+
+                uuid, err := guuid.NewV4()
+                if err != nil {
+                    panic(err)
+                }
+
+                testDir = path.Join("/oci-repo-test", uuid.String())
+                tdir = t.TempDir()
+
+                store, imgStore, _ = createObjectsStore(testDir, tdir)
+                driver = s3.New(store)
+                defer cleanupStorage(store, testDir)
+            } else {
+                tdir = t.TempDir()
+                cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
+                    RootDir:     tdir,
+                    Name:        "cache",
+                    UseRelPaths: true,
+                }, log)
+                driver = local.New(true)
+                imgStore = imagestore.NewImageStore(tdir, tdir, true,
+                    true, log, metrics, nil, driver, cacheDriver)
+            }
+
+            Convey("Test errors paths", t, func() {
+                storeController := storage.StoreController{DefaultStore: imgStore}
+
+                image := CreateRandomImage()
+
+                err := WriteImageToFileSystem(image, repoName, tag, storeController)
+                So(err, ShouldBeNil)
+            })
+
+            Convey("Test reupload repair corrupted image", t, func() {
+                storeController := storage.StoreController{DefaultStore: imgStore}
+
+                image := CreateRandomImage()
+
+                err := WriteImageToFileSystem(image, repoName, tag, storeController)
+                So(err, ShouldBeNil)
+
+                blob := image.Layers[0]
+                blobDigest := godigest.FromBytes(blob)
+                blobSize := len(blob)
+                blobPath := imgStore.BlobPath(repoName, blobDigest)
+
+                ok, size, err := imgStore.CheckBlob(repoName, blobDigest)
+                So(ok, ShouldBeTrue)
+                So(size, ShouldEqual, blobSize)
+                So(err, ShouldBeNil)
+
+                _, err = driver.WriteFile(blobPath, []byte("corrupted"))
+                So(err, ShouldBeNil)
+
+                ok, size, err = imgStore.CheckBlob(repoName, blobDigest)
+                So(ok, ShouldBeFalse)
+                So(size, ShouldNotEqual, blobSize)
+                So(err, ShouldEqual, zerr.ErrBlobNotFound)
+
+                err = WriteImageToFileSystem(image, repoName, tag, storeController)
+                So(err, ShouldBeNil)
+
+                ok, size, _, err = imgStore.StatBlob(repoName, blobDigest)
+                So(ok, ShouldBeTrue)
+                So(blobSize, ShouldEqual, size)
+                So(err, ShouldBeNil)
+
+                ok, size, err = imgStore.CheckBlob(repoName, blobDigest)
+                So(ok, ShouldBeTrue)
+                So(size, ShouldEqual, blobSize)
+                So(err, ShouldBeNil)
+            })
+
+            Convey("Test reupload repair corrupted image index", t, func() {
+                storeController := storage.StoreController{DefaultStore: imgStore}
+
+                image := CreateRandomMultiarch()
+
+                tag := "index"
+
+                err := WriteMultiArchImageToFileSystem(image, repoName, tag, storeController)
+                So(err, ShouldBeNil)
+
+                blob := image.Images[0].Layers[0]
+                blobDigest := godigest.FromBytes(blob)
+                blobSize := len(blob)
+                blobPath := imgStore.BlobPath(repoName, blobDigest)
+
+                ok, size, err := imgStore.CheckBlob(repoName, blobDigest)
+                So(ok, ShouldBeTrue)
+                So(size, ShouldEqual, blobSize)
+                So(err, ShouldBeNil)
+
+                _, err = driver.WriteFile(blobPath, []byte("corrupted"))
+                So(err, ShouldBeNil)
+
+                ok, size, err = imgStore.CheckBlob(repoName, blobDigest)
+                So(ok, ShouldBeFalse)
+                So(size, ShouldNotEqual, blobSize)
+                So(err, ShouldEqual, zerr.ErrBlobNotFound)
+
+                err = WriteMultiArchImageToFileSystem(image, repoName, tag, storeController)
+                So(err, ShouldBeNil)
+
+                ok, size, _, err = imgStore.StatBlob(repoName, blobDigest)
+                So(ok, ShouldBeTrue)
+                So(blobSize, ShouldEqual, size)
+                So(err, ShouldBeNil)
+
+                ok, size, err = imgStore.CheckBlob(repoName, blobDigest)
+                So(ok, ShouldBeTrue)
+                So(size, ShouldEqual, blobSize)
+                So(err, ShouldBeNil)
+            })
+
+            Convey("Test reupload repair corrupted oras artifact", t, func() {
+                storeController := storage.StoreController{DefaultStore: imgStore}
+
+                image := CreateRandomImage()
+
+                tag := "oras-artifact"
+                err := WriteImageToFileSystem(image, repoName, tag, storeController)
+                So(err, ShouldBeNil)
+
+                body := []byte("this is a blob")
+                blobDigest := godigest.FromBytes(body)
+                blobPath := imgStore.BlobPath(repoName, blobDigest)
+                buf := bytes.NewBuffer(body)
+                blobSize := int64(buf.Len())
+
+                _, size, err := imgStore.FullBlobUpload(repoName, buf, blobDigest)
+                So(err, ShouldBeNil)
+                So(size, ShouldEqual, blobSize)
+
+                artifactManifest := artifactspec.Manifest{}
+                artifactType := "signature"
+                artifactManifest.ArtifactType = artifactType
+                artifactManifest.Subject = &artifactspec.Descriptor{
+                    MediaType: ispec.MediaTypeImageManifest,
+                    Digest:    image.Digest(),
+                    Size:      image.ManifestDescriptor.Size,
+                }
+
+                artifactManifest.Blobs = []artifactspec.Descriptor{
+                    {
+                        Digest: blobDigest,
+                        Size:   blobSize,
+                    },
+                }
+
+                manBuf, err := json.Marshal(artifactManifest)
+                So(err, ShouldBeNil)
+
+                manBufLen := len(manBuf)
+                manDigest := godigest.FromBytes(manBuf)
+
+                _, _, err = imgStore.PutImageManifest(repoName, manDigest.String(), artifactspec.MediaTypeArtifactManifest, manBuf)
+                So(err, ShouldBeNil)
+
+                descriptors, err := imgStore.GetOrasReferrers(repoName, image.Digest(), artifactType)
+                So(err, ShouldBeNil)
+                So(descriptors, ShouldNotBeEmpty)
+                So(descriptors[0].ArtifactType, ShouldEqual, artifactType)
+                So(descriptors[0].MediaType, ShouldEqual, artifactspec.MediaTypeArtifactManifest)
+                So(descriptors[0].Size, ShouldEqual, manBufLen)
+                So(descriptors[0].Digest, ShouldEqual, manDigest)
+
+                ok, size, err := imgStore.CheckBlob(repoName, blobDigest)
+                So(ok, ShouldBeTrue)
+                So(size, ShouldEqual, blobSize)
+                So(err, ShouldBeNil)
+
+                _, err = driver.WriteFile(blobPath, []byte("corrupted"))
+                So(err, ShouldBeNil)
+
+                ok, size, err = imgStore.CheckBlob(repoName, blobDigest)
+                So(ok, ShouldBeFalse)
+                So(size, ShouldNotEqual, blobSize)
+                So(err, ShouldEqual, zerr.ErrBlobNotFound)
+
+                buf = bytes.NewBuffer(body)
+                _, size, err = imgStore.FullBlobUpload(repoName, buf, blobDigest)
+                So(err, ShouldBeNil)
+                So(size, ShouldEqual, blobSize)
+
+                ok, size, _, err = imgStore.StatBlob(repoName, blobDigest)
+                So(ok, ShouldBeTrue)
+                So(blobSize, ShouldEqual, size)
+                So(err, ShouldBeNil)
+
+                ok, size, err = imgStore.CheckBlob(repoName, blobDigest)
+                So(ok, ShouldBeTrue)
+                So(size, ShouldEqual, blobSize)
+                So(err, ShouldBeNil)
+            })
+        })
+    }
+}
+
 func TestStorageHandler(t *testing.T) {
     for _, testcase := range testCases {
         testcase := testcase
@@ -42,7 +42,7 @@ type ImageStore interface { //nolint:interfacebloat
     BlobUploadInfo(repo, uuid string) (int64, error)
     FinishBlobUpload(repo, uuid string, body io.Reader, digest godigest.Digest) error
     FullBlobUpload(repo string, body io.Reader, digest godigest.Digest) (string, int64, error)
-    DedupeBlob(src string, dstDigest godigest.Digest, dst string) error
+    DedupeBlob(src string, dstDigest godigest.Digest, dstRepo, dst string) error
     DeleteBlobUpload(repo, uuid string) error
     BlobPath(repo string, digest godigest.Digest) string
     CheckBlob(repo string, digest godigest.Digest) (bool, int64, error)
|
@ -32,7 +32,7 @@ type MockedImageStore struct {
|
||||||
PutBlobChunkFn func(repo string, uuid string, from int64, to int64, body io.Reader) (int64, error)
|
PutBlobChunkFn func(repo string, uuid string, from int64, to int64, body io.Reader) (int64, error)
|
||||||
FinishBlobUploadFn func(repo string, uuid string, body io.Reader, digest godigest.Digest) error
|
FinishBlobUploadFn func(repo string, uuid string, body io.Reader, digest godigest.Digest) error
|
||||||
FullBlobUploadFn func(repo string, body io.Reader, digest godigest.Digest) (string, int64, error)
|
FullBlobUploadFn func(repo string, body io.Reader, digest godigest.Digest) (string, int64, error)
|
||||||
DedupeBlobFn func(src string, dstDigest godigest.Digest, dst string) error
|
DedupeBlobFn func(src string, dstDigest godigest.Digest, dstRepo, dst string) error
|
||||||
DeleteBlobUploadFn func(repo string, uuid string) error
|
DeleteBlobUploadFn func(repo string, uuid string) error
|
||||||
BlobPathFn func(repo string, digest godigest.Digest) string
|
BlobPathFn func(repo string, digest godigest.Digest) string
|
||||||
CheckBlobFn func(repo string, digest godigest.Digest) (bool, int64, error)
|
CheckBlobFn func(repo string, digest godigest.Digest) (bool, int64, error)
|
||||||
|
@@ -240,9 +240,9 @@ func (is MockedImageStore) FullBlobUpload(repo string, body io.Reader, digest go
     return "", 0, nil
 }
 
-func (is MockedImageStore) DedupeBlob(src string, dstDigest godigest.Digest, dst string) error {
+func (is MockedImageStore) DedupeBlob(src string, dstDigest godigest.Digest, dstRepo, dst string) error {
     if is.DedupeBlobFn != nil {
-        return is.DedupeBlobFn(src, dstDigest, dst)
+        return is.DedupeBlobFn(src, dstDigest, dstRepo, dst)
     }
 
     return nil
@@ -4,8 +4,8 @@
         "rootDirectory": "/tmp/zot/s3",
         "gc": true,
         "gcReferrers": false,
-        "gcDelay": "3m",
-        "untaggedImageRetentionDelay": "3m",
+        "gcDelay": "4m",
+        "untaggedImageRetentionDelay": "4m",
         "gcInterval": "1s",
         "storageDriver": {
             "name": "s3",
@@ -4,8 +4,8 @@
         "rootDirectory": "/tmp/zot/s3",
         "gc": true,
         "gcReferrers": true,
-        "gcDelay": "3m",
-        "untaggedImageRetentionDelay": "3m",
+        "gcDelay": "4m",
+        "untaggedImageRetentionDelay": "4m",
         "gcInterval": "1s",
         "storageDriver": {
             "name": "s3",