0
Fork 0
mirror of https://github.com/project-zot/zot.git synced 2024-12-16 21:56:37 -05:00

refactor(sync): use task scheduler (#1301)

Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>
This commit is contained in:
peusebiu 2023-05-31 20:26:23 +03:00 committed by GitHub
parent e148343540
commit 612a12e5a8
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
40 changed files with 4343 additions and 3604 deletions

View file

@ -12,7 +12,7 @@ permissions: read-all
# 2. run zot with s3 storage and dynamodb and dedupe enabled, push images, restart zot with dedupe false and no cache
# task scheduler will start a restore all blobs process at zot startup, after it finishes all blobs should be restored to their original state (have content)
jobs:
client-tools:
dedupe:
name: Dedupe/restore blobs
runs-on: ubuntu-latest
steps:
@ -55,3 +55,19 @@ jobs:
env:
AWS_ACCESS_KEY_ID: fake
AWS_SECRET_ACCESS_KEY: fake
sync:
name: Sync harness
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: 1.20.x
- name: Install dependencies
run: |
cd $GITHUB_WORKSPACE
go install github.com/swaggo/swag/cmd/swag
go mod download
- name: Run sync harness
run: |
make test-sync-harness

View file

@ -334,6 +334,14 @@ test-push-pull-running-dedupe: binary check-skopeo $(BATS) $(REGCLIENT) $(ORAS)
test-push-pull-running-dedupe-verbose: binary check-skopeo $(BATS) $(REGCLIENT) $(ORAS) $(HELM) $(CRICTL)
$(BATS) --trace --verbose-run --print-output-on-failure --show-output-of-passing-tests test/blackbox/pushpull_running_dedupe.bats
.PHONY: test-sync-harness
test-sync-harness: binary binary-minimal bench check-skopeo $(BATS)
$(BATS) --trace --print-output-on-failure test/blackbox/sync_harness.bats
.PHONY: test-sync-harness-verbose
test-sync-harness-verbose: binary binary-minimal bench check-skopeo $(BATS)
$(BATS) --trace --verbose-run --print-output-on-failure --show-output-of-passing-tests test/blackbox/sync_harness.bats
.PHONY: test-restore-s3-blobs
test-restore-s3-blobs: binary check-skopeo $(BATS) $(REGCLIENT) $(ORAS) $(HELM) $(CRICTL)
$(BATS) --trace --print-output-on-failure test/blackbox/restore_s3_blobs.bats
@ -362,13 +370,13 @@ test-cloud-only-verbose: binary check-skopeo $(BATS)
.PHONY: test-bats-sync
test-bats-sync: BUILD_LABELS=sync
test-bats-sync: binary binary-minimal check-skopeo $(BATS) $(NOTATION) $(COSIGN)
test-bats-sync: binary binary-minimal bench check-skopeo $(BATS) $(NOTATION) $(COSIGN)
$(BATS) --trace --print-output-on-failure test/blackbox/sync.bats
$(BATS) --trace --print-output-on-failure test/blackbox/sync_docker.bats
.PHONY: test-bats-sync-verbose
test-bats-sync-verbose: BUILD_LABELS=sync
test-bats-sync-verbose: binary binary-minimal check-skopeo $(BATS) $(NOTATION) $(COSIGN)
test-bats-sync-verbose: binary binary-minimal bench check-skopeo $(BATS) $(NOTATION) $(COSIGN)
$(BATS) --trace -t -x -p --verbose-run --print-output-on-failure --show-output-of-passing-tests test/blackbox/sync.bats
$(BATS) --trace -t -x -p --verbose-run --print-output-on-failure --show-output-of-passing-tests test/blackbox/sync_docker.bats

View file

@ -53,7 +53,6 @@ var (
ErrSyncInvalidUpstreamURL = errors.New("sync: upstream url not found in sync config")
ErrRegistryNoContent = errors.New("sync: could not find a Content that matches localRepo")
ErrSyncReferrerNotFound = errors.New("sync: couldn't find upstream referrer")
ErrSyncReferrer = errors.New("sync: failed to get upstream referrer")
ErrImageLintAnnotations = errors.New("routes: lint checks failed")
ErrParsingAuthHeader = errors.New("auth: failed parsing authorization header")
ErrBadType = errors.New("core: invalid type")
@ -92,4 +91,7 @@ var (
ErrSignConfigDirNotSet = errors.New("signatures: signature config dir not set")
ErrBadManifestDigest = errors.New("signatures: bad manifest digest")
ErrInvalidSignatureType = errors.New("signatures: invalid signature type")
ErrSyncPingRegistry = errors.New("sync: unable to ping any registry URLs")
ErrSyncImageNotSigned = errors.New("sync: image is not signed")
ErrSyncImageFilteredOut = errors.New("sync: image is filtered out by sync config")
)

View file

@ -43,6 +43,7 @@ type Controller struct {
Server *http.Server
Metrics monitoring.MetricServer
CveInfo ext.CveInfo
SyncOnDemand SyncOnDemand
// runtime params
chosenPort int // kernel-chosen port
}
@ -279,16 +280,30 @@ func (c *Controller) LoadNewConfig(reloadCtx context.Context, config *config.Con
// reload access control config
c.Config.HTTP.AccessControl = config.HTTP.AccessControl
// Enable extensions if extension config is provided
if config.Extensions != nil && config.Extensions.Sync != nil {
// reload sync config
// reload periodical gc interval
c.Config.Storage.GCInterval = config.Storage.GCInterval
// reload background tasks
if config.Extensions != nil {
// reload sync extension
c.Config.Extensions.Sync = config.Extensions.Sync
ext.EnableSyncExtension(reloadCtx, c.Config, c.RepoDB, c.StoreController, c.Log)
} else if c.Config.Extensions != nil {
c.Config.Extensions.Sync = nil
// reload search cve extension
if c.Config.Extensions.Search != nil {
// reload only if search is enabled and reloaded config has search extension
if *c.Config.Extensions.Search.Enable && config.Extensions.Search != nil {
c.Config.Extensions.Search.CVE = config.Extensions.Search.CVE
}
}
// reload scrub extension
c.Config.Extensions.Scrub = config.Extensions.Scrub
} else {
c.Config.Extensions = nil
}
c.Log.Info().Interface("reloaded params", c.Config.Sanitize()).Msg("new configuration settings")
c.StartBackgroundTasks(reloadCtx)
c.Log.Info().Interface("reloaded params", c.Config.Sanitize()).
Msg("loaded new configuration settings")
}
func (c *Controller) Shutdown() {
@ -334,14 +349,19 @@ func (c *Controller) StartBackgroundTasks(reloadCtx context.Context) {
}
}
// Enable extensions if extension config is provided for storeController
if c.Config.Extensions != nil {
if c.Config.Extensions.Sync != nil {
ext.EnableSyncExtension(reloadCtx, c.Config, c.RepoDB, c.StoreController, c.Log)
}
}
if c.Config.Extensions != nil {
ext.EnableScrubExtension(c.Config, c.Log, c.StoreController, taskScheduler)
syncOnDemand, err := ext.EnableSyncExtension(c.Config, c.RepoDB, c.StoreController, taskScheduler, c.Log)
if err != nil {
c.Log.Error().Err(err).Msg("unable to start sync extension")
}
c.SyncOnDemand = syncOnDemand
}
}
type SyncOnDemand interface {
SyncImage(repo, reference string) error
SyncReference(repo string, subjectDigestStr string, referenceType string) error
}

View file

@ -8,7 +8,6 @@
package api
import (
"context"
"encoding/json"
"errors"
"fmt"
@ -27,6 +26,7 @@ import (
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
"github.com/sigstore/cosign/v2/pkg/oci/remote"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/api/constants"
@ -34,7 +34,7 @@ import (
gqlPlayground "zotregistry.io/zot/pkg/debug/gqlplayground"
debug "zotregistry.io/zot/pkg/debug/swagger"
ext "zotregistry.io/zot/pkg/extensions"
"zotregistry.io/zot/pkg/extensions/sync"
syncConstants "zotregistry.io/zot/pkg/extensions/sync/constants"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta"
zreg "zotregistry.io/zot/pkg/regexp"
@ -376,8 +376,7 @@ func (rh *RouteHandler) CheckManifest(response http.ResponseWriter, request *htt
return
}
content, digest, mediaType, err := getImageManifest(request.Context(), rh, imgStore,
name, reference) //nolint:contextcheck
content, digest, mediaType, err := getImageManifest(rh, imgStore, name, reference) //nolint:contextcheck
if err != nil {
if errors.Is(err, zerr.ErrRepoNotFound) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain
WriteJSON(response, http.StatusNotFound,
@ -449,8 +448,7 @@ func (rh *RouteHandler) GetManifest(response http.ResponseWriter, request *http.
return
}
content, digest, mediaType, err := getImageManifest(request.Context(), rh,
imgStore, name, reference) //nolint: contextcheck
content, digest, mediaType, err := getImageManifest(rh, imgStore, name, reference) //nolint: contextcheck
if err != nil {
if errors.Is(err, zerr.ErrRepoNotFound) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain
WriteJSON(response, http.StatusNotFound,
@ -488,31 +486,26 @@ type ImageIndex struct {
ispec.Index
}
func getReferrers(ctx context.Context, routeHandler *RouteHandler,
func getReferrers(routeHandler *RouteHandler,
imgStore storageTypes.ImageStore, name string, digest godigest.Digest,
artifactTypes []string,
) (ispec.Index, error) {
references, err := imgStore.GetReferrers(name, digest, artifactTypes)
if err != nil || len(references.Manifests) == 0 {
if routeHandler.c.Config.Extensions != nil &&
routeHandler.c.Config.Extensions.Sync != nil &&
*routeHandler.c.Config.Extensions.Sync.Enable {
refs, err := imgStore.GetReferrers(name, digest, artifactTypes)
if err != nil || len(refs.Manifests) == 0 {
if isSyncOnDemandEnabled(*routeHandler.c) {
routeHandler.c.Log.Info().Str("repository", name).Str("reference", digest.String()).
Msg("referrers not found, trying to get reference by syncing on demand")
errSync := ext.SyncOneImage(ctx, routeHandler.c.Config, routeHandler.c.RepoDB, routeHandler.c.StoreController,
name, digest.String(), sync.OCIReference, routeHandler.c.Log)
if errSync != nil {
routeHandler.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to get references")
return ispec.Index{}, err
if errSync := routeHandler.c.SyncOnDemand.SyncReference(name, digest.String(), syncConstants.OCI); errSync != nil {
routeHandler.c.Log.Err(errSync).Str("repository", name).Str("reference", digest.String()).
Msg("error encounter while syncing OCI reference for image")
}
references, err = imgStore.GetReferrers(name, digest, artifactTypes)
refs, err = imgStore.GetReferrers(name, digest, artifactTypes)
}
}
return references, err
return refs, err
}
// GetReferrers godoc
@ -559,7 +552,7 @@ func (rh *RouteHandler) GetReferrers(response http.ResponseWriter, request *http
imgStore := rh.getImageStore(name)
referrers, err := getReferrers(request.Context(), rh, imgStore, name, digest, artifactTypes)
referrers, err := getReferrers(rh, imgStore, name, digest, artifactTypes)
if err != nil {
if errors.Is(err, zerr.ErrManifestNotFound) || errors.Is(err, zerr.ErrRepoNotFound) {
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("manifest not found")
@ -1722,15 +1715,10 @@ func (rh *RouteHandler) getImageStore(name string) storageTypes.ImageStore {
}
// will sync on demand if an image is not found, in case sync extensions is enabled.
func getImageManifest(ctx context.Context, routeHandler *RouteHandler, imgStore storageTypes.ImageStore,
name, reference string,
func getImageManifest(routeHandler *RouteHandler, imgStore storageTypes.ImageStore, name,
reference string,
) ([]byte, godigest.Digest, string, error) {
syncEnabled := false
if routeHandler.c.Config.Extensions != nil &&
routeHandler.c.Config.Extensions.Sync != nil &&
*routeHandler.c.Config.Extensions.Sync.Enable {
syncEnabled = true
}
syncEnabled := isSyncOnDemandEnabled(*routeHandler.c)
_, digestErr := godigest.Parse(reference)
if digestErr == nil {
@ -1745,36 +1733,47 @@ func getImageManifest(ctx context.Context, routeHandler *RouteHandler, imgStore
routeHandler.c.Log.Info().Str("repository", name).Str("reference", reference).
Msg("trying to get updated image by syncing on demand")
errSync := ext.SyncOneImage(ctx, routeHandler.c.Config, routeHandler.c.RepoDB, routeHandler.c.StoreController,
name, reference, "", routeHandler.c.Log)
if errSync != nil {
// we use a custom method for syncing cosign signatures for the moment, even though it's also an oci image.
if isCosignTag(reference) {
if errSync := routeHandler.c.SyncOnDemand.SyncReference(name, reference, syncConstants.Cosign); errSync != nil {
routeHandler.c.Log.Err(errSync).Str("repository", name).Str("reference", reference).
Msg("error encounter while syncing cosign signature for image")
}
} else {
if errSync := routeHandler.c.SyncOnDemand.SyncImage(name, reference); errSync != nil {
routeHandler.c.Log.Err(errSync).Str("repository", name).Str("reference", reference).
Msg("error encounter while syncing image")
}
}
}
return imgStore.GetImageManifest(name, reference)
}
// isCosignTag reports whether tag looks like a cosign-generated tag,
// i.e. a "sha256-" prefixed tag carrying either a signature or an sbom suffix.
func isCosignTag(tag string) bool {
	return strings.HasPrefix(tag, "sha256-") &&
		(strings.HasSuffix(tag, remote.SignatureTagSuffix) || strings.HasSuffix(tag, remote.SBOMTagSuffix))
}
// will sync referrers on demand if they are not found, in case sync extensions is enabled.
func getOrasReferrers(ctx context.Context, routeHandler *RouteHandler,
func getOrasReferrers(routeHandler *RouteHandler,
imgStore storageTypes.ImageStore, name string, digest godigest.Digest,
artifactType string,
) ([]artifactspec.Descriptor, error) {
refs, err := imgStore.GetOrasReferrers(name, digest, artifactType)
if err != nil {
if routeHandler.c.Config.Extensions != nil &&
routeHandler.c.Config.Extensions.Sync != nil &&
*routeHandler.c.Config.Extensions.Sync.Enable {
if isSyncOnDemandEnabled(*routeHandler.c) {
routeHandler.c.Log.Info().Str("repository", name).Str("reference", digest.String()).
Msg("artifact not found, trying to get artifact by syncing on demand")
errSync := ext.SyncOneImage(ctx, routeHandler.c.Config, routeHandler.c.RepoDB, routeHandler.c.StoreController,
name, digest.String(), sync.OrasArtifact, routeHandler.c.Log)
if errSync != nil {
routeHandler.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to get references")
return []artifactspec.Descriptor{}, err
if errSync := routeHandler.c.SyncOnDemand.SyncReference(name, digest.String(), syncConstants.Oras); errSync != nil {
routeHandler.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).
Msg("unable to get references")
}
refs, err = imgStore.GetOrasReferrers(name, digest, artifactType)
@ -1838,7 +1837,7 @@ func (rh *RouteHandler) GetOrasReferrers(response http.ResponseWriter, request *
rh.c.Log.Info().Str("digest", digest.String()).Str("artifactType", artifactType).Msg("getting manifest")
refs, err := getOrasReferrers(request.Context(), rh, imgStore, name, digest, artifactType) //nolint:contextcheck
refs, err := getOrasReferrers(rh, imgStore, name, digest, artifactType) //nolint:contextcheck
if err != nil {
if errors.Is(err, zerr.ErrManifestNotFound) || errors.Is(err, zerr.ErrRepoNotFound) {
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("manifest not found")
@ -1883,3 +1882,14 @@ func getBlobUploadLocation(url *url.URL, name string, digest godigest.Digest) st
return url.String()
}
// isSyncOnDemandEnabled reports whether the sync extension is present in config,
// enabled, and an on-demand sync service has been registered on the controller.
func isSyncOnDemandEnabled(ctlr Controller) bool {
if ctlr.Config.Extensions != nil &&
ctlr.Config.Extensions.Sync != nil &&
*ctlr.Config.Extensions.Sync.Enable &&
// NOTE(review): comparing formatted values instead of `ctlr.SyncOnDemand != nil`
// presumably guards against the interface holding a typed nil — confirm intent.
fmt.Sprintf("%v", ctlr.SyncOnDemand) != fmt.Sprintf("%v", nil) {
return true
}
return false
}

View file

@ -44,7 +44,7 @@ func TestConfigReloader(t *testing.T) {
content := fmt.Sprintf(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -78,7 +78,7 @@ func TestConfigReloader(t *testing.T) {
"level": "debug",
"output": "%s"
}
}`, port, htpasswdPath, logFile.Name())
}`, t.TempDir(), port, htpasswdPath, logFile.Name())
cfgfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
@ -102,7 +102,7 @@ func TestConfigReloader(t *testing.T) {
content = fmt.Sprintf(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -136,7 +136,7 @@ func TestConfigReloader(t *testing.T) {
"level": "debug",
"output": "%s"
}
}`, port, htpasswdPath, logFile.Name())
}`, t.TempDir(), port, htpasswdPath, logFile.Name())
err = cfgfile.Truncate(0)
So(err, ShouldBeNil)
@ -155,8 +155,10 @@ func TestConfigReloader(t *testing.T) {
data, err := os.ReadFile(logFile.Name())
So(err, ShouldBeNil)
t.Logf("log file: %s", data)
So(string(data), ShouldContainSubstring, "reloaded params")
So(string(data), ShouldContainSubstring, "new configuration settings")
So(string(data), ShouldContainSubstring, "loaded new configuration settings")
So(string(data), ShouldContainSubstring, "\"Users\":[\"alice\"]")
So(string(data), ShouldContainSubstring, "\"Actions\":[\"read\",\"create\",\"update\",\"delete\"]")
})
@ -173,7 +175,7 @@ func TestConfigReloader(t *testing.T) {
content := fmt.Sprintf(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -204,7 +206,7 @@ func TestConfigReloader(t *testing.T) {
}]
}
}
}`, port, logFile.Name())
}`, t.TempDir(), port, logFile.Name())
cfgfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
@ -228,7 +230,7 @@ func TestConfigReloader(t *testing.T) {
content = fmt.Sprintf(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -259,7 +261,7 @@ func TestConfigReloader(t *testing.T) {
}]
}
}
}`, port, logFile.Name())
}`, t.TempDir(), port, logFile.Name())
err = cfgfile.Truncate(0)
So(err, ShouldBeNil)
@ -278,8 +280,10 @@ func TestConfigReloader(t *testing.T) {
data, err := os.ReadFile(logFile.Name())
So(err, ShouldBeNil)
t.Logf("log file: %s", data)
So(string(data), ShouldContainSubstring, "reloaded params")
So(string(data), ShouldContainSubstring, "new configuration settings")
So(string(data), ShouldContainSubstring, "loaded new configuration settings")
So(string(data), ShouldContainSubstring, "\"URLs\":[\"http://localhost:9999\"]")
So(string(data), ShouldContainSubstring, "\"TLSVerify\":true")
So(string(data), ShouldContainSubstring, "\"OnDemand\":false")
@ -291,6 +295,111 @@ func TestConfigReloader(t *testing.T) {
So(string(data), ShouldContainSubstring, "\"Semver\":false")
})
Convey("reload scrub and CVE config", t, func(c C) {
port := test.GetFreePort()
baseURL := test.GetBaseURL(port)
logFile, err := os.CreateTemp("", "zot-log*.txt")
So(err, ShouldBeNil)
defer os.Remove(logFile.Name()) // clean up
content := fmt.Sprintf(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
"port": "%s"
},
"log": {
"level": "debug",
"output": "%s"
},
"extensions": {
"search": {
"cve": {
"updateInterval": "24h",
"trivy": {
"DBRepository": "ghcr.io/aquasecurity/trivy-db"
}
}
},
"scrub": {
"enable": true,
"interval": "24h"
}
}
}`, t.TempDir(), port, logFile.Name())
cfgfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(cfgfile.Name()) // clean up
_, err = cfgfile.Write([]byte(content))
So(err, ShouldBeNil)
os.Args = []string{"cli_test", "serve", cfgfile.Name()}
go func() {
err = cli.NewServerRootCmd().Execute()
So(err, ShouldBeNil)
}()
test.WaitTillServerReady(baseURL)
content = fmt.Sprintf(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
"port": "%s"
},
"log": {
"level": "debug",
"output": "%s"
},
"extensions": {
"search": {
"cve": {
"updateInterval": "5h",
"trivy": {
"DBRepository": "ghcr.io/project-zot/trivy-db"
}
}
}
}
}`, t.TempDir(), port, logFile.Name())
err = cfgfile.Truncate(0)
So(err, ShouldBeNil)
_, err = cfgfile.Seek(0, io.SeekStart)
So(err, ShouldBeNil)
_, err = cfgfile.WriteString(content)
So(err, ShouldBeNil)
err = cfgfile.Close()
So(err, ShouldBeNil)
// wait for config reload
time.Sleep(2 * time.Second)
data, err := os.ReadFile(logFile.Name())
So(err, ShouldBeNil)
t.Logf("log file: %s", data)
So(string(data), ShouldContainSubstring, "reloaded params")
So(string(data), ShouldContainSubstring, "loaded new configuration settings")
So(string(data), ShouldContainSubstring, "\"UpdateInterval\":18000000000000")
So(string(data), ShouldContainSubstring, "\"Scrub\":null")
So(string(data), ShouldContainSubstring, "\"DBRepository\":\"ghcr.io/project-zot/trivy-db\"")
})
Convey("reload bad config", t, func(c C) {
port := test.GetFreePort()
baseURL := test.GetBaseURL(port)
@ -303,7 +412,7 @@ func TestConfigReloader(t *testing.T) {
content := fmt.Sprintf(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "/tmp/zot"
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
@ -334,7 +443,7 @@ func TestConfigReloader(t *testing.T) {
}]
}
}
}`, port, logFile.Name())
}`, t.TempDir(), port, logFile.Name())
cfgfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
@ -374,6 +483,8 @@ func TestConfigReloader(t *testing.T) {
data, err := os.ReadFile(logFile.Name())
So(err, ShouldBeNil)
t.Logf("log file: %s", data)
So(string(data), ShouldNotContainSubstring, "reloaded params")
So(string(data), ShouldNotContainSubstring, "new configuration settings")
So(string(data), ShouldContainSubstring, "\"URLs\":[\"http://localhost:8080\"]")

View file

@ -174,13 +174,15 @@ func TypeOf(v interface{}) string {
func MakeHTTPGetRequest(httpClient *http.Client, username string, password string, resultPtr interface{},
blobURL string, mediaType string, log log.Logger,
) ([]byte, int, error) {
) ([]byte, string, int, error) {
req, err := http.NewRequest(http.MethodGet, blobURL, nil) //nolint
if err != nil {
return nil, 0, err
return nil, "", 0, err
}
if mediaType != "" {
req.Header.Set("Accept", mediaType)
}
if username != "" && password != "" {
req.SetBasicAuth(username, password)
@ -191,7 +193,7 @@ func MakeHTTPGetRequest(httpClient *http.Client, username string, password strin
log.Error().Str("errorType", TypeOf(err)).
Err(err).Str("blobURL", blobURL).Msg("couldn't get blob")
return nil, -1, err
return nil, "", -1, err
}
body, err := io.ReadAll(resp.Body)
@ -199,7 +201,7 @@ func MakeHTTPGetRequest(httpClient *http.Client, username string, password strin
log.Error().Str("errorType", TypeOf(err)).
Err(err).Str("blobURL", blobURL).Msg("couldn't get blob")
return nil, resp.StatusCode, err
return nil, "", resp.StatusCode, err
}
defer resp.Body.Close()
@ -208,20 +210,21 @@ func MakeHTTPGetRequest(httpClient *http.Client, username string, password strin
log.Error().Str("status code", fmt.Sprint(resp.StatusCode)).
Err(err).Str("blobURL", blobURL).Msg("couldn't get blob")
return nil, resp.StatusCode, errors.New(string(body)) //nolint:goerr113
return nil, "", resp.StatusCode, errors.New(string(body)) //nolint:goerr113
}
// read blob
if len(body) > 0 {
err = json.Unmarshal(body, &resultPtr)
if err != nil {
log.Error().Str("errorType", TypeOf(err)).
Err(err).Str("blobURL", blobURL).Msg("couldn't unmarshal blob")
log.Error().Str("errorType", TypeOf(err)).Str("blobURL", blobURL).
Err(err).Msg("couldn't unmarshal remote blob")
return body, resp.StatusCode, err
return body, "", resp.StatusCode, err
}
}
return body, resp.StatusCode, err
return body, resp.Header.Get("Content-Type"), resp.StatusCode, err
}
func DirExists(d string) bool {

View file

@ -115,9 +115,9 @@ func TestCommon(t *testing.T) {
var resultPtr interface{}
httpClient, err := common.CreateHTTPClient(true, "localhost", tempDir)
So(err, ShouldBeNil)
_, _, err = common.MakeHTTPGetRequest(httpClient, "", "",
_, _, _, err = common.MakeHTTPGetRequest(httpClient, "", "",
resultPtr, baseURL+"/v2/", ispec.MediaTypeImageManifest, log.NewLogger("", ""))
So(err, ShouldNotBeNil)
So(err, ShouldBeNil)
})
Convey("Index func", t, func() {

View file

@ -4,33 +4,48 @@
package extensions
import (
"context"
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/extensions/sync"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/scheduler"
"zotregistry.io/zot/pkg/storage"
)
func EnableSyncExtension(ctx context.Context, config *config.Config,
repoDB repodb.RepoDB, storeController storage.StoreController, log log.Logger,
) {
func EnableSyncExtension(config *config.Config, repoDB repodb.RepoDB,
storeController storage.StoreController, sch *scheduler.Scheduler, log log.Logger,
) (*sync.BaseOnDemand, error) {
if config.Extensions.Sync != nil && *config.Extensions.Sync.Enable {
if err := sync.Run(ctx, *config.Extensions.Sync, repoDB, storeController, log); err != nil {
log.Error().Err(err).Msg("Error encountered while setting up syncing")
onDemand := sync.NewOnDemand(log)
for _, registryConfig := range config.Extensions.Sync.Registries {
isPeriodical := len(registryConfig.Content) != 0 && registryConfig.PollInterval != 0
isOnDemand := registryConfig.OnDemand
if isPeriodical || isOnDemand {
service, err := sync.New(registryConfig, config.Extensions.Sync.CredentialsFile,
storeController, repoDB, log)
if err != nil {
return nil, err
}
} else {
if isPeriodical {
// add to task scheduler periodic sync
gen := sync.NewTaskGenerator(service, log)
sch.SubmitGenerator(gen, registryConfig.PollInterval, scheduler.MediumPriority)
}
if isOnDemand {
// onDemand services used in routes.go
onDemand.Add(service)
}
}
}
return onDemand, nil
}
log.Info().Msg("Sync registries config not provided or disabled, skipping sync")
}
}
func SyncOneImage(ctx context.Context, config *config.Config, repoDB repodb.RepoDB,
storeController storage.StoreController, repoName, reference string, artifactType string, log log.Logger,
) error {
log.Info().Str("repository", repoName).Str("reference", reference).Msg("syncing image")
err := sync.OneImage(ctx, *config.Extensions.Sync, repoDB, storeController, repoName, reference, artifactType, log)
return err
return nil, nil //nolint: nilnil
}

View file

@ -4,28 +4,20 @@
package extensions
import (
"context"
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/extensions/sync"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/scheduler"
"zotregistry.io/zot/pkg/storage"
)
// EnableSyncExtension ...
func EnableSyncExtension(ctx context.Context, config *config.Config, repoDB repodb.RepoDB,
storeController storage.StoreController, log log.Logger,
) {
func EnableSyncExtension(config *config.Config, repoDB repodb.RepoDB,
storeController storage.StoreController, sch *scheduler.Scheduler, log log.Logger,
) (*sync.BaseOnDemand, error) {
log.Warn().Msg("skipping enabling sync extension because given zot binary doesn't include this feature," +
"please build a binary that does so")
}
// SyncOneImage ...
func SyncOneImage(ctx context.Context, config *config.Config, repoDB repodb.RepoDB,
storeController storage.StoreController, repoName, reference string, artifactType string, log log.Logger,
) error {
log.Warn().Msg("skipping syncing on demand because given zot binary doesn't include this feature," +
"please build a binary that does so")
return nil
return nil, nil //nolint: nilnil
}

View file

@ -0,0 +1,8 @@
package constants
// references type.
const (
Oras = "OrasReference"
Cosign = "CosignSignature"
OCI = "OCIReference"
)

View file

@ -0,0 +1,257 @@
//go:build sync
// +build sync
package sync
import (
"regexp"
"strings"
"github.com/Masterminds/semver"
glob "github.com/bmatcuk/doublestar/v4"
"zotregistry.io/zot/pkg/common"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
"zotregistry.io/zot/pkg/log"
)
/* ContentManager uses registry content configuration to filter repos/tags
and also manages applying destination/stripPrefix rules
eg: "content": [
{
"prefix": "/repo1/repo",
"destination": "/repo",
"stripPrefix": true
"tags": {
"regex": "4.*",
"semver": true
}
}
]
*/
type ContentManager struct {
contents []syncconf.Content // content rules taken from the registry sync config
log log.Logger // logger used for glob/filter diagnostics
}
// NewContentManager builds a ContentManager over the given content rules.
func NewContentManager(contents []syncconf.Content, log log.Logger) ContentManager {
	manager := ContentManager{
		contents: contents,
		log:      log,
	}

	return manager
}
/*
MatchesContent returns whether a repo matches a registry
config content (is not filtered out by content config rules).
*/
/*
MatchesContent returns whether a repo matches a registry
config content (is not filtered out by content config rules).
*/
func (cm ContentManager) MatchesContent(repo string) bool {
	return cm.getContentByUpstreamRepo(repo) != nil
}
// FilterTags filters a repo tags based on content config rules (semver, regex).
// FilterTags filters a repo tags based on content config rules (semver, regex).
func (cm ContentManager) FilterTags(repo string, tags []string) ([]string, error) {
	rules := cm.getContentByLocalRepo(repo)

	// no matching content entry or no tag rules: nothing to filter
	if rules == nil || rules.Tags == nil {
		return tags, nil
	}

	if rules.Tags.Regex != nil {
		filtered, err := filterTagsByRegex(tags, *rules.Tags.Regex, cm.log)
		if err != nil {
			return []string{}, err
		}

		tags = filtered
	}

	if rules.Tags.Semver != nil && *rules.Tags.Semver {
		tags = filterTagsBySemver(tags, cm.log)
	}

	return tags, nil
}
/*
GetRepoDestination applies content destination config rule and returns the final repo namespace.
- used by periodically sync.
*/
/*
GetRepoDestination applies content destination config rule and returns the final repo namespace.
- used by periodically sync.
*/
func (cm ContentManager) GetRepoDestination(repo string) string {
	if content := cm.getContentByUpstreamRepo(repo); content != nil {
		return getRepoDestination(repo, *content)
	}

	return ""
}
/*
GetRepoSource is the inverse function of GetRepoDestination, needed in on demand to find out
the remote name of a repo given a local repo.
- used by on demand sync.
*/
/*
GetRepoSource is the inverse function of GetRepoDestination, needed in on demand to find out
the remote name of a repo given a local repo.
- used by on demand sync.
*/
func (cm ContentManager) GetRepoSource(repo string) string {
	if content := cm.getContentByLocalRepo(repo); content != nil {
		return getRepoSource(repo, *content)
	}

	return ""
}
// utility functions.
// getContentByUpstreamRepo returns the first content entry whose prefix glob
// matches the given upstream repo name, or nil if none matches.
// Glob errors are logged and the offending pattern is skipped.
func (cm ContentManager) getContentByUpstreamRepo(repo string) *syncconf.Content {
	for i := range cm.contents {
		// handle prefixes starting with '/'
		prefix := strings.TrimPrefix(cm.contents[i].Prefix, "/")

		matched, err := glob.Match(prefix, repo)
		if err != nil {
			cm.log.Error().Str("errorType", common.TypeOf(err)).
				Err(err).Str("pattern",
				prefix).Msg("error while parsing glob pattern, skipping it...")

			continue
		}

		if matched {
			// return a pointer into the backing slice, consistent with getContentByLocalRepo
			return &cm.contents[i]
		}
	}

	return nil
}
// getContentByLocalRepo returns the first content entry whose destination-side
// pattern matches the given local repo name, or nil if none matches.
func (cm ContentManager) getContentByLocalRepo(repo string) *syncconf.Content {
	repo = strings.Trim(repo, "/")

	for cID := range cm.contents {
		content := cm.contents[cID]
		// make sure prefix ends in "/" to extract the meta characters
		prefix := strings.Trim(content.Prefix, "/") + "/"
		destination := strings.Trim(content.Destination, "/")

		var patternSlice []string

		if content.StripPrefix {
			// with stripPrefix the local name keeps only the glob meta part of the prefix
			_, metaCharacters := glob.SplitPattern(prefix)
			patternSlice = append(patternSlice, destination, metaCharacters)
		} else {
			patternSlice = append(patternSlice, destination, prefix)
		}

		pattern := strings.Trim(strings.Join(patternSlice, "/"), "/")

		matched, err := glob.Match(pattern, repo)
		if err != nil {
			// bad pattern: skip this entry, same as the original behavior
			continue
		}

		if matched {
			return &cm.contents[cID]
		}
	}

	return nil
}
// getRepoSource maps a local repo name back to its upstream name by undoing
// the destination/stripPrefix rewrite applied at sync time.
func getRepoSource(localRepo string, content syncconf.Content) string {
	destination := strings.Trim(content.Destination, "/")
	prefix := strings.Trim(content.Prefix, "/*")

	// drop the destination part to recover the upstream-relative name
	trimmed := strings.Trim(strings.TrimPrefix(strings.Trim(localRepo, "/"), destination), "/")

	var repoSource string

	if content.StripPrefix {
		// the prefix was stripped locally, so prepend it back
		repoSource = strings.Join([]string{prefix, trimmed}, "/")
	} else {
		repoSource = trimmed
	}

	if repoSource == "/" {
		return repoSource
	}

	return strings.Trim(repoSource, "/")
}
// getRepoDestination returns the local storage path of the synced repo based on the specified destination.
func getRepoDestination(remoteRepo string, content syncconf.Content) string {
remoteRepo = strings.Trim(remoteRepo, "/")
destination := strings.Trim(content.Destination, "/")
prefix := strings.Trim(content.Prefix, "/*")
var repoDestSlice []string
if content.StripPrefix {
remoteRepo = strings.TrimPrefix(remoteRepo, prefix)
remoteRepo = strings.Trim(remoteRepo, "/")
repoDestSlice = append(repoDestSlice, destination, remoteRepo)
} else {
repoDestSlice = append(repoDestSlice, destination, remoteRepo)
}
repoDestination := strings.Join(repoDestSlice, "/")
if repoDestination == "/" {
return "/"
}
return strings.Trim(repoDestination, "/")
}
// filterTagsByRegex filters images by tag regex given in the config.
// An empty regex or empty tag list yields an empty result; a regex that fails
// to compile returns the compile error alongside an empty slice.
func filterTagsByRegex(tags []string, regex string, log log.Logger) ([]string, error) {
	matching := []string{}

	// nothing to filter, or no filter configured
	if len(tags) == 0 || regex == "" {
		return matching, nil
	}

	log.Info().Str("regex", regex).Msg("filtering tags using regex")

	tagReg, err := regexp.Compile(regex)
	if err != nil {
		log.Error().Err(err).Str("regex", regex).Msg("couldn't compile regex")

		return matching, err
	}

	for _, tag := range tags {
		if tagReg.MatchString(tag) {
			matching = append(matching, tag)
		}
	}

	return matching, nil
}
// filterTagsBySemver filters tags by checking if they are semver compliant.
// Non-compliant tags are silently dropped.
func filterTagsBySemver(tags []string, log log.Logger) []string {
	log.Info().Msg("start filtering using semver compliant rule")

	compliant := []string{}

	for _, tag := range tags {
		if _, err := semver.NewVersion(tag); err == nil {
			compliant = append(compliant, tag)
		}
	}

	return compliant
}

View file

@ -0,0 +1,248 @@
//go:build sync
// +build sync
package sync
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
"zotregistry.io/zot/pkg/log"
)
// TestContentManager exercises GetRepoDestination and GetRepoSource as
// inverse mappings over a shared table of content configs.
func TestContentManager(t *testing.T) {
	// NOTE(review): field names read from GetRepoSource's point of view —
	// "repo" is the local (destination) path and "expected" the upstream
	// path; the GetRepoDestination subtest runs the table in reverse.
	testCases := []struct {
		repo     string
		content  syncconf.Content
		expected string
	}{
		{
			repo:     "alpine/zot-fold/alpine",
			content:  syncconf.Content{Prefix: "zot-fold/alpine", Destination: "/alpine", StripPrefix: false},
			expected: "zot-fold/alpine",
		},
		{
			repo:     "zot-fold/alpine",
			content:  syncconf.Content{Prefix: "zot-fold/alpine", Destination: "/", StripPrefix: false},
			expected: "zot-fold/alpine",
		},
		{
			repo:     "alpine",
			content:  syncconf.Content{Prefix: "zot-fold/alpine", Destination: "/alpine", StripPrefix: true},
			expected: "zot-fold/alpine",
		},
		{
			repo:     "/",
			content:  syncconf.Content{Prefix: "zot-fold/alpine", Destination: "/", StripPrefix: true},
			expected: "zot-fold/alpine",
		},
		{
			repo:     "",
			content:  syncconf.Content{Prefix: "/", Destination: "/", StripPrefix: true},
			expected: "/",
		},
		{
			repo:     "alpine",
			content:  syncconf.Content{Prefix: "zot-fold/alpine", Destination: "/alpine", StripPrefix: true},
			expected: "zot-fold/alpine",
		},
		{
			repo:     "alpine",
			content:  syncconf.Content{Prefix: "zot-fold/*", Destination: "/", StripPrefix: true},
			expected: "zot-fold/alpine",
		},
		{
			repo:     "alpine",
			content:  syncconf.Content{Prefix: "zot-fold/**", Destination: "/", StripPrefix: true},
			expected: "zot-fold/alpine",
		},
		{
			repo:     "zot-fold/alpine",
			content:  syncconf.Content{Prefix: "zot-fold/**", Destination: "/", StripPrefix: false},
			expected: "zot-fold/alpine",
		},
	}

	// upstream path -> local path
	Convey("Test GetRepoDestination()", t, func() {
		for _, test := range testCases {
			cm := NewContentManager([]syncconf.Content{test.content}, log.Logger{})
			actualResult := cm.GetRepoDestination(test.expected)
			So(actualResult, ShouldEqual, test.repo)
		}
	})

	// this is the inverse function of getRepoDestination()
	Convey("Test GetRepoSource()", t, func() {
		for _, test := range testCases {
			cm := NewContentManager([]syncconf.Content{test.content}, log.Logger{})
			actualResult := cm.GetRepoSource(test.repo)
			So(actualResult, ShouldEqual, test.expected)
		}
	})

	// an invalid glob pattern must never match
	Convey("Test MatchesContent() error", t, func() {
		content := syncconf.Content{Prefix: "[repo%^&"}
		cm := NewContentManager([]syncconf.Content{content}, log.Logger{})
		So(cm.MatchesContent("repo"), ShouldEqual, false)
	})
}
// TestGetContentByLocalRepo checks that a local repo path is matched to the
// correct content config entry (by index), or to none (-1).
func TestGetContentByLocalRepo(t *testing.T) {
	testCases := []struct {
		repo     string
		content  []syncconf.Content
		expected int // index of the content entry expected to match, -1 for no match
	}{
		{
			repo: "alpine/zot-fold/alpine",
			content: []syncconf.Content{
				{Prefix: "zot-fold/alpine/", Destination: "/alpine", StripPrefix: true},
				{Prefix: "zot-fold/alpine", Destination: "/alpine", StripPrefix: false},
			},
			expected: 1,
		},
		{
			repo: "alpine/zot-fold/alpine",
			content: []syncconf.Content{
				{Prefix: "zot-fold/*", Destination: "/alpine", StripPrefix: false},
				{Prefix: "zot-fold/alpine", Destination: "/alpine", StripPrefix: true},
			},
			expected: 0,
		},
		{
			repo: "myFold/zot-fold/internal/alpine",
			content: []syncconf.Content{
				{Prefix: "zot-fold/alpine", Destination: "/alpine", StripPrefix: true},
				{Prefix: "zot-fold/**", Destination: "/myFold", StripPrefix: false},
			},
			expected: 1,
		},
		{
			repo: "alpine",
			content: []syncconf.Content{
				{Prefix: "zot-fold/*", Destination: "/alpine", StripPrefix: true},
				{Prefix: "zot-fold/alpine", Destination: "/", StripPrefix: true},
			},
			expected: -1,
		},
		{
			repo: "alpine",
			content: []syncconf.Content{
				{Prefix: "zot-fold/*", Destination: "/alpine", StripPrefix: true},
				{Prefix: "zot-fold/*", Destination: "/", StripPrefix: true},
			},
			expected: 1,
		},
		{
			repo: "alpine/alpine",
			content: []syncconf.Content{
				{Prefix: "zot-fold/*", Destination: "/alpine", StripPrefix: true},
				{Prefix: "zot-fold/*", Destination: "/", StripPrefix: true},
			},
			expected: 0,
		},
	}

	Convey("Test getContentByLocalRepo()", t, func() {
		for _, test := range testCases {
			cm := NewContentManager(test.content, log.Logger{})
			actualResult := cm.getContentByLocalRepo(test.repo)
			if test.expected == -1 {
				So(actualResult, ShouldEqual, nil)
			} else {
				// pointer identity against the matched table entry
				So(actualResult, ShouldEqual, &test.content[test.expected])
			}
		}
	})

	// an invalid glob prefix must yield no match rather than an error
	Convey("Test getContentByLocalRepo() error", t, func() {
		content := syncconf.Content{Prefix: "[repo%^&"}
		cm := NewContentManager([]syncconf.Content{content}, log.Logger{})
		So(cm.getContentByLocalRepo("repo"), ShouldBeNil)
	})
}
// TestFilterTags checks tag filtering by regex and by semver compliance,
// including the bad-regex error path and the empty tag-list case.
func TestFilterTags(t *testing.T) {
	allTagsRegex := ".*"
	badRegex := "[*" // deliberately invalid regex to exercise the error path
	semverFalse := false
	semverTrue := true
	testCases := []struct {
		tags         []string
		repo         string
		content      []syncconf.Content
		filteredTags []string
		err          bool
	}{
		{
			repo: "alpine",
			content: []syncconf.Content{
				{Prefix: "**", Tags: &syncconf.Tags{Regex: &allTagsRegex, Semver: &semverFalse}},
			},
			tags:         []string{"v1", "v2", "v3"},
			filteredTags: []string{"v1", "v2", "v3"},
			err:          false,
		},
		{
			repo: "alpine",
			content: []syncconf.Content{
				{Prefix: "**", Tags: &syncconf.Tags{}},
			},
			tags:         []string{"v1", "v2", "v3"},
			filteredTags: []string{"v1", "v2", "v3"},
			err:          false,
		},
		{
			repo: "alpine",
			content: []syncconf.Content{
				{Prefix: "**", Tags: &syncconf.Tags{Regex: &allTagsRegex, Semver: &semverTrue}},
			},
			tags:         []string{"1s0", "2v9", "v3.0.3"},
			filteredTags: []string{"v3.0.3"},
			err:          false,
		},
		{
			repo: "infra/busybox",
			content: []syncconf.Content{
				{Prefix: "infra/*", Tags: &syncconf.Tags{Semver: &semverTrue}},
			},
			tags:         []string{"latest", "v1.0.1"},
			filteredTags: []string{"v1.0.1"},
			err:          false,
		},
		{
			repo: "repo",
			content: []syncconf.Content{
				{Prefix: "repo*", Tags: &syncconf.Tags{Regex: &badRegex}},
			},
			tags:         []string{"latest", "v2.0.1"},
			filteredTags: []string{},
			err:          true,
		},
		{
			repo: "repo",
			content: []syncconf.Content{
				{Prefix: "repo", Tags: &syncconf.Tags{Regex: &allTagsRegex}},
			},
			tags:         []string{},
			filteredTags: []string{},
			err:          false,
		},
	}

	Convey("Test FilterTags()", t, func() {
		for _, test := range testCases {
			cm := NewContentManager(test.content, log.NewLogger("debug", ""))
			actualResult, err := cm.FilterTags(test.repo, test.tags)
			So(actualResult, ShouldResemble, test.filteredTags)
			if test.err {
				So(err, ShouldNotBeNil)
			} else {
				So(err, ShouldBeNil)
			}
		}
	})
}

View file

@ -0,0 +1,101 @@
package client
import (
"net/http"
"net/url"
"sync"
"zotregistry.io/zot/pkg/common"
"zotregistry.io/zot/pkg/log"
)
// Config holds the connection settings for one upstream registry endpoint
// used by the sync HTTP client.
type Config struct {
	URL       string // base URL of the upstream registry
	Username  string // basic-auth credentials (optional)
	Password  string
	CertDir   string // directory with client TLS certs/CA (optional)
	TLSVerify bool   // whether to verify the upstream TLS certificate
}

// Client is a lock-guarded HTTP client for an upstream registry; SetConfig
// may replace config/client/url at runtime, so reads take the RLock.
type Client struct {
	config *Config
	client *http.Client
	url    *url.URL
	lock   *sync.RWMutex
	log    log.Logger
}
// New builds a Client for the given upstream config; it fails if the config
// cannot be applied (unparsable URL, TLS client setup error).
func New(config Config, log log.Logger) (*Client, error) {
	httpClient := Client{log: log, lock: new(sync.RWMutex)}

	err := httpClient.SetConfig(config)
	if err != nil {
		return nil, err
	}

	return &httpClient, nil
}
// GetConfig returns the client's current configuration.
// NOTE(review): this returns the internal *Config, not a copy — callers must
// treat it as read-only, since mutating it would bypass the client's lock.
func (httpClient *Client) GetConfig() *Config {
	httpClient.lock.RLock()
	defer httpClient.lock.RUnlock()
	return httpClient.config
}
// GetHostname returns the host (hostname[:port]) of the configured upstream URL.
func (httpClient *Client) GetHostname() string {
	httpClient.lock.RLock()
	defer httpClient.lock.RUnlock()
	return httpClient.url.Host
}
// SetConfig atomically replaces the client's URL, HTTP transport and
// credentials with ones derived from config.
//
// All derived values (parsed URL, transport) are computed first and the
// client's fields are only assigned once everything succeeded, so a failure
// leaves the client in its previous, consistent state. (The original code
// assigned httpClient.url before CreateHTTPClient could fail, leaving a new
// URL paired with the old transport/config on error.)
func (httpClient *Client) SetConfig(config Config) error {
	httpClient.lock.Lock()
	defer httpClient.lock.Unlock()

	clientURL, err := url.Parse(config.URL)
	if err != nil {
		return err
	}

	client, err := common.CreateHTTPClient(config.TLSVerify, clientURL.Host, config.CertDir)
	if err != nil {
		return err
	}

	// commit only after every fallible step succeeded
	httpClient.url = clientURL
	httpClient.client = client
	httpClient.config = &config

	return nil
}
// IsAvailable reports whether the upstream registry answers the OCI
// availability endpoint ("/v2/") with 200 OK.
func (httpClient *Client) IsAvailable() bool {
	_, _, statusCode, err := httpClient.MakeGetRequest(nil, "", "/v2/")

	return err == nil && statusCode == http.StatusOK
}
// MakeGetRequest performs an authenticated GET against the client's base URL
// joined with the given route segments. If resultPtr is non-nil the response
// is decoded into it. Returns the raw body, media type, HTTP status and error.
func (httpClient *Client) MakeGetRequest(resultPtr interface{}, mediaType string,
	route ...string,
) ([]byte, string, int, error) {
	httpClient.lock.RLock()
	defer httpClient.lock.RUnlock()

	// work on a copy so the client's base URL is never mutated
	endpoint := *httpClient.url
	for _, segment := range route {
		endpoint = *endpoint.JoinPath(segment)
	}

	// re-encode to normalize any query string carried over from the base URL
	endpoint.RawQuery = endpoint.Query().Encode()

	return common.MakeHTTPGetRequest(httpClient.client, httpClient.config.Username,
		httpClient.config.Password, resultPtr,
		endpoint.String(), mediaType, httpClient.log)
}

View file

@ -0,0 +1,287 @@
//go:build sync
// +build sync
package sync
import (
"encoding/json"
"errors"
"fmt"
"os"
"path"
"strings"
"time"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/common"
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/storage"
storageCommon "zotregistry.io/zot/pkg/storage/common"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/local"
storageTypes "zotregistry.io/zot/pkg/storage/types"
)
// LocalRegistry is the sync destination: it commits images staged in a
// temporary OCI-layout store into zot's own storage and records repoDB
// metadata.
type LocalRegistry struct {
	storeController storage.StoreController // zot's real image stores
	tempStorage     OciLayoutStorage        // per-sync staging area (OCI layout)
	repoDB          repodb.RepoDB           // may be nil; metadata updates are skipped then
	log             log.Logger
}
// NewLocalRegistry returns a Local sync destination backed by zot's storage.
func NewLocalRegistry(storeController storage.StoreController, repoDB repodb.RepoDB, log log.Logger) Local {
	return &LocalRegistry{
		storeController: storeController,
		repoDB:          repoDB,
		// first we sync from remote (using containers/image copy from docker:// to oci:) to a temp imageStore
		// then we copy the image from tempStorage to zot's storage using ImageStore APIs
		tempStorage: NewOciLayoutStorage(storeController),
		log:         log,
	}
}
// CanSkipImage reports whether repo:tag is already present locally with the
// given upstream manifest digest; "not found locally" is not an error, it
// simply means the image must be synced.
func (registry *LocalRegistry) CanSkipImage(repo, tag string, imageDigest digest.Digest) (bool, error) {
	imageStore := registry.storeController.GetImageStore(repo)

	_, localDigest, _, err := imageStore.GetImageManifest(repo, tag)
	if err != nil {
		// absent locally -> must sync, no error to report
		if errors.Is(err, zerr.ErrRepoNotFound) || errors.Is(err, zerr.ErrManifestNotFound) {
			return false, nil
		}

		registry.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).Str("reference", tag).
			Err(err).Msg("couldn't get local image manifest")

		return false, err
	}

	if localDigest == imageDigest {
		return true, nil
	}

	registry.log.Info().Str("repo", repo).Str("reference", tag).
		Str("localDigest", localDigest.String()).
		Str("remoteDigest", imageDigest.String()).
		Msg("remote image digest changed, syncing again")

	return false, nil
}
// GetContext returns the containers/image system context of the temp store.
func (registry *LocalRegistry) GetContext() *types.SystemContext {
	return registry.tempStorage.GetContext()
}

// GetImageReference returns a temp-store OCI-layout reference for repo:reference.
func (registry *LocalRegistry) GetImageReference(repo, reference string) (types.ImageReference, error) {
	return registry.tempStorage.GetImageReference(repo, reference)
}
// CommitImage finalizes a syncing image: it copies the manifest (or, for an
// index, each referenced manifest and then the index itself) together with
// all blobs from the temporary sync store into zot's storage, updates repoDB
// metadata, and removes the temporary directory on return.
func (registry *LocalRegistry) CommitImage(imageReference types.ImageReference, repo, reference string) error {
	imageStore := registry.storeController.GetImageStore(repo)

	tempImageStore := getImageStoreFromImageReference(imageReference, repo, reference)

	// the staging dir is always cleaned up, success or failure
	defer os.RemoveAll(tempImageStore.RootDir())

	registry.log.Info().Str("syncTempDir", path.Join(tempImageStore.RootDir(), repo)).Str("reference", reference).
		Msg("pushing synced local image to local registry")

	var lockLatency time.Time

	manifestBlob, manifestDigest, mediaType, err := tempImageStore.GetImageManifest(repo, reference)
	if err != nil {
		registry.log.Error().Str("errorType", common.TypeOf(err)).
			Err(err).Str("dir", path.Join(tempImageStore.RootDir(), repo)).Str("repo", repo).Str("reference", reference).
			Msg("couldn't find synced manifest in temporary sync dir")

		return err
	}

	// is image manifest
	switch mediaType {
	case ispec.MediaTypeImageManifest:
		if err := registry.copyManifest(repo, manifestBlob, reference, tempImageStore); err != nil {
			// lint rejection is deliberately treated as non-fatal
			if errors.Is(err, zerr.ErrImageLintAnnotations) {
				registry.log.Error().Str("errorType", common.TypeOf(err)).
					Err(err).Msg("couldn't upload manifest because of missing annotations")

				return nil
			}

			return err
		}
	case ispec.MediaTypeImageIndex:
		// is image index
		var indexManifest ispec.Index

		if err := json.Unmarshal(manifestBlob, &indexManifest); err != nil {
			registry.log.Error().Str("errorType", common.TypeOf(err)).
				Err(err).Str("dir", path.Join(tempImageStore.RootDir(), repo)).
				Msg("invalid JSON")

			return err
		}

		// copy every manifest referenced by the index before the index itself
		for _, manifest := range indexManifest.Manifests {
			tempImageStore.RLock(&lockLatency)
			manifestBuf, err := tempImageStore.GetBlobContent(repo, manifest.Digest)
			tempImageStore.RUnlock(&lockLatency)

			if err != nil {
				registry.log.Error().Str("errorType", common.TypeOf(err)).
					Err(err).Str("dir", path.Join(tempImageStore.RootDir(), repo)).Str("digest", manifest.Digest.String()).
					Msg("couldn't find manifest which is part of an image index")

				return err
			}

			if err := registry.copyManifest(repo, manifestBuf, manifest.Digest.String(),
				tempImageStore); err != nil {
				if errors.Is(err, zerr.ErrImageLintAnnotations) {
					registry.log.Error().Str("errorType", common.TypeOf(err)).
						Err(err).Msg("couldn't upload manifest because of missing annotations")

					return nil
				}

				return err
			}
		}

		// all sub-manifests are in place; now upload the index
		_, _, err = imageStore.PutImageManifest(repo, reference, mediaType, manifestBlob)
		if err != nil {
			registry.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).Str("reference", reference).
				Err(err).Msg("couldn't upload manifest")

			return err
		}

		if registry.repoDB != nil {
			err = repodb.SetImageMetaFromInput(repo, reference, mediaType,
				manifestDigest, manifestBlob, imageStore, registry.repoDB, registry.log)
			if err != nil {
				return fmt.Errorf("repoDB: failed to set metadata for image '%s %s': %w", repo, reference, err)
			}

			registry.log.Debug().Str("repo", repo).Str("reference", reference).Msg("repoDB: successfully set metadata for image")
		}
	}

	registry.log.Info().Str("image", fmt.Sprintf("%s:%s", repo, reference)).Msg("successfully synced image")

	return nil
}
// copyManifest copies one image manifest's layer and config blobs from the
// temp store into zot's storage, uploads the manifest itself, and records
// repoDB metadata (when a repoDB is configured).
func (registry *LocalRegistry) copyManifest(repo string, manifestContent []byte, reference string,
	tempImageStore storageTypes.ImageStore,
) error {
	imageStore := registry.storeController.GetImageStore(repo)

	var manifest ispec.Manifest

	var err error

	if err := json.Unmarshal(manifestContent, &manifest); err != nil {
		registry.log.Error().Str("errorType", common.TypeOf(err)).
			Err(err).Str("dir", path.Join(tempImageStore.RootDir(), repo)).
			Msg("invalid JSON")

		return err
	}

	for _, blob := range manifest.Layers {
		// non-distributable (foreign) layers are referenced, never copied
		if storageCommon.IsNonDistributable(blob.MediaType) {
			continue
		}

		err = registry.copyBlob(repo, blob.Digest, blob.MediaType, tempImageStore)
		if err != nil {
			return err
		}
	}

	// the config blob is required before the manifest can be accepted
	err = registry.copyBlob(repo, manifest.Config.Digest, manifest.Config.MediaType, tempImageStore)
	if err != nil {
		return err
	}

	digest, _, err := imageStore.PutImageManifest(repo, reference,
		ispec.MediaTypeImageManifest, manifestContent)
	if err != nil {
		registry.log.Error().Str("errorType", common.TypeOf(err)).
			Err(err).Msg("couldn't upload manifest")

		return err
	}

	if registry.repoDB != nil {
		err = repodb.SetImageMetaFromInput(repo, reference, ispec.MediaTypeImageManifest,
			digest, manifestContent, imageStore, registry.repoDB, registry.log)
		if err != nil {
			registry.log.Error().Str("errorType", common.TypeOf(err)).
				Err(err).Msg("couldn't set metadata from input")

			return err
		}

		registry.log.Debug().Str("repo", repo).Str("reference", reference).Msg("successfully set metadata for image")
	}

	return nil
}
// copyBlob copies a single blob (layer or config) from the temporary image
// store into zot's storage, skipping the transfer when the blob is already
// present at the destination.
func (registry *LocalRegistry) copyBlob(repo string, blobDigest digest.Digest, blobMediaType string,
	tempImageStore storageTypes.ImageStore,
) error {
	imageStore := registry.storeController.GetImageStore(repo)
	if found, _, _ := imageStore.CheckBlob(repo, blobDigest); found {
		return nil // blob is already at destination, nothing to do
	}

	reader, _, err := tempImageStore.GetBlob(repo, blobDigest, blobMediaType)
	if err != nil {
		registry.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
			Str("dir", path.Join(tempImageStore.RootDir(), repo)).
			Str("blob digest", blobDigest.String()).Str("media type", blobMediaType).
			Msg("couldn't read blob")

		return err
	}
	defer reader.Close()

	if _, _, err = imageStore.FullBlobUpload(repo, reader, blobDigest); err != nil {
		registry.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
			Str("blob digest", blobDigest.String()).Str("media type", blobMediaType).
			Msg("couldn't upload blob")
	}

	return err
}
// getImageStoreFromImageReference builds a throwaway local ImageStore rooted
// at the directory that backs the given OCI-layout image reference.
func getImageStoreFromImageReference(imageReference types.ImageReference, repo, reference string,
) storageTypes.ImageStore {
	transportPath := imageReference.StringWithinTransport()

	var tempRootDir string
	if strings.HasSuffix(transportPath, reference) {
		// reference is a tag: path looks like <root>/<repo>:<tag>
		tempRootDir = strings.ReplaceAll(transportPath, fmt.Sprintf("%s:%s", repo, reference), "")
	} else {
		// reference is a digest: path looks like <root>/<repo>:
		tempRootDir = strings.ReplaceAll(transportPath, fmt.Sprintf("%s:", repo), "")
	}

	metrics := monitoring.NewMetricsServer(false, log.Logger{})

	return local.NewImageStore(tempRootDir, false,
		storageConstants.DefaultGCDelay, false, false, log.Logger{}, metrics, nil, nil)
}

View file

@ -0,0 +1,70 @@
//go:build sync
// +build sync
package sync
import (
"fmt"
"os"
"path"
"github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/types"
"github.com/gofrs/uuid"
"zotregistry.io/zot/pkg/storage"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/test/inject"
)
// OciLayoutStorageImpl stages synced images as OCI layouts inside the store
// controller's temporary sync directories.
type OciLayoutStorageImpl struct {
	storeController storage.StoreController
	context         *types.SystemContext // containers/image copy context
}
// NewOciLayoutStorage builds an OciLayoutStorage that stages synced images
// under the store controller's temp sync directories.
func NewOciLayoutStorage(storeController storage.StoreController) OciLayoutStorage {
	systemCtx := &types.SystemContext{
		// preserve compression
		OCIAcceptUncompressedLayers: true,
	}

	return OciLayoutStorageImpl{
		storeController: storeController,
		context:         systemCtx,
	}
}
// GetContext returns the containers/image system context used when copying
// into the OCI layout.
func (oci OciLayoutStorageImpl) GetContext() *types.SystemContext {
	return oci.context
}
// GetImageReference returns an OCI-layout image reference under a fresh
// per-sync session directory (random UUID), so concurrent syncs of the same
// repo do not collide.
func (oci OciLayoutStorageImpl) GetImageReference(repo string, reference string) (types.ImageReference, error) {
	localImageStore := oci.storeController.GetImageStore(repo)
	tempSyncPath := path.Join(localImageStore.RootDir(), repo, SyncBlobUploadDir)

	// create session folder
	uuid, err := uuid.NewV4()
	// hard to reach test case, injected error, see pkg/test/dev.go
	if err := inject.Error(err); err != nil {
		return nil, err
	}

	sessionRepoPath := path.Join(tempSyncPath, uuid.String())

	localRepo := path.Join(sessionRepoPath, repo)
	if err := os.MkdirAll(localRepo, storageConstants.DefaultDirPerms); err != nil {
		return nil, err
	}

	_, refIsDigest := parseReference(reference)

	if !refIsDigest {
		// tags are encoded into the layout reference as <path>:<tag>;
		// digest references are resolved by the layout transport itself
		localRepo = fmt.Sprintf("%s:%s", localRepo, reference)
	}

	localImageRef, err := layout.ParseReference(localRepo)
	if err != nil {
		return nil, err
	}

	return localImageRef, nil
}

View file

@ -1,78 +1,64 @@
//go:build sync
// +build sync
package sync
import (
"context"
"fmt"
"net/url"
"os"
"errors"
"sync"
"time"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/types"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/common"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/storage"
)
const (
OrasArtifact = "orasArtifact"
OCIReference = "ociReference"
)
type syncContextUtils struct {
policyCtx *signature.PolicyContext
localCtx *types.SystemContext
upstreamCtx *types.SystemContext
upstreamAddr string
copyOptions copy.Options
retryOptions *retry.Options
enforceSignatures bool
type request struct {
repo string
reference string
// used for background retries, at most one background retry per service
serviceID int
isBackground bool
}
//nolint:gochecknoglobals
var demandedImgs demandedImages
/*
a request can be an image/signature/sbom
type demandedImages struct {
syncedMap sync.Map
keep track of all parallel requests, if two requests of same image/signature/sbom comes at the same time,
process just the first one, also keep track of all background retrying routines.
*/
type BaseOnDemand struct {
services []Service
// map[request]chan err
requestStore *sync.Map
log log.Logger
}
func (di *demandedImages) loadOrStoreChan(key string, value chan error) (chan error, bool) {
val, found := di.syncedMap.LoadOrStore(key, value)
errChannel, _ := val.(chan error)
return errChannel, found
func NewOnDemand(log log.Logger) *BaseOnDemand {
return &BaseOnDemand{log: log, requestStore: &sync.Map{}}
}
func (di *demandedImages) loadOrStoreStr(key, value string) (string, bool) {
val, found := di.syncedMap.LoadOrStore(key, value)
str, _ := val.(string)
return str, found
func (onDemand *BaseOnDemand) Add(service Service) {
onDemand.services = append(onDemand.services, service)
}
func (di *demandedImages) delete(key string) {
di.syncedMap.Delete(key)
}
func (onDemand *BaseOnDemand) SyncImage(repo, reference string) error {
req := request{
repo: repo,
reference: reference,
}
func OneImage(ctx context.Context, cfg syncconf.Config, repoDB repodb.RepoDB,
storeController storage.StoreController, repo, reference string, artifactType string, log log.Logger,
) error {
// guard against multiple parallel requests
demandedImage := fmt.Sprintf("%s:%s", repo, reference)
// loadOrStore image-based channel
imageChannel, found := demandedImgs.loadOrStoreChan(demandedImage, make(chan error))
// if value found wait on channel receive or close
val, found := onDemand.requestStore.Load(req)
if found {
log.Info().Str("demandedImage", demandedImage).
Msg("image already demanded by another client, waiting on imageChannel")
onDemand.log.Info().Str("repo", repo).Str("reference", reference).
Msg("image already demanded, waiting on channel")
err, ok := <-imageChannel
syncResult, _ := val.(chan error)
err, ok := <-syncResult
// if channel closed exit
if !ok {
return nil
@ -81,12 +67,15 @@ func OneImage(ctx context.Context, cfg syncconf.Config, repoDB repodb.RepoDB,
return err
}
defer demandedImgs.delete(demandedImage)
defer close(imageChannel)
syncResult := make(chan error)
onDemand.requestStore.Store(req, syncResult)
go syncOneImage(ctx, imageChannel, cfg, repoDB, storeController, repo, reference, artifactType, log)
defer onDemand.requestStore.Delete(req)
defer close(syncResult)
err, ok := <-imageChannel
go onDemand.syncImage(repo, reference, syncResult)
err, ok := <-syncResult
if !ok {
return nil
}
@ -94,268 +83,87 @@ func OneImage(ctx context.Context, cfg syncconf.Config, repoDB repodb.RepoDB,
return err
}
func syncOneImage(ctx context.Context, imageChannel chan error, cfg syncconf.Config,
repoDB repodb.RepoDB, storeController storage.StoreController,
localRepo, reference string, artifactType string, log log.Logger,
) {
var credentialsFile syncconf.CredentialsFile
if cfg.CredentialsFile != "" {
func (onDemand *BaseOnDemand) SyncReference(repo string, subjectDigestStr string, referenceType string) error {
var err error
credentialsFile, err = getFileCredentials(cfg.CredentialsFile)
for _, service := range onDemand.services {
err = service.SetNextAvailableURL()
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Str("credentialsFile", cfg.CredentialsFile).Msg("couldn't get registry credentials from file")
return err
}
imageChannel <- err
return
err = service.SyncReference(repo, subjectDigestStr, referenceType)
if err != nil {
continue
} else {
return nil
}
}
localCtx, policyCtx, err := getLocalContexts(log)
return err
}
func (onDemand *BaseOnDemand) syncImage(repo, reference string, syncResult chan error) {
var err error
for serviceID, service := range onDemand.services {
err = service.SetNextAvailableURL()
if err != nil {
imageChannel <- err
syncResult <- err
return
}
for _, registryCfg := range cfg.Registries {
regCfg := registryCfg
if !regCfg.OnDemand {
log.Info().Strs("registry", regCfg.URLs).
Msg("skipping syncing on demand from registry, onDemand flag is false")
continue
}
upstreamRepo := localRepo
// if content config is not specified, then don't filter, just sync demanded image
if len(regCfg.Content) != 0 {
contentID, err := findRepoMatchingContentID(localRepo, regCfg.Content)
err = service.SyncImage(repo, reference)
if err != nil {
log.Info().Str("localRepo", localRepo).Strs("registry",
regCfg.URLs).Msg("skipping syncing on demand repo from registry because it's filtered out by content config")
if errors.Is(err, zerr.ErrManifestNotFound) ||
errors.Is(err, zerr.ErrSyncImageFilteredOut) ||
errors.Is(err, zerr.ErrSyncImageNotSigned) {
continue
}
upstreamRepo = getRepoSource(localRepo, regCfg.Content[contentID])
req := request{
repo: repo,
reference: reference,
serviceID: serviceID,
isBackground: true,
}
retryOptions := &retry.Options{}
if regCfg.MaxRetries != nil {
retryOptions.MaxRetry = *regCfg.MaxRetries
if regCfg.RetryDelay != nil {
retryOptions.Delay = *regCfg.RetryDelay
}
}
log.Info().Strs("registry", regCfg.URLs).Msg("syncing on demand with registry")
for _, regCfgURL := range regCfg.URLs {
upstreamURL := regCfgURL
upstreamAddr := StripRegistryTransport(upstreamURL)
var TLSverify bool
if regCfg.TLSVerify != nil && *regCfg.TLSVerify {
TLSverify = true
}
registryURL, err := url.Parse(upstreamURL)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Str("url", upstreamURL).Msg("couldn't parse url")
imageChannel <- err
return
}
httpClient, err := common.CreateHTTPClient(TLSverify, registryURL.Host, regCfg.CertDir)
if err != nil {
imageChannel <- err
return
}
sig := newSignaturesCopier(httpClient, credentialsFile[upstreamAddr], *registryURL, repoDB,
storeController, log)
upstreamCtx := getUpstreamContext(&regCfg, credentialsFile[upstreamAddr])
options := getCopyOptions(upstreamCtx, localCtx)
/* demanded object is a signature or artifact
at tis point we already have images synced, but not their signatures. */
if isCosignTag(reference) || artifactType != "" {
//nolint: contextcheck
err = syncSignaturesArtifacts(sig, localRepo, upstreamRepo, reference, artifactType)
if err != nil {
// if there is already a background routine, skip
if _, requested := onDemand.requestStore.LoadOrStore(req, struct{}{}); requested {
continue
}
imageChannel <- nil
retryOptions := service.GetRetryOptions()
return
}
var enforeSignatures bool
if regCfg.OnlySigned != nil && *regCfg.OnlySigned {
enforeSignatures = true
}
syncContextUtils := syncContextUtils{
policyCtx: policyCtx,
localCtx: localCtx,
upstreamCtx: upstreamCtx,
upstreamAddr: upstreamAddr,
copyOptions: options,
retryOptions: &retry.Options{}, // we don't want to retry inline
enforceSignatures: enforeSignatures,
}
//nolint:contextcheck
skipped, copyErr := syncRun(localRepo, upstreamRepo, reference, syncContextUtils, sig, log)
if skipped {
continue
}
// key used to check if we already have a go routine syncing this image
demandedImageRef := fmt.Sprintf("%s/%s:%s", upstreamAddr, upstreamRepo, reference)
if copyErr != nil {
// don't retry in background if maxretry is 0
if retryOptions.MaxRetry == 0 {
continue
}
_, found := demandedImgs.loadOrStoreStr(demandedImageRef, "")
if found {
log.Info().Str("demandedImageRef", demandedImageRef).Msg("image already demanded in background")
/* we already have a go routine spawned for this image
or retryOptions is not configured */
continue
}
// spawn goroutine to later pull the image
go func() {
if retryOptions.MaxRetry > 0 {
// retry in background
go func(service Service) {
// remove image after syncing
defer func() {
demandedImgs.delete(demandedImageRef)
log.Info().Str("demandedImageRef", demandedImageRef).Msg("sync routine: demanded image exited")
onDemand.requestStore.Delete(req)
onDemand.log.Info().Str("repo", repo).Str("reference", reference).
Msg("sync routine for image exited")
}()
log.Info().Str("demandedImageRef", demandedImageRef).Str("copyErr", copyErr.Error()).
Msg("sync routine: starting routine to copy image, err encountered")
onDemand.log.Info().Str("repo", repo).Str("reference", reference).Str("err", err.Error()).
Msg("sync routine: starting routine to copy image, because of error")
time.Sleep(retryOptions.Delay)
if err = retry.RetryIfNecessary(ctx, func() error {
_, err := syncRun(localRepo, upstreamRepo, reference, syncContextUtils, sig, log)
if err = retry.RetryIfNecessary(context.Background(), func() error {
err := service.SyncImage(repo, reference)
return err
}, retryOptions); err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("demandedImageRef", demandedImageRef).Msg("sync routine: error while copying image")
onDemand.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).Str("reference", reference).
Err(err).Msg("sync routine: error while copying image")
}
}(service)
}
}()
} else {
imageChannel <- nil
return
}
break
}
}
imageChannel <- nil
}
func syncRun(localRepo, upstreamRepo, reference string, utils syncContextUtils, sig *signaturesCopier,
log log.Logger,
) (bool, error) {
upstreamImageRef, err := getImageRef(utils.upstreamAddr, upstreamRepo, reference)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("repository", utils.upstreamAddr+"/"+upstreamRepo+":"+reference).
Msg("error creating docker reference for repository")
return false, err
}
imageStore := sig.storeController.GetImageStore(localRepo)
localCachePath, err := getLocalCachePath(imageStore, localRepo)
if err != nil {
log.Error().Err(err).Str("repository", localRepo).Msg("couldn't get localCachePath for repository")
return false, err
}
defer os.RemoveAll(localCachePath)
return syncImageWithRefs(context.Background(), localRepo, upstreamRepo, reference, upstreamImageRef,
utils, sig, localCachePath, log)
}
func syncSignaturesArtifacts(sig *signaturesCopier, localRepo, upstreamRepo, reference, artifactType string) error {
upstreamURL := sig.upstreamURL.String()
switch {
case isCosignTag(reference):
// is cosign signature
cosignManifest, err := sig.getCosignManifest(upstreamRepo, reference)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("image", upstreamURL+"/"+upstreamRepo+":"+reference).Msg("couldn't get upstream image cosign manifest")
return err
}
err = sig.syncCosignSignature(localRepo, upstreamRepo, reference, cosignManifest)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("image", upstreamURL+"/"+upstreamRepo+":"+reference).Msg("couldn't copy upstream image cosign signature")
return err
}
case artifactType == OrasArtifact:
// is oras artifact
refs, err := sig.getORASRefs(upstreamRepo, reference)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("image", upstreamURL+"/"+upstreamRepo+":"+reference).Msg("couldn't get upstream image ORAS references")
return err
}
err = sig.syncORASRefs(localRepo, upstreamRepo, reference, refs)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("image", upstreamURL+"/"+upstreamRepo+":"+reference).Msg("couldn't copy image ORAS references")
return err
}
case artifactType == OCIReference:
// this contains notary signatures
index, err := sig.getOCIRefs(upstreamRepo, reference)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("image", upstreamURL+"/"+upstreamRepo+":"+reference).Msg("couldn't get OCI references")
return err
}
err = sig.syncOCIRefs(localRepo, upstreamRepo, reference, index)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("image", upstreamURL+"/"+upstreamRepo+":"+reference).Msg("couldn't copy OCI references")
return err
}
}
return nil
syncResult <- err
}

View file

@ -0,0 +1,14 @@
//go:build !sync
// +build !sync
package sync
// BaseOnDemand is the stub on-demand syncer compiled in when the "sync" build
// tag is absent; every operation succeeds without performing any work.
type BaseOnDemand struct{}

// SyncImage is a no-op that always reports success.
func (od *BaseOnDemand) SyncImage(repo, reference string) error {
	return nil
}

// SyncReference is a no-op that always reports success.
func (od *BaseOnDemand) SyncReference(repo string, subjectDigestStr string, referenceType string) error {
	return nil
}

View file

@ -0,0 +1,247 @@
//go:build sync
// +build sync
package references
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sigstore/cosign/v2/pkg/oci/remote"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/common"
"zotregistry.io/zot/pkg/extensions/sync/constants"
client "zotregistry.io/zot/pkg/extensions/sync/httpclient"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/storage"
)
// CosignReference syncs tag-addressed cosign artifacts (signature and SBOM
// tags) for an image from a remote registry into the local store, optionally
// recording them in repoDB.
type CosignReference struct {
	client          *client.Client          // HTTP client pointed at the upstream registry
	storeController storage.StoreController // resolves the local image store per repo
	repoDB          repodb.RepoDB           // may be nil; metadata sync is skipped then
	log             log.Logger
}
// NewCosignReference wires up a CosignReference with its HTTP client, local
// storage backend, optional repoDB and logger.
func NewCosignReference(httpClient *client.Client, storeController storage.StoreController,
	repoDB repodb.RepoDB, log log.Logger,
) CosignReference {
	ref := CosignReference{log: log}
	ref.client = httpClient
	ref.storeController = storeController
	ref.repoDB = repoDB

	return ref
}
// Name returns the identifier of this reference syncer (constants.Cosign).
func (ref CosignReference) Name() string {
	return constants.Cosign
}
// IsSigned reports whether the upstream repo holds a cosign signature
// manifest for the subject digest, probed via its "sha256-<hex>.sig" tag.
func (ref CosignReference) IsSigned(upstreamRepo, subjectDigestStr string) bool {
	sigTag := getCosignSignatureTagFromSubjectDigest(subjectDigestStr)
	if _, err := ref.getManifest(upstreamRepo, sigTag); err != nil {
		return false
	}

	return true
}
// canSkipReferences decides whether the cosign artifact stored locally under
// cosignTag already matches the remote manifest, in which case syncing can be
// skipped. A nil remote manifest (nothing to sync) is always skippable.
func (ref CosignReference) canSkipReferences(localRepo, cosignTag string, manifest *ispec.Manifest) (
	bool, error,
) {
	if manifest == nil {
		return true, nil
	}

	imageStore := ref.storeController.GetImageStore(localRepo)
	// check cosign signature already synced
	var localManifest ispec.Manifest

	/* we need to use tag (cosign format: sha256-$IMAGE_TAG.sig) instead of digest to get local cosign manifest
	because of an issue where cosign digests differs between upstream and downstream */
	localManifestBuf, _, _, err := imageStore.GetImageManifest(localRepo, cosignTag)
	if err != nil {
		// no local copy yet -> must sync, not an error
		if errors.Is(err, zerr.ErrManifestNotFound) {
			return false, nil
		}

		ref.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
			Str("repository", localRepo).Str("reference", cosignTag).
			Msg("couldn't get local cosign manifest")

		return false, err
	}

	err = json.Unmarshal(localManifestBuf, &localManifest)
	if err != nil {
		ref.log.Error().Str("errorType", common.TypeOf(err)).
			Str("repository", localRepo).Str("reference", cosignTag).
			Err(err).Msg("couldn't unmarshal local cosign signature manifest")

		return false, err
	}

	// content differs from upstream -> sync again
	if !manifestsEqual(localManifest, *manifest) {
		ref.log.Info().Str("repository", localRepo).Str("reference", cosignTag).
			Msg("upstream cosign signatures changed, syncing again")

		return false, nil
	}

	ref.log.Info().Str("repository", localRepo).Str("reference", cosignTag).
		Msg("skipping syncing cosign signature, already synced")

	return true, nil
}
// SyncReferences syncs the cosign artifacts (signature and SBOM tags) of the
// given subject digest from the remote repo into the local one: blobs first,
// then config, then the manifest; repoDB metadata is updated when configured.
func (ref CosignReference) SyncReferences(localRepo, remoteRepo, subjectDigestStr string) error {
	cosignTags := getCosignTagsFromSubjectDigest(subjectDigestStr)

	for _, cosignTag := range cosignTags {
		manifest, err := ref.getManifest(remoteRepo, cosignTag)
		// NOTE(review): only ErrSyncReferrerNotFound aborts here; any other
		// fetch error leaves manifest nil, which canSkipReferences treats as
		// "skip" — confirm this swallow-and-skip behavior is intended.
		if err != nil && errors.Is(err, zerr.ErrSyncReferrerNotFound) {
			return err
		}

		skip, err := ref.canSkipReferences(localRepo, cosignTag, manifest)
		if err != nil {
			// best-effort check: error is logged and sync proceeds
			ref.log.Error().Err(err).Str("repository", localRepo).Str("subject", subjectDigestStr).
				Msg("couldn't check if the remote image cosign reference can be skipped")
		}

		if skip {
			continue
		}

		imageStore := ref.storeController.GetImageStore(localRepo)

		ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
			Msg("syncing cosign reference for image")

		// layer blobs must land before the manifest referencing them
		for _, blob := range manifest.Layers {
			if err := syncBlob(ref.client, imageStore, localRepo, remoteRepo, blob.Digest, ref.log); err != nil {
				return err
			}
		}

		// sync config blob
		if err := syncBlob(ref.client, imageStore, localRepo, remoteRepo, manifest.Config.Digest, ref.log); err != nil {
			return err
		}

		manifestBuf, err := json.Marshal(manifest)
		if err != nil {
			ref.log.Error().Str("errorType", common.TypeOf(err)).
				Str("repository", localRepo).Str("subject", subjectDigestStr).
				Err(err).Msg("couldn't marshal cosign reference manifest")

			return err
		}

		// push manifest
		referenceDigest, _, err := imageStore.PutImageManifest(localRepo, cosignTag,
			ispec.MediaTypeImageManifest, manifestBuf)
		if err != nil {
			ref.log.Error().Str("errorType", common.TypeOf(err)).
				Str("repository", localRepo).Str("subject", subjectDigestStr).
				Err(err).Msg("couldn't upload cosign reference manifest for image")

			return err
		}

		ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
			Msg("successfully synced cosign reference for image")

		if ref.repoDB != nil {
			ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
				Msg("repoDB: trying to sync cosign reference for image")

			// signatures are recorded against the signed manifest; anything
			// else (e.g. SBOM) is stored as regular image metadata
			isSig, sigType, signedManifestDig, err := storage.CheckIsImageSignature(localRepo, manifestBuf,
				cosignTag)
			if err != nil {
				return fmt.Errorf("failed to check if cosign reference '%s@%s' is a signature: %w", localRepo,
					cosignTag, err)
			}

			if isSig {
				err = ref.repoDB.AddManifestSignature(localRepo, signedManifestDig, repodb.SignatureMetadata{
					SignatureType:   sigType,
					SignatureDigest: referenceDigest.String(),
				})
			} else {
				err = repodb.SetImageMetaFromInput(localRepo, cosignTag, manifest.MediaType,
					referenceDigest, manifestBuf, ref.storeController.GetImageStore(localRepo),
					ref.repoDB, ref.log)
			}

			if err != nil {
				return fmt.Errorf("failed to set metadata for cosign reference in '%s@%s': %w", localRepo, subjectDigestStr, err)
			}

			ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
				Msg("repoDB: successfully added cosign reference for image")
		}
	}

	return nil
}
// getManifest fetches the manifest stored under cosignTag in the remote repo.
// A 404 is mapped to zerr.ErrSyncReferrerNotFound; any other failure is
// logged and returned as-is.
func (ref CosignReference) getManifest(repo, cosignTag string) (*ispec.Manifest, error) {
	var cosignManifest ispec.Manifest

	_, _, statusCode, err := ref.client.MakeGetRequest(&cosignManifest, ispec.MediaTypeImageManifest,
		"v2", repo, "manifests", cosignTag)
	if err == nil {
		return &cosignManifest, nil
	}

	if statusCode == http.StatusNotFound {
		ref.log.Debug().Str("errorType", common.TypeOf(err)).
			Str("repository", repo).Str("tag", cosignTag).
			Err(err).Msg("couldn't find any cosign manifest for image")

		return nil, zerr.ErrSyncReferrerNotFound
	}

	ref.log.Error().Str("errorType", common.TypeOf(err)).
		Str("repository", repo).Str("tag", cosignTag).Int("statusCode", statusCode).
		Err(err).Msg("couldn't get cosign manifest for image")

	return nil, err
}
// getCosignSignatureTagFromSubjectDigest maps a digest "alg:hex" to the
// cosign signature tag "alg-hex.<sig suffix>".
func getCosignSignatureTagFromSubjectDigest(digestStr string) string {
	return fmt.Sprintf("%s.%s", strings.Replace(digestStr, ":", "-", 1), remote.SignatureTagSuffix)
}
// getCosignSBOMTagFromSubjectDigest maps a digest "alg:hex" to the cosign
// SBOM tag "alg-hex.<sbom suffix>".
func getCosignSBOMTagFromSubjectDigest(digestStr string) string {
	return fmt.Sprintf("%s.%s", strings.Replace(digestStr, ":", "-", 1), remote.SBOMTagSuffix)
}
// getCosignTagsFromSubjectDigest returns the cosign tags derived from a
// subject digest: the signature tag followed by the SBOM tag.
func getCosignTagsFromSubjectDigest(digestStr string) []string {
	return []string{
		getCosignSignatureTagFromSubjectDigest(digestStr), // signature tag
		getCosignSBOMTagFromSubjectDigest(digestStr),      // sbom tag
	}
}
// IsCosignTag reports whether tag follows the cosign naming scheme for
// signatures or SBOMs: "sha256-<hex>" plus a ".sig" or ".sbom" suffix.
func IsCosignTag(tag string) bool {
	knownSuffix := strings.HasSuffix(tag, remote.SignatureTagSuffix) ||
		strings.HasSuffix(tag, remote.SBOMTagSuffix)

	return strings.HasPrefix(tag, "sha256-") && knownSuffix
}

View file

@ -0,0 +1,226 @@
//go:build sync
// +build sync
package references
import (
"encoding/json"
"errors"
"fmt"
"net/http"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/common"
"zotregistry.io/zot/pkg/extensions/sync/constants"
client "zotregistry.io/zot/pkg/extensions/sync/httpclient"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/storage"
)
// OciReferences syncs OCI referrers (manifests returned by the referrers API,
// including notation signatures) for an image from a remote registry into the
// local store, optionally recording them in repoDB.
type OciReferences struct {
	client          *client.Client          // HTTP client pointed at the upstream registry
	storeController storage.StoreController // resolves the local image store per repo
	repoDB          repodb.RepoDB           // may be nil; metadata sync is skipped then
	log             log.Logger
}
// NewOciReferences wires up an OciReferences syncer with its HTTP client,
// local storage backend, optional repoDB and logger.
func NewOciReferences(httpClient *client.Client, storeController storage.StoreController,
	repoDB repodb.RepoDB, log log.Logger,
) OciReferences {
	ref := OciReferences{log: log}
	ref.client = httpClient
	ref.storeController = storeController
	ref.repoDB = repoDB

	return ref
}
// Name returns the identifier of this reference syncer (constants.OCI).
func (ref OciReferences) Name() string {
	return constants.OCI
}
// IsSigned reports whether the remote subject has at least one notation
// signature among its OCI referrers; lookup failures read as "not signed".
func (ref OciReferences) IsSigned(remoteRepo, subjectDigestStr string) bool {
	// use artifactTypeFilter
	index, err := ref.getIndex(remoteRepo, subjectDigestStr)
	if err != nil {
		return false
	}

	return len(getNotationManifestsFromOCIRefs(index)) > 0
}
// canSkipReferences compares the local OCI referrers of the subject digest
// with the remote index and returns true when they already match.
func (ref OciReferences) canSkipReferences(localRepo, subjectDigestStr string, index ispec.Index) (bool, error) {
	imageStore := ref.storeController.GetImageStore(localRepo)
	digest := godigest.Digest(subjectDigestStr)

	// check oci references already synced
	if len(index.Manifests) > 0 {
		localRefs, err := imageStore.GetReferrers(localRepo, digest, nil)
		if err != nil {
			// nothing local yet -> must sync, not an error
			if errors.Is(err, zerr.ErrManifestNotFound) {
				return false, nil
			}

			ref.log.Error().Str("errorType", common.TypeOf(err)).
				Str("repository", localRepo).Str("subject", subjectDigestStr).
				Err(err).Msg("couldn't get local oci references for image")

			return false, err
		}

		if !descriptorsEqual(localRefs.Manifests, index.Manifests) {
			ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
				Msg("remote oci references for image changed, syncing again")

			return false, nil
		}
	}

	// NOTE(review): an empty remote index also reaches here and is reported
	// as "already synced" — confirm that is intended.
	ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
		Msg("skipping oci references for image, already synced")

	return true, nil
}
// SyncReferences syncs all OCI referrers of the subject digest from the
// remote repo into the local one: each referrer's layer/config blobs first,
// then its manifest; repoDB metadata is updated when configured.
func (ref OciReferences) SyncReferences(localRepo, remoteRepo, subjectDigestStr string) error {
	index, err := ref.getIndex(remoteRepo, subjectDigestStr)
	if err != nil {
		return err
	}

	skipOCIRefs, err := ref.canSkipReferences(localRepo, subjectDigestStr, index)
	if err != nil {
		// best-effort check: error is logged and sync proceeds
		ref.log.Error().Err(err).Str("repository", localRepo).Str("subject", subjectDigestStr).
			Msg("couldn't check if the upstream oci references for image can be skipped")
	}

	if skipOCIRefs {
		return nil
	}

	imageStore := ref.storeController.GetImageStore(localRepo)

	ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
		Msg("syncing oci references for image")

	for _, referrer := range index.Manifests {
		var artifactManifest ispec.Manifest

		OCIRefBuf, _, statusCode, err := ref.client.MakeGetRequest(&artifactManifest, ispec.MediaTypeImageManifest,
			"v2", remoteRepo, "manifests", referrer.Digest.String())
		if err != nil {
			if statusCode == http.StatusNotFound {
				return zerr.ErrSyncReferrerNotFound
			}

			ref.log.Error().Str("errorType", common.TypeOf(err)).
				Str("repository", localRepo).Str("subject", subjectDigestStr).
				Err(err).Msg("couldn't get oci reference manifest for image")

			return err
		}

		// only plain image manifests are synced; other media types are skipped
		if referrer.MediaType == ispec.MediaTypeImageManifest {
			// read manifest
			var manifest ispec.Manifest

			err = json.Unmarshal(OCIRefBuf, &manifest)
			if err != nil {
				ref.log.Error().Str("errorType", common.TypeOf(err)).
					Str("repository", localRepo).Str("subject", subjectDigestStr).
					Err(err).Msg("couldn't unmarshal oci reference manifest for image")

				return err
			}

			// blobs must land before the manifest referencing them
			for _, layer := range manifest.Layers {
				if err := syncBlob(ref.client, imageStore, localRepo, remoteRepo, layer.Digest, ref.log); err != nil {
					return err
				}
			}

			// sync config blob
			if err := syncBlob(ref.client, imageStore, localRepo, remoteRepo, manifest.Config.Digest, ref.log); err != nil {
				return err
			}
		} else {
			continue
		}

		// the referrer is stored by digest, so content addressing is preserved
		digest, _, err := imageStore.PutImageManifest(localRepo, referrer.Digest.String(),
			referrer.MediaType, OCIRefBuf)
		if err != nil {
			ref.log.Error().Str("errorType", common.TypeOf(err)).
				Str("repository", localRepo).Str("subject", subjectDigestStr).
				Err(err).Msg("couldn't upload oci reference for image")

			return err
		}

		if ref.repoDB != nil {
			ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
				Msg("repoDB: trying to add oci references for image")

			// signatures are recorded against the signed manifest; other
			// referrers are stored as regular image metadata
			isSig, sigType, signedManifestDig, err := storage.CheckIsImageSignature(localRepo, OCIRefBuf,
				referrer.Digest.String())
			if err != nil {
				return fmt.Errorf("failed to check if oci reference '%s@%s' is a signature: %w", localRepo,
					referrer.Digest.String(), err)
			}

			if isSig {
				err = ref.repoDB.AddManifestSignature(localRepo, signedManifestDig, repodb.SignatureMetadata{
					SignatureType:   sigType,
					SignatureDigest: digest.String(),
				})
			} else {
				err = repodb.SetImageMetaFromInput(localRepo, digest.String(), referrer.MediaType,
					digest, OCIRefBuf, ref.storeController.GetImageStore(localRepo),
					ref.repoDB, ref.log)
			}

			if err != nil {
				return fmt.Errorf("failed to set metadata for oci reference in '%s@%s': %w", localRepo, subjectDigestStr, err)
			}

			ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
				Msg("repoDB: successfully added oci references to RepoDB for image")
		}
	}

	ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
		Msg("successfully synced oci references for image")

	return nil
}
// getIndex fetches the OCI referrers index for the subject digest from the
// remote repo. A 404 is mapped to zerr.ErrSyncReferrerNotFound; any other
// failure is logged and returned as-is.
func (ref OciReferences) getIndex(repo, subjectDigestStr string) (ispec.Index, error) {
	var index ispec.Index

	_, _, statusCode, err := ref.client.MakeGetRequest(&index, ispec.MediaTypeImageIndex,
		"v2", repo, "referrers", subjectDigestStr)
	if err == nil {
		return index, nil
	}

	if statusCode == http.StatusNotFound {
		ref.log.Debug().Str("repository", repo).Str("subject", subjectDigestStr).
			Msg("couldn't find any oci reference for image, skipping")

		return index, zerr.ErrSyncReferrerNotFound
	}

	ref.log.Error().Str("errorType", common.TypeOf(err)).
		Err(err).Str("repository", repo).Str("subject", subjectDigestStr).Int("statusCode", statusCode).
		Msg("couldn't get oci reference for image")

	return index, err
}

View file

@ -0,0 +1,183 @@
//go:build sync
// +build sync
package references
import (
"errors"
"fmt"
"net/http"
godigest "github.com/opencontainers/go-digest"
oras "github.com/oras-project/artifacts-spec/specs-go/v1"
zerr "zotregistry.io/zot/errors"
apiConstants "zotregistry.io/zot/pkg/api/constants"
"zotregistry.io/zot/pkg/common"
"zotregistry.io/zot/pkg/extensions/sync/constants"
client "zotregistry.io/zot/pkg/extensions/sync/httpclient"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/storage"
)
// ReferenceList mirrors the response body of the ORAS artifact referrers
// endpoint.
type ReferenceList struct {
	References []oras.Descriptor `json:"references"`
}
// ORASReferences syncs ORAS artifact manifests referring to an image from a
// remote registry into the local store, optionally recording them in repoDB.
type ORASReferences struct {
	client          *client.Client          // HTTP client pointed at the upstream registry
	storeController storage.StoreController // resolves the local image store per repo
	repoDB          repodb.RepoDB           // may be nil; metadata sync is skipped then
	log             log.Logger
}
// NewORASReferences wires up an ORASReferences syncer with its HTTP client,
// local storage backend, optional repoDB and logger.
func NewORASReferences(httpClient *client.Client, storeController storage.StoreController,
	repoDB repodb.RepoDB, log log.Logger,
) ORASReferences {
	ref := ORASReferences{log: log}
	ref.client = httpClient
	ref.storeController = storeController
	ref.repoDB = repoDB

	return ref
}
// Name returns the identifier of this reference syncer (constants.Oras).
func (ref ORASReferences) Name() string {
	return constants.Oras
}
// IsSigned always reports false: this syncer does not treat ORAS artifacts
// as signatures.
func (ref ORASReferences) IsSigned(remoteRepo, subjectDigestStr string) bool {
	return false
}
// canSkipReferences compares the local ORAS referrers of the subject digest
// with the remote reference list and returns true when they already match.
func (ref ORASReferences) canSkipReferences(localRepo, subjectDigestStr string, referrers ReferenceList) (bool, error) {
	imageStore := ref.storeController.GetImageStore(localRepo)
	digest := godigest.Digest(subjectDigestStr)

	// check oras artifacts already synced
	if len(referrers.References) > 0 {
		localRefs, err := imageStore.GetOrasReferrers(localRepo, digest, "")
		if err != nil {
			// nothing local yet -> must sync, not an error
			if errors.Is(err, zerr.ErrManifestNotFound) {
				return false, nil
			}

			ref.log.Error().Str("errorType", common.TypeOf(err)).Str("repository", localRepo).
				Str("subject", subjectDigestStr).
				Err(err).Msg("couldn't get local ORAS artifact for image")

			return false, err
		}

		if !artifactDescriptorsEqual(localRefs, referrers.References) {
			ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
				Msg("upstream ORAS artifacts for image changed, syncing again")

			return false, nil
		}
	}

	// NOTE(review): an empty remote reference list also reaches here and is
	// reported as "already synced" — confirm that is intended.
	ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
		Msg("skipping ORAS artifact for image, already synced")

	return true, nil
}
// SyncReferences syncs all ORAS artifact referrers of the subject digest from
// the remote repo into the local one: each artifact's blobs first, then its
// manifest; repoDB metadata is updated when configured.
func (ref ORASReferences) SyncReferences(localRepo, remoteRepo, subjectDigestStr string) error {
	referrers, err := ref.getReferenceList(remoteRepo, subjectDigestStr)
	if err != nil {
		return err
	}

	skipORASRefs, err := ref.canSkipReferences(localRepo, subjectDigestStr, referrers)
	if err != nil {
		// best-effort check: error is logged and sync proceeds
		ref.log.Error().Err(err).Str("repository", localRepo).Str("subject", subjectDigestStr).
			Msg("couldn't check if ORAS artifact for image can be skipped")
	}

	if skipORASRefs {
		return nil
	}

	imageStore := ref.storeController.GetImageStore(localRepo)

	ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
		Msg("syncing ORAS artifacts for image")

	for _, referrer := range referrers.References {
		var artifactManifest oras.Manifest

		orasBuf, _, statusCode, err := ref.client.MakeGetRequest(&artifactManifest, oras.MediaTypeDescriptor,
			"v2", remoteRepo, "manifests", referrer.Digest.String())
		if err != nil {
			if statusCode == http.StatusNotFound {
				return zerr.ErrSyncReferrerNotFound
			}

			ref.log.Error().Str("errorType", common.TypeOf(err)).
				Str("repository", localRepo).Str("subject", subjectDigestStr).
				Err(err).Msg("couldn't get ORAS artifact for image")

			return err
		}

		// blobs must land before the manifest referencing them
		for _, blob := range artifactManifest.Blobs {
			if err := syncBlob(ref.client, imageStore, localRepo, remoteRepo, blob.Digest, ref.log); err != nil {
				return err
			}
		}

		digest, _, err := imageStore.PutImageManifest(localRepo, referrer.Digest.String(),
			oras.MediaTypeArtifactManifest, orasBuf)
		if err != nil {
			ref.log.Error().Str("errorType", common.TypeOf(err)).
				Str("repository", localRepo).Str("subject", subjectDigestStr).
				Err(err).Msg("couldn't upload ORAS artifact for image")

			return err
		}

		if ref.repoDB != nil {
			ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
				Msg("repoDB: trying to sync oras artifact for image")

			err := repodb.SetImageMetaFromInput(localRepo, digest.String(), referrer.MediaType,
				digest, orasBuf, ref.storeController.GetImageStore(localRepo),
				ref.repoDB, ref.log)
			if err != nil {
				return fmt.Errorf("repoDB: failed to set metadata for oras artifact '%s@%s': %w", localRepo, subjectDigestStr, err)
			}

			ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
				Msg("repoDB: successfully added oras artifacts to RepoDB for image")
		}
	}

	ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
		Msg("successfully synced oras artifacts for image")

	return nil
}
// getReferenceList fetches the ORAS artifact referrers of the subject digest
// from the remote registry's artifact-spec endpoint. A 404 or 400 is mapped
// to zerr.ErrSyncReferrerNotFound; any other failure is logged and returned.
func (ref ORASReferences) getReferenceList(repo, subjectDigestStr string) (ReferenceList, error) {
	var referrers ReferenceList

	_, _, statusCode, err := ref.client.MakeGetRequest(&referrers, "application/json",
		apiConstants.ArtifactSpecRoutePrefix, repo, "manifests", subjectDigestStr, "referrers")
	if err == nil {
		return referrers, nil
	}

	if statusCode == http.StatusNotFound || statusCode == http.StatusBadRequest {
		ref.log.Debug().Str("repository", repo).Str("subject", subjectDigestStr).Err(err).
			Msg("couldn't find any ORAS artifact for image")

		return referrers, zerr.ErrSyncReferrerNotFound
	}

	ref.log.Error().Err(err).Str("repository", repo).Str("subject", subjectDigestStr).
		Msg("couldn't get ORAS artifacts for image")

	return referrers, err
}

View file

@ -0,0 +1,181 @@
//go:build sync
// +build sync
package references
import (
"bytes"
"fmt"
"net/http"
notreg "github.com/notaryproject/notation-go/registry"
"github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
"github.com/sigstore/cosign/v2/pkg/oci/static"
"zotregistry.io/zot/pkg/common"
client "zotregistry.io/zot/pkg/extensions/sync/httpclient"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/storage"
storageTypes "zotregistry.io/zot/pkg/storage/types"
)
// Reference is implemented by each referrer syncer (cosign, OCI, ORAS).
type Reference interface {
	// Name returns the reference type identifier this syncer handles.
	Name() string
	// IsSigned reports whether the upstream subject digest is signed.
	IsSigned(upstreamRepo, subjectDigestStr string) bool
	// SyncReferences syncs all referrers of the subject digest locally.
	SyncReferences(localRepo, upstreamRepo, subjectDigestStr string) error
}
// References aggregates all registered referrer syncers and fans requests out
// to them.
type References struct {
	refernceList []Reference // sic: field name misspelled ("reference") throughout this file
	log          log.Logger
}
// NewReferences builds the aggregate referrer syncer covering, in order,
// cosign, OCI and ORAS reference types.
func NewReferences(httpClient *client.Client, storeController storage.StoreController,
	repoDB repodb.RepoDB, log log.Logger,
) References {
	return References{
		log: log,
		refernceList: []Reference{
			NewCosignReference(httpClient, storeController, repoDB, log),
			NewOciReferences(httpClient, storeController, repoDB, log),
			NewORASReferences(httpClient, storeController, repoDB, log),
		},
	}
}
// IsSigned reports whether any registered syncer considers the upstream
// subject digest signed.
func (refs References) IsSigned(upstreamRepo, subjectDigestStr string) bool {
	for _, ref := range refs.refernceList {
		if ref.IsSigned(upstreamRepo, subjectDigestStr) {
			return true
		}
	}

	return false
}
// SyncAll runs every registered syncer for the given subject. A failing
// syncer is logged but does not stop the loop.
// NOTE(review): only the error of the LAST syncer is returned; earlier
// failures are visible in the log only — confirm callers expect this.
func (refs References) SyncAll(localRepo, upstreamRepo, subjectDigestStr string) error {
	var err error

	for _, ref := range refs.refernceList {
		err = ref.SyncReferences(localRepo, upstreamRepo, subjectDigestStr)
		if err != nil {
			refs.log.Error().Err(err).
				Str("reference type", ref.Name()).
				Str("image", fmt.Sprintf("%s:%s", upstreamRepo, subjectDigestStr)).
				Msg("couldn't sync image referrer")
		}
	}

	return err
}
// SyncReference syncs only the referrers of the given referenceType for the
// subject; a failure of the matching syncer is logged and returned. An
// unknown referenceType is a no-op returning nil.
func (refs References) SyncReference(localRepo, upstreamRepo, subjectDigestStr, referenceType string) error {
	for _, ref := range refs.refernceList {
		if ref.Name() != referenceType {
			continue
		}

		err := ref.SyncReferences(localRepo, upstreamRepo, subjectDigestStr)
		if err != nil {
			refs.log.Error().Err(err).
				Str("reference type", ref.Name()).
				Str("image", fmt.Sprintf("%s:%s", upstreamRepo, subjectDigestStr)).
				Msg("couldn't sync image referrer")

			return err
		}
	}

	return nil
}
// syncBlob downloads one blob from the remote repo over the registry API and
// uploads it to the local image store under the same digest.
func syncBlob(client *client.Client, imageStore storageTypes.ImageStore, localRepo, remoteRepo string,
	digest digest.Digest, log log.Logger,
) error {
	var resultPtr interface{}

	body, _, statusCode, err := client.MakeGetRequest(resultPtr, "", "v2", remoteRepo, "blobs", digest.String())
	if err != nil {
		if statusCode != http.StatusOK {
			log.Info().Str("repo", remoteRepo).Str("digest", digest.String()).Msg("couldn't get remote blob")
		}

		// bugfix: the error was previously swallowed when statusCode was 200,
		// letting an empty/partial body be uploaded below; always bail out here.
		return err
	}

	_, _, err = imageStore.FullBlobUpload(localRepo, bytes.NewBuffer(body), digest)
	if err != nil {
		log.Error().Str("errorType", common.TypeOf(err)).Str("digest", digest.String()).Str("repo", localRepo).
			Err(err).Msg("couldn't upload blob")

		return err
	}

	return nil
}
// manifestsEqual reports whether two manifests have matching config
// descriptors (digest, media type, size) and identical layer lists.
func manifestsEqual(manifest1, manifest2 ispec.Manifest) bool {
	sameConfig := manifest1.Config.Digest == manifest2.Config.Digest &&
		manifest1.Config.MediaType == manifest2.Config.MediaType &&
		manifest1.Config.Size == manifest2.Config.Size

	return sameConfig && descriptorsEqual(manifest1.Layers, manifest2.Layers)
}
// artifactDescriptorsEqual reports whether two ORAS descriptor slices are
// element-wise equal in digest, size, media type and artifact type.
func artifactDescriptorsEqual(desc1, desc2 []artifactspec.Descriptor) bool {
	if len(desc1) != len(desc2) {
		return false
	}

	for idx := range desc1 {
		left, right := desc1[idx], desc2[idx]
		if left.Digest != right.Digest ||
			left.Size != right.Size ||
			left.MediaType != right.MediaType ||
			left.ArtifactType != right.ArtifactType {
			return false
		}
	}

	return true
}
// descriptorsEqual reports whether two OCI descriptor slices are element-wise
// equal per descriptorEqual.
func descriptorsEqual(desc1, desc2 []ispec.Descriptor) bool {
	if len(desc1) != len(desc2) {
		return false
	}

	for idx := range desc1 {
		if !descriptorEqual(desc1[idx], desc2[idx]) {
			return false
		}
	}

	return true
}
// descriptorEqual reports whether two OCI descriptors match in size, digest,
// media type and cosign signature annotation.
func descriptorEqual(desc1, desc2 ispec.Descriptor) bool {
	return desc1.Size == desc2.Size &&
		desc1.Digest == desc2.Digest &&
		desc1.MediaType == desc2.MediaType &&
		desc1.Annotations[static.SignatureAnnotationKey] == desc2.Annotations[static.SignatureAnnotationKey]
}
// getNotationManifestsFromOCIRefs filters an OCI referrers index down to the
// descriptors carrying the notation signature artifact type.
func getNotationManifestsFromOCIRefs(ociRefs ispec.Index) []ispec.Descriptor {
	notaryManifests := []ispec.Descriptor{}

	for _, desc := range ociRefs.Manifests {
		if desc.ArtifactType != notreg.ArtifactTypeNotation {
			continue
		}

		notaryManifests = append(notaryManifests, desc)
	}

	return notaryManifests
}

View file

@ -0,0 +1,368 @@
//go:build sync
// +build sync
package references
import (
"errors"
"testing"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
. "github.com/smartystreets/goconvey/convey"
zerr "zotregistry.io/zot/errors"
client "zotregistry.io/zot/pkg/extensions/sync/httpclient"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/test/mocks"
)
// errRef is the sentinel error returned by mocked stores in these tests.
var errRef = errors.New("err")

// TestCosign exercises CosignReference.canSkipReferences error paths with a
// mocked image store: nil manifest shortcut, GetImageManifest failure, and a
// local-manifest unmarshal failure.
func TestCosign(t *testing.T) {
	Convey("trigger errors", t, func() {
		cfg := client.Config{
			URL:       "url",
			TLSVerify: false,
		}

		client, err := client.New(cfg, log.NewLogger("debug", ""))
		So(err, ShouldBeNil)

		cosign := NewCosignReference(client, storage.StoreController{DefaultStore: mocks.MockedImageStore{
			GetImageManifestFn: func(repo, reference string) ([]byte, godigest.Digest, string, error) {
				return []byte{}, "", "", errRef
			},
		}}, nil, log.NewLogger("debug", ""))

		// nil remote manifest is always skippable
		ok, err := cosign.canSkipReferences("repo", "tag", nil)
		So(err, ShouldBeNil)
		So(ok, ShouldBeTrue)

		// trigger GetImageManifest err
		ok, err = cosign.canSkipReferences("repo", "tag", &ispec.Manifest{MediaType: ispec.MediaTypeImageManifest})
		So(err, ShouldNotBeNil)
		So(ok, ShouldBeFalse)

		cosign = NewCosignReference(client, storage.StoreController{DefaultStore: mocks.MockedImageStore{
			GetImageManifestFn: func(repo, reference string) ([]byte, godigest.Digest, string, error) {
				return []byte{}, "", "", nil
			},
		}}, nil, log.NewLogger("debug", ""))

		// trigger unmarshal err
		ok, err = cosign.canSkipReferences("repo", "tag", &ispec.Manifest{MediaType: ispec.MediaTypeImageManifest})
		So(err, ShouldNotBeNil)
		So(ok, ShouldBeFalse)
	})
}
// TestOci exercises OciReferences with a mocked image store whose
// GetReferrers fails with ErrManifestNotFound: IsSigned reads false and
// canSkipReferences reports "must sync" without error.
func TestOci(t *testing.T) {
	Convey("trigger errors", t, func() {
		cfg := client.Config{
			URL:       "url",
			TLSVerify: false,
		}

		client, err := client.New(cfg, log.NewLogger("debug", ""))
		So(err, ShouldBeNil)

		oci := NewOciReferences(client, storage.StoreController{DefaultStore: mocks.MockedImageStore{
			GetReferrersFn: func(repo string, digest godigest.Digest, artifactTypes []string) (ispec.Index, error) {
				return ispec.Index{}, zerr.ErrManifestNotFound
			},
		}}, nil, log.NewLogger("debug", ""))

		ok := oci.IsSigned("repo", "")
		So(ok, ShouldBeFalse)

		// trigger GetReferrers err
		ok, err = oci.canSkipReferences("repo", "tag", ispec.Index{Manifests: []ispec.Descriptor{{Digest: "digest1"}}})
		So(err, ShouldBeNil)
		So(ok, ShouldBeFalse)
	})
}
// TestORAS checks ORASReferences.canSkipReferences detects that local and
// remote ORAS descriptors differ (different digest) and asks for a re-sync.
func TestORAS(t *testing.T) {
	Convey("trigger errors", t, func() {
		cfg := client.Config{
			URL:       "url",
			TLSVerify: false,
		}

		client, err := client.New(cfg, log.NewLogger("debug", ""))
		So(err, ShouldBeNil)

		orasRefs := []artifactspec.Descriptor{
			{
				MediaType:    "oras",
				ArtifactType: "oras",
				Digest:       "digest1",
			},
		}

		oras := NewORASReferences(client, storage.StoreController{DefaultStore: mocks.MockedImageStore{
			GetOrasReferrersFn: func(repo string, digest godigest.Digest, artifactType string) (
				[]artifactspec.Descriptor, error,
			) {
				return orasRefs, nil
			},
		}}, nil, log.NewLogger("debug", ""))

		// trigger artifactDescriptors not equal
		ok, err := oras.canSkipReferences("repo", "tag", ReferenceList{[]artifactspec.Descriptor{
			{
				MediaType:    "oras",
				ArtifactType: "oras",
				Digest:       "digest2",
			},
		}})
		So(err, ShouldBeNil)
		So(ok, ShouldBeFalse)
	})
}
// TestCompareManifest is a table-driven test for manifestsEqual, covering
// config digest mismatch/match, matching and mismatching layer lists, and
// layer lists of different lengths.
func TestCompareManifest(t *testing.T) {
	testCases := []struct {
		manifest1 ispec.Manifest
		manifest2 ispec.Manifest
		expected  bool
	}{
		{
			manifest1: ispec.Manifest{
				Config: ispec.Descriptor{
					Digest: "digest1",
				},
			},
			manifest2: ispec.Manifest{
				Config: ispec.Descriptor{
					Digest: "digest2",
				},
			},
			expected: false,
		},
		{
			manifest1: ispec.Manifest{
				Config: ispec.Descriptor{
					Digest: "digest",
				},
			},
			manifest2: ispec.Manifest{
				Config: ispec.Descriptor{
					Digest: "digest",
				},
			},
			expected: true,
		},
		{
			manifest1: ispec.Manifest{
				Layers: []ispec.Descriptor{{
					Digest: "digest",
					Size:   1,
				}},
			},
			manifest2: ispec.Manifest{
				Layers: []ispec.Descriptor{{
					Digest: "digest",
					Size:   1,
				}},
			},
			expected: true,
		},
		{
			manifest1: ispec.Manifest{
				Layers: []ispec.Descriptor{{
					Digest: "digest1",
					Size:   1,
				}},
			},
			manifest2: ispec.Manifest{
				Layers: []ispec.Descriptor{{
					Digest: "digest2",
					Size:   2,
				}},
			},
			expected: false,
		},
		{
			manifest1: ispec.Manifest{
				Layers: []ispec.Descriptor{
					{
						Digest: "digest",
						Size:   1,
					},
					{
						Digest: "digest1",
						Size:   1,
					},
				},
			},
			manifest2: ispec.Manifest{
				Layers: []ispec.Descriptor{{
					Digest: "digest",
					Size:   1,
				}},
			},
			expected: false,
		},
		{
			manifest1: ispec.Manifest{
				Layers: []ispec.Descriptor{
					{
						Digest: "digest1",
						Size:   1,
					},
					{
						Digest: "digest2",
						Size:   2,
					},
				},
			},
			manifest2: ispec.Manifest{
				Layers: []ispec.Descriptor{
					{
						Digest: "digest1",
						Size:   1,
					},
					{
						Digest: "digest2",
						Size:   2,
					},
				},
			},
			expected: true,
		},
		{
			manifest1: ispec.Manifest{
				Layers: []ispec.Descriptor{
					{
						Digest: "digest",
						Size:   1,
					},
					{
						Digest: "digest1",
						Size:   1,
					},
				},
			},
			manifest2: ispec.Manifest{
				Layers: []ispec.Descriptor{
					{
						Digest: "digest",
						Size:   1,
					},
					{
						Digest: "digest2",
						Size:   2,
					},
				},
			},
			expected: false,
		},
	}

	Convey("Test manifestsEqual()", t, func() {
		for _, test := range testCases {
			actualResult := manifestsEqual(test.manifest1, test.manifest2)
			So(actualResult, ShouldEqual, test.expected)
		}
	})
}
// TestCompareArtifactRefs is a table-driven test for artifactDescriptorsEqual,
// covering mismatching and matching digests and lists of different lengths.
func TestCompareArtifactRefs(t *testing.T) {
	testCases := []struct {
		refs1    []artifactspec.Descriptor
		refs2    []artifactspec.Descriptor
		expected bool
	}{
		{
			refs1: []artifactspec.Descriptor{
				{
					Digest: "digest1",
				},
			},
			refs2: []artifactspec.Descriptor{
				{
					Digest: "digest2",
				},
			},
			expected: false,
		},
		{
			refs1: []artifactspec.Descriptor{
				{
					Digest: "digest",
				},
			},
			refs2: []artifactspec.Descriptor{
				{
					Digest: "digest",
				},
			},
			expected: true,
		},
		{
			refs1: []artifactspec.Descriptor{
				{
					Digest: "digest",
				},
				{
					Digest: "digest2",
				},
			},
			refs2: []artifactspec.Descriptor{
				{
					Digest: "digest",
				},
			},
			expected: false,
		},
		{
			refs1: []artifactspec.Descriptor{
				{
					Digest: "digest1",
				},
				{
					Digest: "digest2",
				},
			},
			refs2: []artifactspec.Descriptor{
				{
					Digest: "digest1",
				},
				{
					Digest: "digest2",
				},
			},
			expected: true,
		},
		{
			refs1: []artifactspec.Descriptor{
				{
					Digest: "digest",
				},
				{
					Digest: "digest1",
				},
			},
			refs2: []artifactspec.Descriptor{
				{
					Digest: "digest1",
				},
				{
					Digest: "digest2",
				},
			},
			expected: false,
		},
	}

	// fixed copy-pasted Convey description: this test covers
	// artifactDescriptorsEqual, not manifestsEqual
	Convey("Test artifactDescriptorsEqual()", t, func() {
		for _, test := range testCases {
			actualResult := artifactDescriptorsEqual(test.refs1, test.refs2)
			So(actualResult, ShouldEqual, test.expected)
		}
	})
}

View file

@ -0,0 +1,127 @@
//go:build sync
// +build sync
package sync
import (
"context"
"fmt"
"github.com/containers/image/v5/docker"
dockerReference "github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"zotregistry.io/zot/pkg/api/constants"
"zotregistry.io/zot/pkg/common"
client "zotregistry.io/zot/pkg/extensions/sync/httpclient"
"zotregistry.io/zot/pkg/log"
)
// catalog mirrors the JSON body returned by the registry catalog endpoint.
type catalog struct {
	Repositories []string `json:"repositories"`
}
// RemoteRegistry implements the Remote interface on top of an HTTP client
// for the upstream registry and a containers/image system context derived
// from the client's TLS/auth configuration.
type RemoteRegistry struct {
	client  *client.Client
	context *types.SystemContext // built once in NewRemoteRegistry
	log     log.Logger
}
// NewRemoteRegistry builds a Remote backed by the given HTTP client; the
// containers/image system context is derived from the client's config
// (cert dir, credentials, TLS verification).
func NewRemoteRegistry(client *client.Client, logger log.Logger) Remote {
	cfg := client.GetConfig()

	return &RemoteRegistry{
		client: client,
		log:    logger,
		context: getUpstreamContext(cfg.CertDir, cfg.Username,
			cfg.Password, cfg.TLSVerify),
	}
}
// GetContext returns the containers/image system context built for this
// remote registry.
func (registry *RemoteRegistry) GetContext() *types.SystemContext {
	return registry.context
}
// GetRepositories lists the remote registry's repositories via its catalog
// endpoint. Returns an empty slice on failure.
// NOTE(review): ctx is currently unused, so the request does not honor
// cancellation — confirm whether MakeGetRequest can take a context.
func (registry *RemoteRegistry) GetRepositories(ctx context.Context) ([]string, error) {
	var catalog catalog

	_, _, _, err := registry.client.MakeGetRequest(&catalog, "application/json", //nolint: dogsled
		constants.RoutePrefix, constants.ExtCatalogPrefix)
	if err != nil {
		return []string{}, err
	}

	return catalog.Repositories, nil
}
// GetImageReference builds a containers/image docker-transport reference for
// "<remote host>/<repo>" at the given reference, which may be either a digest
// or a tag (parseReference decides which).
func (registry *RemoteRegistry) GetImageReference(repo, reference string) (types.ImageReference, error) {
	remoteHost := registry.client.GetHostname()

	repoRef, err := parseRepositoryReference(fmt.Sprintf("%s/%s", remoteHost, repo))
	if err != nil {
		registry.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).
			Str("reference", reference).Str("remote", remoteHost).
			Err(err).Msg("couldn't parse repository reference")

		return nil, err
	}

	var namedRepoRef dockerReference.Named

	// digest-style references are attached with WithDigest, tags with WithTag
	digest, ok := parseReference(reference)
	if ok {
		namedRepoRef, err = dockerReference.WithDigest(repoRef, digest)
		if err != nil {
			return nil, err
		}
	} else {
		namedRepoRef, err = dockerReference.WithTag(repoRef, reference)
		if err != nil {
			return nil, err
		}
	}

	imageRef, err := docker.NewReference(namedRepoRef)
	if err != nil {
		registry.log.Err(err).Str("transport", docker.Transport.Name()).Str("reference", namedRepoRef.String()).
			Msg("cannot obtain a valid image reference for given transport and reference")

		return nil, err
	}

	return imageRef, nil
}
// GetManifestContent fetches the manifest behind imageReference from the
// upstream registry and returns its raw bytes, media type and digest
// (computed locally from the returned bytes).
func (registry *RemoteRegistry) GetManifestContent(imageReference types.ImageReference) (
[]byte, string, digest.Digest, error,
) {
imageSource, err := imageReference.NewImageSource(context.Background(), registry.GetContext())
if err != nil {
return []byte{}, "", "", err
}
defer imageSource.Close()
// nil instanceDigest: fetch the manifest the reference itself points at
manifestBuf, mediaType, err := imageSource.GetManifest(context.Background(), nil)
if err != nil {
return []byte{}, "", "", err
}
return manifestBuf, mediaType, digest.FromBytes(manifestBuf), nil
}
// GetRepoTags lists the tags of repo on the upstream registry.
func (registry *RemoteRegistry) GetRepoTags(repo string) ([]string, error) {
remoteHost := registry.client.GetHostname()
tags, err := getRepoTags(context.Background(), registry.GetContext(), remoteHost, repo)
if err != nil {
registry.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).
Str("remote", remoteHost).Err(err).Msg("couldn't fetch tags for repo")
return []string{}, err
}
return tags, nil
}

View file

@ -0,0 +1,436 @@
//go:build sync
// +build sync
package sync
import (
"context"
"errors"
"fmt"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/opencontainers/go-digest"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/common"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
client "zotregistry.io/zot/pkg/extensions/sync/httpclient"
"zotregistry.io/zot/pkg/extensions/sync/references"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/storage"
)
// BaseService is the default Service implementation; one instance syncs
// content from one configured remote registry into the local store.
type BaseService struct {
config syncconf.RegistryConfig
credentials syncconf.CredentialsFile // credentials keyed by remote address
remote Remote
local Local
retryOptions *retry.RetryOptions
contentManager ContentManager // applies content filter/destination rules
storeController storage.StoreController
repoDB repodb.RepoDB
repositories []string // cached upstream catalog; emptied by ResetCatalog
references references.References
client *client.Client
log log.Logger
}
// New wires up a BaseService for one registry config entry: it loads
// optional file credentials, builds the content manager, local registry,
// retry policy, picks an available remote URL, and constructs the
// references syncer and remote registry on top of the resulting client.
// A failure to read the credentials file is logged and tolerated (sync
// proceeds without credentials); failure to reach any remote URL is fatal.
func New(opts syncconf.RegistryConfig, credentialsFilepath string,
storeController storage.StoreController, repodb repodb.RepoDB, log log.Logger,
) (Service, error) {
service := &BaseService{}
service.config = opts
service.log = log
service.repoDB = repodb
var err error
var credentialsFile syncconf.CredentialsFile
if credentialsFilepath != "" {
credentialsFile, err = getFileCredentials(credentialsFilepath)
if err != nil {
// best-effort: log and continue with empty credentials
log.Error().Str("errortype", common.TypeOf(err)).Str("path", credentialsFilepath).
Err(err).Msg("couldn't get registry credentials from configured path")
}
}
service.credentials = credentialsFile
service.contentManager = NewContentManager(opts.Content, log)
service.local = NewLocalRegistry(storeController, repodb, log)
retryOptions := &retry.RetryOptions{}
if opts.MaxRetries != nil {
retryOptions.MaxRetry = *opts.MaxRetries
// RetryDelay only takes effect when MaxRetries is also set
if opts.RetryDelay != nil {
retryOptions.Delay = *opts.RetryDelay
}
}
service.retryOptions = retryOptions
service.storeController = storeController
// pick (and configure the client for) the first reachable remote URL
err = service.SetNextAvailableClient()
if err != nil {
return nil, err
}
service.references = references.NewReferences(
service.client,
service.storeController,
service.repoDB,
service.log,
)
service.remote = NewRemoteRegistry(
service.client,
service.log,
)
return service, nil
}
// SetNextAvailableClient keeps the current client if it still responds;
// otherwise it walks the configured registry URLs and (re)configures the
// shared client for the first one that is reachable.
// Returns zerr.ErrSyncPingRegistry when no configured URL is reachable.
func (service *BaseService) SetNextAvailableClient() error {
	if service.client != nil && service.client.IsAvailable() {
		return nil
	}

	found := false

	for _, url := range service.config.URLs {
		remoteAddress := StripRegistryTransport(url)
		credentials := service.credentials[remoteAddress]

		// TLS verification defaults to true unless explicitly disabled
		tlsVerify := true
		if service.config.TLSVerify != nil {
			tlsVerify = *service.config.TLSVerify
		}

		options := client.Config{
			URL:       url,
			Username:  credentials.Username,
			Password:  credentials.Password,
			TLSVerify: tlsVerify,
			CertDir:   service.config.CertDir,
		}

		var err error
		if service.client != nil {
			err = service.client.SetConfig(options)
		} else {
			service.client, err = client.New(options, service.log)
		}

		if err != nil {
			return err
		}

		if !service.client.IsAvailable() {
			continue
		}

		// stop at the first reachable URL; previously the loop kept going
		// and re-configured the client with every remaining URL, which
		// could leave it pointed at an unreachable registry
		found = true

		break
	}

	// previously this only checked client == nil, so an unreachable (but
	// configured) client was reported as success
	if !found {
		return zerr.ErrSyncPingRegistry
	}

	return nil
}
// GetRetryOptions returns the retry policy (max retries, delay) built
// from the registry config; used by on-demand sync to retry in background.
// (retry.RetryOptions is the deprecated alias of retry.Options, so the
// field satisfies the declared return type.)
func (service *BaseService) GetRetryOptions() *retry.Options {
return service.retryOptions
}
// getNextRepoFromCatalog returns the repository that follows lastRepo in
// the cached catalog. An empty lastRepo yields the first repository; an
// empty result means the catalog is exhausted (or lastRepo wasn't found).
func (service *BaseService) getNextRepoFromCatalog(lastRepo string) string {
	repos := service.repositories

	if lastRepo == "" {
		if len(repos) > 0 {
			return repos[0]
		}

		return ""
	}

	for idx, repo := range repos {
		if repo == lastRepo && idx+1 < len(repos) {
			return repos[idx+1]
		}
	}

	return ""
}
// GetNextRepo returns the next upstream repository after lastRepo that
// matches the configured content filters, lazily fetching (and caching)
// the remote catalog on first use. An empty string means the catalog is
// exhausted. Used by the task scheduler.
func (service *BaseService) GetNextRepo(lastRepo string) (string, error) {
var err error
if len(service.repositories) == 0 {
// catalog not cached yet (or reset); fetch it with retries
if err = retry.RetryIfNecessary(context.Background(), func() error {
service.repositories, err = service.remote.GetRepositories(context.Background())
return err
}, service.retryOptions); err != nil {
service.log.Error().Str("errorType", common.TypeOf(err)).Str("remote registry", service.client.GetConfig().URL).
Err(err).Msg("error while getting repositories from remote registry")
return "", err
}
}
// advance through the catalog until a repo passes the content filter
var matches bool
for !matches {
lastRepo = service.getNextRepoFromCatalog(lastRepo)
if lastRepo == "" {
break
}
matches = service.contentManager.MatchesContent(lastRepo)
}
return lastRepo, nil
}
// SyncReference syncs, on demand, a single reference (signature, artifact,
// SBOM) of the given type for the image identified by subjectDigestStr.
// When content rules are configured, the local repo name is mapped back to
// its remote source; images filtered out by content rules are rejected
// with zerr.ErrSyncImageFilteredOut.
func (service *BaseService) SyncReference(repo string, subjectDigestStr string, referenceType string) error {
remoteRepo := repo
remoteURL := service.client.GetConfig().URL
if len(service.config.Content) > 0 {
remoteRepo = service.contentManager.GetRepoSource(repo)
if remoteRepo == "" {
service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("subject", subjectDigestStr).
Str("reference type", referenceType).Msg("will not sync reference for image, filtered out by content")
return zerr.ErrSyncImageFilteredOut
}
}
service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("subject", subjectDigestStr).
Str("reference type", referenceType).Msg("sync: syncing reference for image")
return service.references.SyncReference(repo, remoteRepo, subjectDigestStr, referenceType)
}
// SyncImage syncs, on demand, a single image (repo:tag or repo@digest)
// plus all of its references. Content rules are applied as in
// SyncReference. A missing referrer (zerr.ErrSyncReferrerNotFound) is not
// treated as a failure.
func (service *BaseService) SyncImage(repo, reference string) error {
remoteRepo := repo
remoteURL := service.client.GetConfig().URL
if len(service.config.Content) > 0 {
remoteRepo = service.contentManager.GetRepoSource(repo)
if remoteRepo == "" {
service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("reference", reference).
Msg("will not sync image, filtered out by content")
return zerr.ErrSyncImageFilteredOut
}
}
service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("reference", reference).
Msg("sync: syncing image")
manifestDigest, err := service.syncTag(repo, remoteRepo, reference)
if err != nil {
return err
}
// sync all references (signatures, artifacts, sboms) of the image
err = service.references.SyncAll(repo, remoteRepo, manifestDigest.String())
if err != nil && !errors.Is(err, zerr.ErrSyncReferrerNotFound) {
service.log.Error().Err(err).Str("remote", remoteURL).Str("repo", repo).Str("reference", reference).
Msg("error while syncing references for image")
return err
}
return nil
}
// SyncRepo syncs, during a periodic run, every tag of repo that passes the
// content filters, together with each tag's references. Cosign signature
// tags are skipped here (they are handled through references.SyncAll).
// Unsigned images (with onlySigned set) and unsupported media types are
// skipped without failing the repo.
func (service *BaseService) SyncRepo(repo string) error {
service.log.Info().Str("repo", repo).Str("registry", service.client.GetConfig().URL).
Msg("sync: syncing repo")
var err error
var tags []string
if err = retry.RetryIfNecessary(context.Background(), func() error {
tags, err = service.remote.GetRepoTags(repo)
return err
}, service.retryOptions); err != nil {
service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).
Err(err).Msg("error while getting tags for repo")
return err
}
// filter tags
tags, err = service.contentManager.FilterTags(repo, tags)
if err != nil {
return err
}
service.log.Info().Str("repo", repo).Msgf("sync: syncing tags %v", tags)
// apply content.destination rule
localRepo := service.contentManager.GetRepoDestination(repo)
for _, tag := range tags {
// cosign signature tags are synced via references, not as images
if references.IsCosignTag(tag) {
continue
}
var manifestDigest digest.Digest
if err = retry.RetryIfNecessary(context.Background(), func() error {
manifestDigest, err = service.syncTag(localRepo, repo, tag)
return err
}, service.retryOptions); err != nil {
if errors.Is(err, zerr.ErrSyncImageNotSigned) || errors.Is(err, zerr.ErrMediaTypeNotSupported) {
// skip unsigned images or unsupported image mediatype
continue
}
service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).
Err(err).Msg("error while syncing tags for repo")
return err
}
// empty digest means the tag was skipped; otherwise sync references
if manifestDigest != "" {
if err = retry.RetryIfNecessary(context.Background(), func() error {
err = service.references.SyncAll(localRepo, repo, manifestDigest.String())
if errors.Is(err, zerr.ErrSyncReferrerNotFound) {
// an image without referrers is not an error
return nil
}
return err
}, service.retryOptions); err != nil {
service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).
Err(err).Msg("error while syncing tags for repo")
return err
}
}
}
service.log.Info().Str("repo", repo).Msg("sync: finished syncing repo")
return nil
}
// syncTag copies one tag of remoteRepo into localRepo: it inspects the
// remote manifest, enforces the media-type and onlySigned policies, skips
// images already present locally, and otherwise performs the copy through
// containers/image and commits the result into the local image store.
// Returns the remote manifest digest ("" when the image was rejected).
func (service *BaseService) syncTag(localRepo, remoteRepo, tag string) (digest.Digest, error) {
	copyOptions := getCopyOptions(service.remote.GetContext(), service.local.GetContext())
	// signature policy context required by containers/image copy
	policyContext, err := getPolicyContext(service.log)
	if err != nil {
		return "", err
	}
	defer func() {
		_ = policyContext.Destroy()
	}()
	remoteImageRef, err := service.remote.GetImageReference(remoteRepo, tag)
	if err != nil {
		service.log.Error().Err(err).Str("errortype", common.TypeOf(err)).
			Str("repo", remoteRepo).Str("reference", tag).Msg("couldn't get a remote image reference")
		return "", err
	}
	_, mediaType, manifestDigest, err := service.remote.GetManifestContent(remoteImageRef)
	if err != nil {
		service.log.Error().Err(err).Str("repo", remoteRepo).Str("reference", tag).
			Msg("couldn't get upstream image manifest details")
		return "", err
	}
	if !isSupportedMediaType(mediaType) {
		return "", zerr.ErrMediaTypeNotSupported
	}
	// enforce the onlySigned policy before pulling any blobs
	if service.config.OnlySigned != nil && *service.config.OnlySigned {
		signed := service.references.IsSigned(remoteRepo, manifestDigest.String())
		if !signed {
			// skip unsigned images
			service.log.Info().Str("image", remoteImageRef.DockerReference().String()).
				Msg("skipping image without mandatory signature")
			return "", zerr.ErrSyncImageNotSigned
		}
	}
	skipImage, err := service.local.CanSkipImage(localRepo, tag, manifestDigest)
	if err != nil {
		// best-effort check: on error fall through and sync the image anyway
		service.log.Error().Err(err).Str("errortype", common.TypeOf(err)).
			Str("repo", localRepo).Str("reference", tag).
			Msg("couldn't check if the local image can be skipped")
	}
	if !skipImage {
		localImageRef, err := service.local.GetImageReference(localRepo, tag)
		if err != nil {
			service.log.Error().Err(err).Str("errortype", common.TypeOf(err)).
				Str("repo", localRepo).Str("reference", tag).Msg("couldn't get a local image reference")
			return "", err
		}
		service.log.Info().Str("remote image", remoteImageRef.DockerReference().String()).
			Str("local image", fmt.Sprintf("%s:%s", localRepo, tag)).Msg("syncing image")
		_, err = copy.Image(context.Background(), policyContext, localImageRef, remoteImageRef, &copyOptions)
		if err != nil {
			// fixed typo in log message: "coulnd't" -> "couldn't"
			service.log.Error().Err(err).Str("errortype", common.TypeOf(err)).
				Str("remote image", remoteImageRef.DockerReference().String()).
				Str("local image", fmt.Sprintf("%s:%s", localRepo, tag)).Msg("couldn't sync image")
			return "", err
		}
		// move the image from the temporary oci layout into the image store
		err = service.local.CommitImage(localImageRef, localRepo, tag)
		if err != nil {
			service.log.Error().Err(err).Str("errortype", common.TypeOf(err)).
				Str("repo", localRepo).Str("reference", tag).Msg("couldn't commit image to local image store")
			return "", err
		}
	} else {
		service.log.Info().Str("image", remoteImageRef.DockerReference().String()).
			Msg("skipping image because it's already synced")
	}
	service.log.Info().Str("image", remoteImageRef.DockerReference().String()).Msg("sync: finished syncing image")
	return manifestDigest, nil
}
// ResetCatalog drops the cached upstream repository list so the next
// GetNextRepo call fetches a fresh catalog from the remote registry.
// Called by the scheduler after a periodic sync round trip finishes.
func (service *BaseService) ResetCatalog() {
	service.log.Info().Msg("resetting catalog")

	service.repositories = make([]string, 0)
}
// SetNextAvailableURL re-pings the configured registry URLs and points the
// client at an available one; thin wrapper over SetNextAvailableClient
// called before every sync operation.
func (service *BaseService) SetNextAvailableURL() error {
service.log.Info().Msg("getting available client")
return service.SetNextAvailableClient()
}

View file

@ -1,592 +0,0 @@
package sync
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"path"
"strings"
notreg "github.com/notaryproject/notation-go/registry"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
oras "github.com/oras-project/artifacts-spec/specs-go/v1"
"github.com/sigstore/cosign/v2/pkg/oci/remote"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/api/constants"
"zotregistry.io/zot/pkg/common"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/meta/signatures"
"zotregistry.io/zot/pkg/storage"
storageTypes "zotregistry.io/zot/pkg/storage/types"
)
// signaturesCopier copies signatures and reference artifacts (cosign,
// notation/ORAS, OCI referrers) from an upstream registry into the local
// store, optionally recording signature metadata in repoDB.
type signaturesCopier struct {
client *http.Client
upstreamURL url.URL
credentials syncconf.Credentials
repoDB repodb.RepoDB
storeController storage.StoreController
log log.Logger
}
// newSignaturesCopier assembles a signaturesCopier over the given HTTP
// client, upstream URL and credentials, backed by the local store
// controller and (optionally nil) repoDB.
func newSignaturesCopier(httpClient *http.Client, credentials syncconf.Credentials,
	upstreamURL url.URL, repoDB repodb.RepoDB,
	storeController storage.StoreController, log log.Logger,
) *signaturesCopier {
	copier := &signaturesCopier{}
	copier.client = httpClient
	copier.credentials = credentials
	copier.upstreamURL = upstreamURL
	copier.repoDB = repoDB
	copier.storeController = storeController
	copier.log = log

	return copier
}
// getCosignManifest fetches the cosign signature manifest for the image
// digest digestStr from the upstream repo, addressing it by the cosign
// tag convention (sha256-<hex>.sig). A 404 maps to
// zerr.ErrSyncReferrerNotFound.
func (sig *signaturesCopier) getCosignManifest(repo, digestStr string) (*ispec.Manifest, error) {
var cosignManifest ispec.Manifest
cosignTag := getCosignTagFromImageDigest(digestStr)
getCosignManifestURL := sig.upstreamURL
getCosignManifestURL.Path = path.Join(getCosignManifestURL.Path, "v2", repo, "manifests", cosignTag)
getCosignManifestURL.RawQuery = getCosignManifestURL.Query().Encode()
_, statusCode, err := common.MakeHTTPGetRequest(sig.client, sig.credentials.Username,
sig.credentials.Password, &cosignManifest,
getCosignManifestURL.String(), ispec.MediaTypeImageManifest, sig.log)
if err != nil {
if statusCode == http.StatusNotFound {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("manifest", getCosignManifestURL.String()).Msg("couldn't find any cosign manifest")
return nil, zerr.ErrSyncReferrerNotFound
}
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("manifest", getCosignManifestURL.String()).Msg("couldn't get cosign manifest")
return nil, err
}
return &cosignManifest, nil
}
// getORASRefs fetches the ORAS artifact references attached to the
// manifest digestStr in the upstream repo. A 404 maps to
// zerr.ErrSyncReferrerNotFound.
func (sig *signaturesCopier) getORASRefs(repo, digestStr string) (ReferenceList, error) {
var referrers ReferenceList
getReferrersURL := sig.upstreamURL
// based on manifest digest get referrers
getReferrersURL.Path = path.Join(getReferrersURL.Path, constants.ArtifactSpecRoutePrefix,
repo, "manifests", digestStr, "referrers")
getReferrersURL.RawQuery = getReferrersURL.Query().Encode()
_, statusCode, err := common.MakeHTTPGetRequest(sig.client, sig.credentials.Username,
sig.credentials.Password, &referrers,
getReferrersURL.String(), "application/json", sig.log)
if err != nil {
if statusCode == http.StatusNotFound {
sig.log.Info().Err(err).Msg("couldn't find any ORAS artifact")
return referrers, zerr.ErrSyncReferrerNotFound
}
sig.log.Error().Err(err).Msg("couldn't get ORAS artifacts")
return referrers, err
}
return referrers, nil
}
// getOCIRefs fetches the OCI referrers index for the manifest digestStr in
// the upstream repo (the /v2/<repo>/referrers/<digest> endpoint). A 404
// maps to zerr.ErrSyncReferrerNotFound; other failures are reported as
// zerr.ErrSyncReferrer.
func (sig *signaturesCopier) getOCIRefs(repo, digestStr string) (ispec.Index, error) {
var index ispec.Index
getReferrersURL := sig.upstreamURL
// based on manifest digest get referrers
getReferrersURL.Path = path.Join(getReferrersURL.Path, "v2", repo, "referrers", digestStr)
getReferrersURL.RawQuery = getReferrersURL.Query().Encode()
_, statusCode, err := common.MakeHTTPGetRequest(sig.client, sig.credentials.Username,
sig.credentials.Password, &index,
getReferrersURL.String(), "application/json", sig.log)
if err != nil {
if statusCode == http.StatusNotFound {
sig.log.Info().Str("referrers", getReferrersURL.String()).Int("statusCode", statusCode).
Msg("couldn't find any oci reference from referrers, skipping")
return index, zerr.ErrSyncReferrerNotFound
}
sig.log.Error().Str("errorType", common.TypeOf(zerr.ErrSyncReferrer)).Err(zerr.ErrSyncReferrer).
Str("referrers", getReferrersURL.String()).Int("statusCode", statusCode).
Msg("couldn't get oci reference from referrers, skipping")
return index, zerr.ErrSyncReferrer
}
return index, nil
}
// syncCosignSignature copies a cosign signature manifest and its blobs
// (layers + config) from remoteRepo into localRepo, pushing the manifest
// under the cosign tag derived from digestStr and recording it in repoDB
// when one is configured. Already-synced signatures are skipped.
func (sig *signaturesCopier) syncCosignSignature(localRepo, remoteRepo, digestStr string,
cosignManifest *ispec.Manifest,
) error {
cosignTag := getCosignTagFromImageDigest(digestStr)
// if no manifest found
if cosignManifest == nil {
return nil
}
skipCosignSig, err := sig.canSkipCosignSignature(localRepo, digestStr, cosignManifest)
if err != nil {
// best-effort check: on error fall through and sync anyway
sig.log.Error().Err(err).Str("repository", remoteRepo).Str("reference", digestStr).
Msg("couldn't check if the upstream image cosign signature can be skipped")
}
if skipCosignSig {
return nil
}
imageStore := sig.storeController.GetImageStore(localRepo)
sig.log.Info().Msg("syncing cosign signatures")
for _, blob := range cosignManifest.Layers {
if err := syncBlob(sig, imageStore, localRepo, remoteRepo, blob.Digest); err != nil {
return err
}
}
// sync config blob
if err := syncBlob(sig, imageStore, localRepo, remoteRepo, cosignManifest.Config.Digest); err != nil {
return err
}
cosignManifestBuf, err := json.Marshal(cosignManifest)
if err != nil {
// NOTE(review): a marshal error is only logged; the push below then
// uploads an empty buffer — confirm whether this should return err
sig.log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("couldn't marshal cosign manifest")
}
// push manifest
signatureDigest, _, err := imageStore.PutImageManifest(localRepo, cosignTag,
ispec.MediaTypeImageManifest, cosignManifestBuf)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("couldn't upload cosign manifest")
return err
}
if sig.repoDB != nil {
sig.log.Debug().Str("repository", localRepo).Str("digest", digestStr).
Msg("trying to sync cosign signature for repo digest")
err := sig.repoDB.AddManifestSignature(localRepo, godigest.Digest(digestStr), repodb.SignatureMetadata{
SignatureType: signatures.CosignSignature,
SignatureDigest: signatureDigest.String(),
})
if err != nil {
return fmt.Errorf("failed to set metadata for cosign signature '%s@%s': %w", localRepo, digestStr, err)
}
sig.log.Info().Str("repository", localRepo).Str("digest", digestStr).
Msg("successfully added cosign signature to RepoDB for repo digest")
}
return nil
}
// syncORASRefs copies the given ORAS artifact references (manifests plus
// their blobs) for the subject digestStr from remoteRepo into localRepo,
// recording each as a notation signature in repoDB when one is configured.
// Already-synced artifact sets are skipped.
func (sig *signaturesCopier) syncORASRefs(localRepo, remoteRepo, digestStr string, referrers ReferenceList,
) error {
if len(referrers.References) == 0 {
return nil
}
skipORASRefs, err := sig.canSkipORASRefs(localRepo, digestStr, referrers)
if err != nil {
// best-effort check: on error fall through and sync anyway
sig.log.Error().Err(err).Str("repository", remoteRepo).Str("reference", digestStr).
Msg("couldn't check if the upstream image ORAS artifact can be skipped")
}
if skipORASRefs {
return nil
}
imageStore := sig.storeController.GetImageStore(localRepo)
sig.log.Info().Msg("syncing ORAS artifacts")
for _, ref := range referrers.References {
// get referrer manifest
getRefManifestURL := sig.upstreamURL
getRefManifestURL.Path = path.Join(getRefManifestURL.Path, "v2", remoteRepo, "manifests", ref.Digest.String())
getRefManifestURL.RawQuery = getRefManifestURL.Query().Encode()
var artifactManifest oras.Manifest
body, statusCode, err := common.MakeHTTPGetRequest(sig.client, sig.credentials.Username,
sig.credentials.Password, &artifactManifest,
getRefManifestURL.String(), ref.MediaType, sig.log)
if err != nil {
if statusCode == http.StatusNotFound {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("manifest", getRefManifestURL.String()).Msg("couldn't find any ORAS manifest")
return zerr.ErrSyncReferrerNotFound
}
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("manifest", getRefManifestURL.String()).Msg("couldn't get ORAS manifest")
return err
}
// sync every blob referenced by the artifact manifest
for _, blob := range artifactManifest.Blobs {
if err := syncBlob(sig, imageStore, localRepo, remoteRepo, blob.Digest); err != nil {
return err
}
}
// push the raw manifest body under its digest
signatureDigest, _, err := imageStore.PutImageManifest(localRepo, ref.Digest.String(),
oras.MediaTypeArtifactManifest, body)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("couldn't upload ORAS manifest")
return err
}
// this is for notation signatures
if sig.repoDB != nil {
sig.log.Debug().Str("repository", localRepo).Str("digest", digestStr).
Msg("trying to sync oras artifact for digest")
err := sig.repoDB.AddManifestSignature(localRepo, godigest.Digest(digestStr), repodb.SignatureMetadata{
SignatureType: signatures.NotationSignature,
SignatureDigest: signatureDigest.String(),
})
if err != nil {
return fmt.Errorf("failed to set metadata for oras artifact '%s@%s': %w", localRepo, digestStr, err)
}
sig.log.Info().Str("repository", localRepo).Str("digest", digestStr).
Msg("successfully added oras artifacts to RepoDB for digest")
}
}
sig.log.Info().Str("repository", localRepo).Str("digest", digestStr).
Msg("successfully synced ORAS artifacts for digest")
return nil
}
// syncOCIRefs copies the OCI referrer manifests listed in index (and, for
// image manifests, their layer/config blobs) for subject subjectStr from
// remoteRepo into localRepo, then records each as either a signature or an
// image in repoDB when one is configured. Already-synced referrer sets are
// skipped; non-image-manifest referrers are skipped entirely.
func (sig *signaturesCopier) syncOCIRefs(localRepo, remoteRepo, subjectStr string, index ispec.Index,
) error {
if len(index.Manifests) == 0 {
return nil
}
skipOCIRefs, err := sig.canSkipOCIRefs(localRepo, subjectStr, index)
if err != nil {
// best-effort check: on error fall through and sync anyway
sig.log.Error().Err(err).Str("repository", remoteRepo).Str("reference", subjectStr).
Msg("couldn't check if the upstream image oci references can be skipped")
}
if skipOCIRefs {
return nil
}
imageStore := sig.storeController.GetImageStore(localRepo)
sig.log.Info().Msg("syncing oci references")
for _, ref := range index.Manifests {
getRefManifestURL := sig.upstreamURL
getRefManifestURL.Path = path.Join(getRefManifestURL.Path, "v2", remoteRepo, "manifests", ref.Digest.String())
getRefManifestURL.RawQuery = getRefManifestURL.Query().Encode()
// NOTE(review): decoding an OCI referrer into oras.Manifest looks
// accidental — only the raw body (OCIRefBody) is used below; confirm
// the decode target is irrelevant here
var artifactManifest oras.Manifest
OCIRefBody, statusCode, err := common.MakeHTTPGetRequest(sig.client, sig.credentials.Username,
sig.credentials.Password, &artifactManifest,
getRefManifestURL.String(), ref.MediaType, sig.log)
if err != nil {
if statusCode == http.StatusNotFound {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("manifest", getRefManifestURL.String()).Msg("couldn't find any oci reference manifest")
return zerr.ErrSyncReferrerNotFound
}
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("manifest", getRefManifestURL.String()).Msg("couldn't get oci reference manifest")
return err
}
if ref.MediaType == ispec.MediaTypeImageManifest {
// read manifest
var manifest ispec.Manifest
err = json.Unmarshal(OCIRefBody, &manifest)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("manifest", getRefManifestURL.String()).Msg("couldn't unmarshal oci reference manifest")
return err
}
for _, layer := range manifest.Layers {
if err := syncBlob(sig, imageStore, localRepo, remoteRepo, layer.Digest); err != nil {
return err
}
}
// sync config blob
if err := syncBlob(sig, imageStore, localRepo, remoteRepo, manifest.Config.Digest); err != nil {
return err
}
} else {
// non-image referrers (e.g. artifact manifests) are not synced here
continue
}
// push the raw referrer manifest under its digest
refDigest, _, err := imageStore.PutImageManifest(localRepo, ref.Digest.String(),
ref.MediaType, OCIRefBody)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("couldn't upload oci reference manifest")
return err
}
if sig.repoDB != nil {
sig.log.Debug().Str("repository", localRepo).Str("digest", subjectStr).Msg("trying to add OCI refs for repo digest")
// referrers that are signatures are stored as signature metadata,
// everything else as regular image metadata
isSig, sigType, signedManifestDig, err := storage.CheckIsImageSignature(localRepo, OCIRefBody, ref.Digest.String())
if err != nil {
return fmt.Errorf("failed to set metadata for OCI ref in '%s@%s': %w", localRepo, subjectStr, err)
}
if isSig {
err = sig.repoDB.AddManifestSignature(localRepo, signedManifestDig, repodb.SignatureMetadata{
SignatureType: sigType,
SignatureDigest: refDigest.String(),
})
} else {
err = repodb.SetImageMetaFromInput(localRepo, refDigest.String(), ref.MediaType,
refDigest, OCIRefBody, sig.storeController.GetImageStore(localRepo),
sig.repoDB, sig.log)
}
if err != nil {
return fmt.Errorf("failed to set metadata for OCI ref in '%s@%s': %w", localRepo, subjectStr, err)
}
sig.log.Info().Str("repository", localRepo).Str("digest", subjectStr).
Msg("successfully added OCI refs to RepoDB for digest")
}
}
sig.log.Info().Str("repository", localRepo).Str("digest", subjectStr).
Msg("successfully synced OCI refs for digest")
return nil
}
// canSkipORASRefs reports whether the upstream ORAS artifacts in refs are
// already present locally (compared descriptor-by-descriptor). A locally
// missing manifest means "cannot skip"; any other lookup error is returned.
func (sig *signaturesCopier) canSkipORASRefs(localRepo, digestStr string, refs ReferenceList,
) (bool, error) {
imageStore := sig.storeController.GetImageStore(localRepo)
digest := godigest.Digest(digestStr)
// check oras artifacts already synced
if len(refs.References) > 0 {
localRefs, err := imageStore.GetOrasReferrers(localRepo, digest, "")
if err != nil {
if errors.Is(err, zerr.ErrManifestNotFound) {
return false, nil
}
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("repository", localRepo).Str("reference", digestStr).Msg("couldn't get local ORAS artifact manifest")
return false, err
}
if !artifactDescriptorsEqual(localRefs, refs.References) {
sig.log.Info().Str("repository", localRepo).Str("reference", digestStr).
Msg("upstream ORAS artifacts changed, syncing again")
return false, nil
}
}
sig.log.Info().Str("repository", localRepo).Str("reference", digestStr).
Msg("skipping ORAS artifact, already synced")
return true, nil
}
// canSkipCosignSignature reports whether the upstream cosign signature
// manifest is already present locally (compared manifest-by-manifest).
// A locally missing manifest means "cannot skip"; any other lookup or
// unmarshal error is returned.
func (sig *signaturesCopier) canSkipCosignSignature(localRepo, digestStr string, cosignManifest *ispec.Manifest,
) (bool, error) {
imageStore := sig.storeController.GetImageStore(localRepo)
// check cosign signature already synced
if cosignManifest != nil {
var localCosignManifest ispec.Manifest
/* we need to use tag (cosign format: sha256-$IMAGE_TAG.sig) instead of digest to get local cosign manifest
because of an issue where cosign digests differs between upstream and downstream */
cosignManifestTag := getCosignTagFromImageDigest(digestStr)
localCosignManifestBuf, _, _, err := imageStore.GetImageManifest(localRepo, cosignManifestTag)
if err != nil {
if errors.Is(err, zerr.ErrManifestNotFound) {
return false, nil
}
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("repository", localRepo).Str("reference", digestStr).
Msg("couldn't get local cosign manifest")
return false, err
}
err = json.Unmarshal(localCosignManifestBuf, &localCosignManifest)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("repository", localRepo).Str("reference", digestStr).
Msg("couldn't unmarshal local cosign signature manifest")
return false, err
}
if !manifestsEqual(localCosignManifest, *cosignManifest) {
sig.log.Info().Str("repository", localRepo).Str("reference", digestStr).
Msg("upstream cosign signatures changed, syncing again")
return false, nil
}
}
sig.log.Info().Str("repository", localRepo).Str("reference", digestStr).
Msg("skipping cosign signature, already synced")
return true, nil
}
// canSkipOCIRefs reports whether the upstream OCI referrers in index are
// already present locally (compared descriptor-by-descriptor). A locally
// missing manifest means "cannot skip"; any other lookup error is returned.
func (sig *signaturesCopier) canSkipOCIRefs(localRepo, digestStr string, index ispec.Index,
) (bool, error) {
imageStore := sig.storeController.GetImageStore(localRepo)
digest := godigest.Digest(digestStr)
// check oci references already synced
if len(index.Manifests) > 0 {
localRefs, err := imageStore.GetReferrers(localRepo, digest, nil)
if err != nil {
if errors.Is(err, zerr.ErrManifestNotFound) {
return false, nil
}
sig.log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("repository", localRepo).Str("reference", digestStr).
Msg("couldn't get local ocireferences for manifest")
return false, err
}
if !descriptorsEqual(localRefs.Manifests, index.Manifests) {
sig.log.Info().Str("repository", localRepo).Str("reference", digestStr).
Msg("upstream oci references for manifest changed, syncing again")
return false, nil
}
}
sig.log.Info().Str("repository", localRepo).Str("reference", digestStr).
Msg("skipping oci references, already synced")
return true, nil
}
// syncBlob downloads one blob (by digest) from remoteRepo on the upstream
// registry and streams it into the local image store under localRepo.
// A non-200 response maps to zerr.ErrSyncReferrer.
func syncBlob(sig *signaturesCopier, imageStore storageTypes.ImageStore, localRepo, remoteRepo string,
digest godigest.Digest,
) error {
getBlobURL := sig.upstreamURL
getBlobURL.Path = path.Join(getBlobURL.Path, "v2", remoteRepo, "blobs", digest.String())
getBlobURL.RawQuery = getBlobURL.Query().Encode()
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, getBlobURL.String(), nil)
if err != nil {
return err
}
resp, err := sig.client.Do(req)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).Str("blob url", getBlobURL.String()).
Err(err).Msg("couldn't get blob from url")
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
sig.log.Info().Str("url", getBlobURL.String()).Str("blob url", getBlobURL.String()).
Int("statusCode", resp.StatusCode).Msg("couldn't find blob from url, status code")
return zerr.ErrSyncReferrer
}
// stream the body straight into the store; the store verifies the digest
_, _, err = imageStore.FullBlobUpload(localRepo, resp.Body, digest)
if err != nil {
sig.log.Error().Str("errorType", common.TypeOf(err)).Str("digest", digest.String()).
Err(err).Msg("couldn't upload blob")
return err
}
return nil
}
// isCosignTag reports whether tag follows the cosign signature tag
// convention ("sha256-<hex>" + signature suffix). Sync pulls cosign
// signatures as regular images, so it needs to recognize their tags.
func isCosignTag(tag string) bool {
	return strings.HasPrefix(tag, "sha256-") && strings.HasSuffix(tag, remote.SignatureTagSuffix)
}
// getCosignTagFromImageDigest converts an image digest string
// ("sha256:<hex>") into its cosign signature tag form
// ("sha256-<hex>" + signature suffix); values already in cosign tag
// form are passed through unchanged.
func getCosignTagFromImageDigest(digestStr string) string {
	if isCosignTag(digestStr) {
		return digestStr
	}

	return strings.Replace(digestStr, ":", "-", 1) + "." + remote.SignatureTagSuffix
}
// getNotationManifestsFromOCIRefs filters the referrer descriptors in
// ociRefs down to those whose artifact type marks them as notation
// signatures.
func getNotationManifestsFromOCIRefs(ociRefs ispec.Index) []ispec.Descriptor {
	notaryManifests := make([]ispec.Descriptor, 0, len(ociRefs.Manifests))

	for _, descriptor := range ociRefs.Manifests {
		if descriptor.ArtifactType != notreg.ArtifactTypeNotation {
			continue
		}

		notaryManifests = append(notaryManifests, descriptor)
	}

	return notaryManifests
}

View file

@ -1,412 +1,134 @@
//go:build sync
// +build sync
package sync
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"os"
"time"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/types"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/go-digest"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/api/constants"
"zotregistry.io/zot/pkg/common"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/storage"
"zotregistry.io/zot/pkg/test/inject"
"zotregistry.io/zot/pkg/scheduler"
)
const (
SyncBlobUploadDir = ".sync"
httpMaxRedirectsCount = 15
)
// below types are used by containers/image to copy images
// types.ImageReference - describes a registry/repo:tag
// types.SystemContext - describes a registry/oci layout config
// /v2/_catalog struct.
type catalog struct {
Repositories []string `json:"repositories"`
// Service groups the sync functionality for one upstream registry;
// one Service instance exists per registry config.
type Service interface {
	// GetNextRepo returns the next repo from the remote /v2/_catalog,
	// returning an empty string when there is no repo left.
	GetNextRepo(lastRepo string) (string, error) // used by task scheduler
	// SyncRepo syncs a repo with all of its tags and references (signatures, artifacts, sboms) into ImageStore.
	SyncRepo(repo string) error // used by periodically sync
	// SyncImage syncs one image (repo:tag || repo:digest) into ImageStore.
	SyncImage(repo, reference string) error // used by sync on demand
	// SyncReference syncs a single reference (of the given type) attached to the subject digest.
	SyncReference(repo string, subjectDigestStr string, referenceType string) error // used by sync on demand
	// ResetCatalog removes all internal catalog entries.
	ResetCatalog() // used by scheduler to empty out the catalog after a sync periodically roundtrip finishes
	// SetNextAvailableURL pings the registry's configured URLs and selects a reachable one;
	// sync supports multiple urls per registry.
	SetNextAvailableURL() error // used by all sync methods
	// GetRetryOptions returns the retry options from the registry config.
	GetRetryOptions() *retry.Options // used by sync on demand to retry in background
}
type RepoReferences struct {
contentID int // matched registry config content
name string // repo name
imageReferences []types.ImageReference // contained images(tags)
// Registry is implemented by both local and remote registries; it supplies
// the image references and system context consumed by containers/image.
type Registry interface {
	// GetImageReference returns a temporary ImageReference, used by functions in the containers/image package.
	GetImageReference(repo string, tag string) (types.ImageReference, error)
	// GetContext returns the local oci layout context, used by functions in the containers/image package.
	GetContext() *types.SystemContext
}
// getUpstreamCatalog gets all repos from a registry.
func GetUpstreamCatalog(client *http.Client, upstreamURL, username, password string, log log.Logger) (catalog, error) { //nolint
var catalog catalog
registryCatalogURL := fmt.Sprintf("%s%s%s", upstreamURL, constants.RoutePrefix, constants.ExtCatalogPrefix)
body, statusCode, err := common.MakeHTTPGetRequest(client, username,
password, &catalog,
registryCatalogURL, "application/json", log)
if err != nil {
log.Error().Str("catalog", registryCatalogURL).Int("statusCode", statusCode).
RawJSON("body", body).Msg("couldn't query catalog")
return catalog, err
}
return catalog, nil
/*
OciLayoutStorage is the temporary oci layout: sync first pulls an image into it
(using the oci:// transport) and then moves the image into ImageStore.
*/
type OciLayoutStorage interface {
	Registry
}
// imagesToCopyFromRepos lists all images given a registry name and its repos.
func imagesToCopyFromUpstream(ctx context.Context, registryName string, repoName string,
upstreamCtx *types.SystemContext, content syncconf.Content, log log.Logger,
) ([]types.ImageReference, error) {
imageRefs := []types.ImageReference{}
repoRefName := fmt.Sprintf("%s/%s", registryName, repoName)
repoRef, err := parseRepositoryReference(repoRefName)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("repository reference", repoRefName).Msg("couldn't parse repository reference")
return imageRefs, err
}
tags, err := getImageTags(ctx, upstreamCtx, repoRef)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("repository reference", repoRefName).Msg("couldn't fetch tags reference")
return imageRefs, err
}
// filter based on tags rules
if content.Tags != nil {
if content.Tags.Regex != nil {
tags, err = filterTagsByRegex(tags, *content.Tags.Regex, log)
if err != nil {
return imageRefs, err
}
}
if content.Tags.Semver != nil && *content.Tags.Semver {
tags = filterTagsBySemver(tags, log)
}
}
log.Debug().Str("repository", repoName).Strs("tags", tags).Msg("upstream tags to be copied")
for _, tag := range tags {
// don't copy cosign signature, containers/image doesn't support it
// we will copy it manually later
if isCosignTag(tag) {
continue
}
taggedRef, err := reference.WithTag(repoRef, tag)
if err != nil {
log.Err(err).Str("repository", repoRef.Name()).Str("tag", tag).
Msg("error creating a reference for repository and tag")
return imageRefs, err
}
ref, err := docker.NewReference(taggedRef)
if err != nil {
log.Err(err).Str("transport", docker.Transport.Name()).Str("reference", taggedRef.String()).
Msg("cannot obtain a valid image reference for transport and reference")
return imageRefs, err
}
imageRefs = append(imageRefs, ref)
}
return imageRefs, nil
// Remote is a remote (upstream) registry.
type Remote interface {
	Registry
	// GetRepositories returns the list of repos (the remote catalog).
	GetRepositories(ctx context.Context) ([]string, error)
	// GetRepoTags returns the list of tags for the given repo.
	GetRepoTags(repo string) ([]string, error)
	// GetManifestContent returns the manifest content, mediaType and digest for the given ImageReference.
	GetManifestContent(imageReference types.ImageReference) ([]byte, string, digest.Digest, error)
}
func getCopyOptions(upstreamCtx, localCtx *types.SystemContext) copy.Options {
options := copy.Options{
DestinationCtx: localCtx,
SourceCtx: upstreamCtx,
ReportWriter: io.Discard,
ForceManifestMIMEType: ispec.MediaTypeImageManifest, // force only oci manifest MIME type
ImageListSelection: copy.CopyAllImages,
}
return options
// Local is the local (downstream) registry.
type Local interface {
	Registry
	// CanSkipImage reports whether an image is already synced (same digest present locally).
	CanSkipImage(repo, tag string, imageDigest digest.Digest) (bool, error)
	// CommitImage moves a synced repo/ref from the temporary oci layout into ImageStore.
	CommitImage(imageReference types.ImageReference, repo, tag string) error
}
func getUpstreamContext(regCfg *syncconf.RegistryConfig, credentials syncconf.Credentials) *types.SystemContext {
upstreamCtx := &types.SystemContext{}
upstreamCtx.DockerCertPath = regCfg.CertDir
upstreamCtx.DockerDaemonCertPath = regCfg.CertDir
if regCfg.TLSVerify != nil && *regCfg.TLSVerify {
upstreamCtx.DockerDaemonInsecureSkipTLSVerify = false
upstreamCtx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(false)
} else {
upstreamCtx.DockerDaemonInsecureSkipTLSVerify = true
upstreamCtx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(true)
}
if credentials != (syncconf.Credentials{}) {
upstreamCtx.DockerAuthConfig = &types.DockerAuthConfig{
Username: credentials.Username,
Password: credentials.Password,
}
}
return upstreamCtx
// TaskGenerator produces sync tasks for the task scheduler, walking the
// upstream catalog one repo at a time through the registry's sync Service.
type TaskGenerator struct {
	Service  Service
	lastRepo string // cursor: last repo handed out by Service.GetNextRepo
	done     bool   // set once GetNextRepo returns an empty repo name
	log      log.Logger
}
//nolint:gocyclo // offloading some of the functionalities from here would make the code harder to follow
func syncRegistry(ctx context.Context, regCfg syncconf.RegistryConfig,
upstreamURL string, repoDB repodb.RepoDB,
storeController storage.StoreController, localCtx *types.SystemContext,
policyCtx *signature.PolicyContext, credentials syncconf.Credentials,
retryOptions *retry.RetryOptions, log log.Logger,
) error {
log.Info().Str("registry", upstreamURL).Msg("syncing registry")
var err error
log.Debug().Msg("getting upstream context")
upstreamCtx := getUpstreamContext(&regCfg, credentials)
options := getCopyOptions(upstreamCtx, localCtx)
if !common.Contains(regCfg.URLs, upstreamURL) {
return zerr.ErrSyncInvalidUpstreamURL
func NewTaskGenerator(service Service, log log.Logger) *TaskGenerator {
return &TaskGenerator{
Service: service,
done: false,
lastRepo: "",
log: log,
}
registryURL, err := url.Parse(upstreamURL)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Str("url", upstreamURL).Msg("couldn't parse url")
return err
}
httpClient, err := common.CreateHTTPClient(*regCfg.TLSVerify, registryURL.Host, regCfg.CertDir)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("error while creating http client")
return err
}
var catalog catalog
if err = retry.RetryIfNecessary(ctx, func() error {
catalog, err = GetUpstreamCatalog(httpClient, upstreamURL, credentials.Username, credentials.Password, log)
return err
}, retryOptions); err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("error while getting upstream catalog, retrying...")
return err
}
log.Info().Int("repo count", len(catalog.Repositories)).Msg("filtering repos based on sync prefixes")
repos := filterRepos(catalog.Repositories, regCfg.Content, log)
log.Info().Interface("repos", repos).Msg("got repos")
upstreamAddr := StripRegistryTransport(upstreamURL)
reposReferences := []RepoReferences{}
for contentID, repos := range repos {
for _, repoName := range repos {
var imageReferences []types.ImageReference
if err = retry.RetryIfNecessary(ctx, func() error {
imageReferences, err = imagesToCopyFromUpstream(ctx, upstreamAddr,
repoName, upstreamCtx, regCfg.Content[contentID], log)
return err
}, retryOptions); err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("error while getting images references from upstream, retrying...")
return err
}
reposReferences = append(reposReferences, RepoReferences{
contentID: contentID,
name: repoName,
imageReferences: imageReferences,
})
}
}
sig := newSignaturesCopier(httpClient, credentials, *registryURL, repoDB, storeController, log)
for _, repoReference := range reposReferences {
upstreamRepo := repoReference.name
content := regCfg.Content[repoReference.contentID]
localRepo := getRepoDestination(upstreamRepo, content)
imageStore := storeController.GetImageStore(localRepo)
localCachePath, err := getLocalCachePath(imageStore, localRepo)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("repository", localRepo).Msg("couldn't get localCachePath for repo")
return err
}
defer os.RemoveAll(localCachePath)
for _, upstreamImageRef := range repoReference.imageReferences {
var enforeSignatures bool
if regCfg.OnlySigned != nil && *regCfg.OnlySigned {
enforeSignatures = true
}
syncContextUtils := syncContextUtils{
policyCtx: policyCtx,
localCtx: localCtx,
upstreamCtx: upstreamCtx,
upstreamAddr: upstreamAddr,
copyOptions: options,
retryOptions: &retry.Options{}, // we don't want to retry inline
enforceSignatures: enforeSignatures,
}
tag := getTagFromRef(upstreamImageRef, log).Tag()
skipped, err := syncImageWithRefs(ctx, localRepo, upstreamRepo, tag, upstreamImageRef,
syncContextUtils, sig, localCachePath, log)
if skipped || err != nil {
// skip
continue
}
}
}
log.Info().Str("upstreamAddr", upstreamAddr).Msg("finished syncing from upstream address")
return nil
}
func getLocalContexts(log log.Logger) (*types.SystemContext, *signature.PolicyContext, error) {
log.Debug().Msg("getting local context")
var policy *signature.Policy
var err error
localCtx := &types.SystemContext{}
// preserve compression
localCtx.OCIAcceptUncompressedLayers = true
// accept any image with or without signature
policy = &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
policyContext, err := signature.NewPolicyContext(policy)
if err := inject.Error(err); err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("couldn't create policy context")
return &types.SystemContext{}, &signature.PolicyContext{}, err
// GenerateTask returns the next repo-sync task, or (nil, nil) once the remote
// catalog has been exhausted (also marking the generator done). Before each
// task it re-checks which of the registry's URLs is reachable.
func (gen *TaskGenerator) GenerateTask() (scheduler.Task, error) {
	if err := gen.Service.SetNextAvailableURL(); err != nil {
		return nil, err
	}

	repo, err := gen.Service.GetNextRepo(gen.lastRepo)
	if err != nil {
		return nil, err
	}

	// empty repo means the catalog walk finished
	if repo == "" {
		gen.log.Info().Msg("sync: finished syncing all repos")
		gen.done = true

		return nil, nil
	}

	gen.lastRepo = repo

	return newSyncRepoTask(gen.lastRepo, gen.Service), nil
}
func Run(ctx context.Context, cfg syncconf.Config, repoDB repodb.RepoDB,
storeController storage.StoreController, logger log.Logger,
) error {
var credentialsFile syncconf.CredentialsFile
var err error
if cfg.CredentialsFile != "" {
credentialsFile, err = getFileCredentials(cfg.CredentialsFile)
if err != nil {
logger.Error().Str("errortype", common.TypeOf(err)).Err(err).
Str("credentialsFile", cfg.CredentialsFile).Msg("couldn't get registry credentials from credentials file")
return err
}
}
localCtx, policyCtx, err := getLocalContexts(logger)
if err != nil {
return err
}
// for each upstream registry, start a go routine.
for _, regCfg := range cfg.Registries {
// if content not provided, don't run periodically sync
if len(regCfg.Content) == 0 {
logger.Info().Strs("registry", regCfg.URLs).
Msg("sync config content not configured for registry, will not run periodically sync")
continue
}
// if pollInterval is not provided, don't run periodically sync
if regCfg.PollInterval == 0 {
logger.Warn().Strs("registry", regCfg.URLs).
Msg("sync config PollInterval not configured for registry, will not run periodically sync")
continue
}
ticker := time.NewTicker(regCfg.PollInterval)
retryOptions := &retry.RetryOptions{}
if regCfg.MaxRetries != nil {
retryOptions.MaxRetry = *regCfg.MaxRetries
if regCfg.RetryDelay != nil {
retryOptions.Delay = *regCfg.RetryDelay
}
}
// schedule each registry sync
go func(ctx context.Context, regCfg syncconf.RegistryConfig, logger log.Logger) {
for {
for _, upstreamURL := range regCfg.URLs {
upstreamAddr := StripRegistryTransport(upstreamURL)
// first try syncing main registry
if err := syncRegistry(ctx, regCfg, upstreamURL, repoDB, storeController, localCtx, policyCtx,
credentialsFile[upstreamAddr], retryOptions, logger); err != nil {
logger.Error().Str("errortype", common.TypeOf(err)).
Err(err).Str("registry", upstreamURL).
Msg("sync exited with error, falling back to auxiliary registries if any")
} else {
// if success fall back to main registry
break
}
}
select {
case <-ctx.Done():
ticker.Stop()
return
case <-ticker.C:
// run on intervals
continue
}
}
}(ctx, regCfg, logger)
}
logger.Info().Msg("finished setting up sync")
return nil
// IsDone reports whether the generator finished walking the upstream catalog
// (GetNextRepo returned an empty repo).
func (gen *TaskGenerator) IsDone() bool {
	return gen.done
}
// Reset rewinds the generator for a fresh round trip over the upstream catalog:
// clears the repo cursor, empties the service's internal catalog and unmarks done.
func (gen *TaskGenerator) Reset() {
	gen.lastRepo = ""
	gen.Service.ResetCatalog()
	gen.done = false
}
// syncRepoTask is a scheduler task that syncs one repo via the sync service.
// NOTE: field order matters — the constructor uses a positional composite literal.
type syncRepoTask struct {
	repo    string
	service Service
}
// newSyncRepoTask wraps a repo name and the sync service into a schedulable task.
func newSyncRepoTask(repo string, service Service) *syncRepoTask {
	task := &syncRepoTask{
		repo:    repo,
		service: service,
	}

	return task
}
// DoWork performs the task by delegating the repo sync to the sync service.
func (srt *syncRepoTask) DoWork() error {
	return srt.service.SyncRepo(srt.repo)
}

View file

@ -47,16 +47,23 @@ func TestSyncExtension(t *testing.T) {
defer ctlrManager.StopServer()
Convey("verify sync is skipped when binary doesn't include it", func() {
// image
resp, err := resty.R().
Head(baseURL + "/v2/" + "invalid" + "/manifests/invalid:0.0.2")
So(err, ShouldBeNil)
So(resp, ShouldNotBeNil)
// reference
resp, err = resty.R().
Head(baseURL + "/v2/" + "invalid" + "/manifests/sha256_digest.sig")
So(err, ShouldBeNil)
So(resp, ShouldNotBeNil)
data, err := os.ReadFile(logFile.Name())
So(err, ShouldBeNil)
So(string(data), ShouldContainSubstring,
"skipping syncing on demand because given zot binary doesn't include "+
"skipping enabling sync extension because given zot binary doesn't include "+
"this feature,please build a binary that does so")
})
})

File diff suppressed because it is too large Load diff

View file

@ -24,6 +24,7 @@ import (
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
"github.com/sigstore/cosign/v2/cmd/cosign/cli/attach"
"github.com/sigstore/cosign/v2/cmd/cosign/cli/generate"
"github.com/sigstore/cosign/v2/cmd/cosign/cli/options"
"github.com/sigstore/cosign/v2/cmd/cosign/cli/sign"
@ -36,11 +37,10 @@ import (
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/api/constants"
"zotregistry.io/zot/pkg/cli"
"zotregistry.io/zot/pkg/common"
extconf "zotregistry.io/zot/pkg/extensions/config"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
"zotregistry.io/zot/pkg/extensions/sync"
logger "zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
"zotregistry.io/zot/pkg/meta/signatures"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/test"
@ -201,6 +201,7 @@ func makeDownstreamServer(
}
destConfig.Extensions.Sync = syncConfig
destConfig.Log.Output = path.Join(destDir, "sync.log")
destConfig.Log.Level = "debug"
dctlr := api.NewController(destConfig)
@ -307,7 +308,6 @@ func TestORAS(t *testing.T) {
updateDuration, _ := time.ParseDuration("30m")
sctlr, srcBaseURL, srcDir, _, _ := makeUpstreamServer(t, false, false)
scm := test.NewControllerManager(sctlr)
scm.StartAndWait(sctlr.Config.HTTP.Port)
defer scm.StopServer()
@ -497,7 +497,6 @@ func TestORAS(t *testing.T) {
func TestOnDemand(t *testing.T) {
Convey("Verify sync on demand feature", t, func() {
sctlr, srcBaseURL, _, _, srcClient := makeUpstreamServer(t, false, false)
scm := test.NewControllerManager(sctlr)
scm.StartAndWait(sctlr.Config.HTTP.Port)
defer scm.StopServer()
@ -523,6 +522,7 @@ func TestOnDemand(t *testing.T) {
OnDemand: true,
}
Convey("Verify sync on demand feature with one registryConfig", func() {
defaultVal := true
syncConfig := &syncconf.Config{
Enable: &defaultVal,
@ -555,7 +555,7 @@ func TestOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
err = os.Chmod(path.Join(destDir, testImage), 0o000)
err = os.MkdirAll(path.Join(destDir, testImage), 0o000)
if err != nil {
panic(err)
}
@ -573,7 +573,7 @@ func TestOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
err = os.Chmod(path.Join(destDir, testImage, sync.SyncBlobUploadDir), 0o000)
err = os.MkdirAll(path.Join(destDir, testImage, sync.SyncBlobUploadDir), 0o000)
if err != nil {
panic(err)
}
@ -609,6 +609,11 @@ func TestOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
// for coverage, sync again
resp, err = destClient.R().Get(destBaseURL + "/v2/" + testImage + "/manifests/" + testImageTag)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
resp, err = destClient.R().Get(destBaseURL + "/v2/" + testImage + "/tags/list")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
@ -630,6 +635,68 @@ func TestOnDemand(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusInternalServerError)
})
Convey("Verify sync on demand feature with multiple registryConfig", func() {
// make a new upstream server
sctlr, newSrcBaseURL, srcDir, _, srcClient := makeUpstreamServer(t, false, false)
scm := test.NewControllerManager(sctlr)
scm.StartAndWait(sctlr.Config.HTTP.Port)
defer scm.StopServer()
// remove remote testImage
err := os.RemoveAll(path.Join(srcDir, testImage))
So(err, ShouldBeNil)
// new registryConfig with new server url
newRegistryConfig := syncRegistryConfig
newRegistryConfig.URLs = []string{newSrcBaseURL}
defaultVal := true
syncConfig := &syncconf.Config{
Enable: &defaultVal,
Registries: []syncconf.RegistryConfig{newRegistryConfig, syncRegistryConfig},
}
dctlr, destBaseURL, _, destClient := makeDownstreamServer(t, false, syncConfig)
dcm := test.NewControllerManager(dctlr)
dcm.StartAndWait(dctlr.Config.HTTP.Port)
defer dcm.StopServer()
var srcTagsList TagsList
var destTagsList TagsList
resp, _ := srcClient.R().Get(srcBaseURL + "/v2/" + testImage + "/tags/list")
So(resp, ShouldNotBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
err = json.Unmarshal(resp.Body(), &srcTagsList)
if err != nil {
panic(err)
}
resp, err = destClient.R().Get(destBaseURL + "/v2/" + "inexistent" + "/manifests/" + testImageTag)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
resp, err = destClient.R().Get(destBaseURL + "/v2/" + testImage + "/manifests/" + "inexistent")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
resp, err = destClient.R().Get(destBaseURL + "/v2/" + testImage + "/manifests/" + testImageTag)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
resp, err = destClient.R().Get(destBaseURL + "/v2/" + testImage + "/tags/list")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
err = json.Unmarshal(resp.Body(), &destTagsList)
if err != nil {
panic(err)
}
So(destTagsList, ShouldResemble, srcTagsList)
})
})
Convey("Sync on Demand errors", t, func() {
Convey("Signature copier errors", func() {
@ -666,6 +733,9 @@ func TestOnDemand(t *testing.T) {
err = test.SignImageUsingCosign(fmt.Sprintf("remote-repo@%s", manifestDigest.String()), port)
So(err, ShouldBeNil)
// add cosign sbom
attachSBOM(rootDir, port, "remote-repo", manifestDigest)
// add OCI Ref
_ = pushBlob(srcBaseURL, "remote-repo", ispec.ScratchDescriptor.Data)
@ -764,60 +834,41 @@ func TestOnDemand(t *testing.T) {
destConfig.Storage.GC = false
destConfig.Extensions = &extconf.ExtensionConfig{}
defVal := true
destConfig.Extensions.Search = &extconf.SearchConfig{
BaseConfig: extconf.BaseConfig{Enable: &defVal},
}
destConfig.Extensions.Sync = syncConfig
dctlr := api.NewController(destConfig)
dcm := test.NewControllerManager(dctlr)
dcm.StartAndWait(destPort)
// repodb fails for syncOCIRefs
dctlr.RepoDB = mocks.RepoDBMock{
SetRepoReferenceFn: func(repo, Reference string, manifestDigest godigest.Digest, mediaType string) error {
if mediaType == ispec.MediaTypeImageManifest {
return sync.ErrTestError
}
return nil
},
}
resp, err = resty.R().Get(destBaseURL + "/v2/remote-repo/manifests/test")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
// repodb fails for syncCosignSignature"
dctlr.RepoDB = mocks.RepoDBMock{
SetRepoReferenceFn: func(repo, reference string, manifestDigest godigest.Digest, mediaType string) error {
if strings.HasPrefix(reference, "sha256") || strings.HasSuffix(reference, ".sig") {
AddManifestSignatureFn: func(repo string, signedManifestDigest godigest.Digest,
sm repodb.SignatureMetadata,
) error {
if sm.SignatureType == signatures.CosignSignature || sm.SignatureType == signatures.NotationSignature {
return sync.ErrTestError
}
return nil
},
}
resp, err = resty.R().Get(destBaseURL + "/v2/remote-repo/manifests/test")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
// repodb fails for getORASRefs
dctlr.RepoDB = mocks.RepoDBMock{
SetRepoReferenceFn: func(repo, Reference string, manifestDigest godigest.Digest, mediaType string) error {
if mediaType == artifactspec.MediaTypeArtifactManifest {
SetRepoReferenceFn: func(repo, reference string, manifestDigest godigest.Digest,
mediaType string,
) error {
if strings.HasPrefix(reference, "sha256-") &&
(strings.HasSuffix(reference, remote.SignatureTagSuffix) ||
strings.HasSuffix(reference, remote.SBOMTagSuffix)) ||
strings.HasPrefix(reference, "sha256:") {
return sync.ErrTestError
}
// don't return err for normal image with tag
return nil
},
}
dcm := test.NewControllerManager(dctlr)
dcm.StartAndWait(destPort)
defer dcm.StopServer()
resp, err = resty.R().Get(destBaseURL + "/v2/remote-repo/manifests/test")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
@ -1151,7 +1202,7 @@ func TestPermsDenied(t *testing.T) {
dcm.StartAndWait(destPort)
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"couldn't get localCachePath for ", 15*time.Second)
"couldn't get a local image reference", 20*time.Second)
if err != nil {
panic(err)
}
@ -1180,15 +1231,17 @@ func TestPermsDenied(t *testing.T) {
func TestConfigReloader(t *testing.T) {
Convey("Verify periodically sync config reloader works", t, func() {
duration, _ := time.ParseDuration("3s")
sctlr, srcBaseURL, _, _, _ := makeUpstreamServer(t, false, false)
sctlr, srcBaseURL, srcDir, _, _ := makeUpstreamServer(t, false, false)
defer os.RemoveAll(srcDir)
scm := test.NewControllerManager(sctlr)
scm.StartAndWait(sctlr.Config.HTTP.Port)
defer scm.StopServer()
duration, _ := time.ParseDuration("3s")
var tlsVerify bool
defaultVal := true
syncRegistryConfig := syncconf.RegistryConfig{
Content: []syncconf.Content{
@ -1203,7 +1256,6 @@ func TestConfigReloader(t *testing.T) {
OnDemand: true,
}
defaultVal := true
syncConfig := &syncconf.Config{
Enable: &defaultVal,
Registries: []syncconf.RegistryConfig{syncRegistryConfig},
@ -1238,6 +1290,8 @@ func TestConfigReloader(t *testing.T) {
defer dcm.StopServer()
//nolint: dupl
Convey("Reload config without sync", func() {
content := fmt.Sprintf(`{"distSpecVersion": "1.1.0-dev", "storage": {"rootDirectory": "%s"},
"http": {"address": "127.0.0.1", "port": "%s"},
"log": {"level": "debug", "output": "%s"}}`, destDir, destPort, logFile.Name())
@ -1289,11 +1343,105 @@ func TestConfigReloader(t *testing.T) {
time.Sleep(2 * time.Second)
data, err := os.ReadFile(logFile.Name())
So(err, ShouldBeNil)
t.Logf("downstream log: %s", string(data))
So(err, ShouldBeNil)
So(string(data), ShouldContainSubstring, "reloaded params")
So(string(data), ShouldContainSubstring, "new configuration settings")
So(string(data), ShouldContainSubstring, "\"Sync\":null")
So(string(data), ShouldContainSubstring, "\"Extensions\":null")
})
//nolint: dupl
Convey("Reload bad sync config", func() {
content := fmt.Sprintf(`{
"distSpecVersion": "1.1.0-dev",
"storage": {
"rootDirectory": "%s"
},
"http": {
"address": "127.0.0.1",
"port": "%s"
},
"log": {
"level": "debug",
"output": "%s"
},
"extensions": {
"sync": {
"registries": [{
"urls": ["%%"],
"tlsVerify": false,
"onDemand": false,
"PollInterval": "1s",
"maxRetries": 3,
"retryDelay": "15m",
"certDir": "",
"content":[
{
"prefix": "zot-test",
"tags": {
"regex": ".*",
"semver": true
}
}
]
}]
}
}
}`, destDir, destPort, logFile.Name())
cfgfile, err := os.CreateTemp("", "zot-test*.json")
So(err, ShouldBeNil)
defer os.Remove(cfgfile.Name()) // clean up
_, err = cfgfile.Write([]byte(content))
So(err, ShouldBeNil)
hotReloader, err := cli.NewHotReloader(dctlr, cfgfile.Name())
So(err, ShouldBeNil)
reloadCtx := hotReloader.Start()
go func() {
// this blocks
if err := dctlr.Init(reloadCtx); err != nil {
return
}
if err := dctlr.Run(reloadCtx); err != nil {
return
}
}()
// wait till ready
for {
_, err := resty.R().Get(destBaseURL)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
// let it sync
time.Sleep(3 * time.Second)
// modify config
_, err = cfgfile.WriteString(" ")
So(err, ShouldBeNil)
err = cfgfile.Close()
So(err, ShouldBeNil)
time.Sleep(2 * time.Second)
data, err := os.ReadFile(logFile.Name())
t.Logf("downstream log: %s", string(data))
So(err, ShouldBeNil)
So(string(data), ShouldContainSubstring, "unable to start sync extension")
So(string(data), ShouldContainSubstring, "\"TLSVerify\":false")
So(string(data), ShouldContainSubstring, "\"OnDemand\":false")
})
})
}
@ -1711,7 +1859,7 @@ func TestBasicAuth(t *testing.T) {
defer dcm.StopServer()
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"status code\":\"401", 15*time.Second)
"authentication required", 15*time.Second)
if err != nil {
panic(err)
}
@ -1815,8 +1963,10 @@ func TestBasicAuth(t *testing.T) {
registryName := sync.StripRegistryTransport(srcBaseURL)
credentialsFile := makeCredentialsFile(fmt.Sprintf(`{"%s":{"username": "test", "password": "test"}}`, registryName))
defaultValue := false
syncRegistryConfig := syncconf.RegistryConfig{
URLs: []string{srcBaseURL},
TLSVerify: &defaultValue,
OnDemand: true,
}
@ -2140,57 +2290,6 @@ func TestNotSemver(t *testing.T) {
})
}
func TestErrorOnCatalog(t *testing.T) {
Convey("Verify error on catalog", t, func() {
updateDuration, _ := time.ParseDuration("1h")
sctlr, srcBaseURL, destDir, _, _ := makeUpstreamServer(t, true, false)
scm := test.NewControllerManager(sctlr)
scm.StartAndWait(sctlr.Config.HTTP.Port)
defer scm.StopServer()
err := os.Chmod(destDir, 0o000)
So(err, ShouldBeNil)
tlsVerify := false
syncRegistryConfig := syncconf.RegistryConfig{
Content: []syncconf.Content{
{
Prefix: testImage,
},
},
URLs: []string{srcBaseURL},
PollInterval: updateDuration,
TLSVerify: &tlsVerify,
OnDemand: true,
}
defaultVal := true
syncConfig := &syncconf.Config{
Enable: &defaultVal,
Registries: []syncconf.RegistryConfig{syncRegistryConfig},
}
dctlr, _, _, _ := makeDownstreamServer(t, false, syncConfig)
dcm := test.NewControllerManager(dctlr)
dcm.StartAndWait(dctlr.Config.HTTP.Port)
defer dcm.StopServer()
httpClient, err := common.CreateHTTPClient(*syncRegistryConfig.TLSVerify, "localhost", "")
So(httpClient, ShouldNotBeNil)
So(err, ShouldBeNil)
_, err = sync.GetUpstreamCatalog(httpClient, srcBaseURL, "", "", logger.NewLogger("", ""))
So(err, ShouldNotBeNil)
err = os.Chmod(destDir, 0o755)
So(err, ShouldBeNil)
})
}
func TestInvalidCerts(t *testing.T) {
Convey("Verify sync with bad certs", t, func() {
updateDuration, _ := time.ParseDuration("1h")
@ -2255,7 +2354,6 @@ func TestInvalidCerts(t *testing.T) {
}
dctlr, destBaseURL, _, destClient := makeDownstreamServer(t, false, syncConfig)
dcm := test.NewControllerManager(dctlr)
dcm.StartAndWait(dctlr.Config.HTTP.Port)
defer dcm.StopServer()
@ -2269,13 +2367,6 @@ func TestInvalidCerts(t *testing.T) {
func TestCertsWithWrongPerms(t *testing.T) {
Convey("Verify sync with wrong permissions on certs", t, func() {
updateDuration, _ := time.ParseDuration("1h")
sctlr, srcBaseURL, _, _, _ := makeUpstreamServer(t, true, false)
scm := test.NewControllerManager(sctlr)
scm.StartAndWait(sctlr.Config.HTTP.Port)
defer scm.StopServer()
// copy client certs, use them in sync config
clientCertDir := t.TempDir()
@ -2308,7 +2399,7 @@ func TestCertsWithWrongPerms(t *testing.T) {
Prefix: testImage,
},
},
URLs: []string{srcBaseURL},
URLs: []string{"http://localhost:9999"},
PollInterval: updateDuration,
TLSVerify: &tlsVerify,
CertDir: clientCertDir,
@ -2321,6 +2412,19 @@ func TestCertsWithWrongPerms(t *testing.T) {
Registries: []syncconf.RegistryConfig{syncRegistryConfig},
}
// can't create http client because of no perms on ca cert
destPort := test.GetFreePort()
destConfig := config.New()
destConfig.HTTP.Port = destPort
destDir := t.TempDir()
destConfig.Storage.RootDirectory = destDir
destConfig.Extensions = &extconf.ExtensionConfig{}
destConfig.Extensions.Search = nil
destConfig.Extensions.Sync = syncConfig
dctlr, destBaseURL, _, destClient := makeDownstreamServer(t, false, syncConfig)
dcm := test.NewControllerManager(dctlr)
@ -2940,13 +3044,12 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
So(err, ShouldBeNil)
dctlr, destBaseURL, _, _ := makeDownstreamServer(t, false, syncConfig)
dcm := test.NewControllerManager(dctlr)
dcm.StartAndWait(dctlr.Config.HTTP.Port)
defer dcm.StopServer()
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"finished syncing", 15*time.Second)
"finished syncing all repos", 15*time.Second)
if err != nil {
panic(err)
}
@ -2994,7 +3097,7 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
defer dcm.StopServer()
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"finished syncing", 15*time.Second)
"finished syncing all repos", 15*time.Second)
if err != nil {
panic(err)
}
@ -3012,6 +3115,11 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
resp, err := resty.R().Get(destBaseURL + "/v2/" + repoName + "/manifests/" + cosignTag)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
data, err := os.ReadFile(dctlr.Config.Log.Output)
So(err, ShouldBeNil)
t.Logf("downstream log: %s", string(data))
})
Convey("Trigger error on notary signature", func() {
@ -3058,7 +3166,7 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
defer dcm.StopServer()
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"finished syncing", 15*time.Second)
"finished syncing all repos", 15*time.Second)
if err != nil {
panic(err)
}
@ -3132,7 +3240,7 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
defer dcm.StopServer()
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"couldn't copy referrer for", 15*time.Second)
"couldn't sync image referrer", 15*time.Second)
if err != nil {
panic(err)
}
@ -3185,7 +3293,7 @@ func TestPeriodicallySignaturesErr(t *testing.T) {
defer dcm.StopServer()
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"couldn't copy referrer for", 15*time.Second)
"couldn't sync image referrer", 15*time.Second)
if err != nil {
panic(err)
}
@ -3243,14 +3351,18 @@ func TestSignatures(t *testing.T) {
So(func() { signImage(tdir, srcPort, repoName, digest) }, ShouldNotPanic)
// attach sbom
attachSBOM(srcDir, sctlr.Config.HTTP.Port, repoName, digest)
regex := ".*"
var semver bool
var tlsVerify bool
onlySigned := true
syncRegistryConfig := syncconf.RegistryConfig{
Content: []syncconf.Content{
{
Prefix: repoName,
Prefix: "**",
Tags: &syncconf.Tags{
Regex: &regex,
Semver: &semver,
@ -3261,6 +3373,7 @@ func TestSignatures(t *testing.T) {
PollInterval: updateDuration,
TLSVerify: &tlsVerify,
CertDir: "",
OnlySigned: &onlySigned,
OnDemand: true,
}
@ -3311,10 +3424,6 @@ func TestSignatures(t *testing.T) {
// notation verify the image
image := fmt.Sprintf("localhost:%s/%s:%s", destPort, repoName, testImageTag)
err = test.VerifyWithNotation(image, tdir)
So(err, ShouldBeNil)
// cosign verify the image
vrfy := verify.VerifyCommand{
RegistryOptions: options.RegistryOptions{AllowInsecure: true},
CheckClaims: true,
@ -3323,16 +3432,20 @@ func TestSignatures(t *testing.T) {
IgnoreTlog: true,
}
// notation verify signed image
err = test.VerifyWithNotation(image, tdir)
So(err, ShouldBeNil)
// cosign verify signed image
err = vrfy.Exec(context.TODO(), []string{fmt.Sprintf("localhost:%s/%s:%s", destPort, repoName, testImageTag)})
So(err, ShouldBeNil)
// get oci references from downstream, should be synced
getOCIReferrersURL := srcBaseURL + path.Join("/v2", repoName, "referrers", digest.String())
getOCIReferrersURL := destBaseURL + path.Join("/v2", repoName, "referrers", digest.String())
resp, err := resty.R().Get(getOCIReferrersURL)
So(err, ShouldBeNil)
So(resp, ShouldNotBeEmpty)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
var index ispec.Index
@ -3341,6 +3454,14 @@ func TestSignatures(t *testing.T) {
So(len(index.Manifests), ShouldEqual, 3)
// get cosign sbom
sbomCosignTag := string(digest.Algorithm()) + "-" + digest.Encoded() +
"." + remote.SBOMTagSuffix
resp, err = resty.R().Get(destBaseURL + path.Join("/v2/", repoName, "manifests", sbomCosignTag))
So(err, ShouldBeNil)
So(resp, ShouldNotBeEmpty)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
// test negative cases (trigger errors)
// test notary signatures errors
@ -3815,11 +3936,6 @@ func TestOnDemandRetryGoroutine(t *testing.T) {
resp, err = destClient.R().Get(destBaseURL + "/v2/" + testImage + "/manifests/" + testImageTag)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
data, err := os.ReadFile(dctlr.Config.Log.Output)
So(err, ShouldBeNil)
t.Logf("downstream log: %s", string(data))
})
}
@ -3939,7 +4055,7 @@ func TestOnDemandRetryGoroutineErr(t *testing.T) {
})
}
func TestOnDemandMultipleRetries(t *testing.T) {
func TestOnDemandMultipleImage(t *testing.T) {
Convey("Verify ondemand sync retries in background on error, multiple calls should spawn one routine", t, func() {
srcPort := test.GetFreePort()
srcConfig := config.New()
@ -3977,6 +4093,7 @@ func TestOnDemandMultipleRetries(t *testing.T) {
}
dctlr, destBaseURL, destDir, destClient := makeDownstreamServer(t, false, syncConfig)
defer os.RemoveAll(destDir)
dcm := test.NewControllerManager(dctlr)
dcm.StartAndWait(dctlr.Config.HTTP.Port)
@ -4086,26 +4203,26 @@ func TestOnDemandPullsOnce(t *testing.T) {
wg.Add(1)
go func(conv C) {
defer wg.Done()
resp, err := resty.R().Get(destBaseURL + "/v2/" + testImage + "/manifests/" + testImageTag)
conv.So(err, ShouldBeNil)
conv.So(resp.StatusCode(), ShouldEqual, http.StatusOK)
wg.Done()
}(conv)
wg.Add(1)
go func(conv C) {
defer wg.Done()
resp, err := resty.R().Get(destBaseURL + "/v2/" + testImage + "/manifests/" + testImageTag)
conv.So(err, ShouldBeNil)
conv.So(resp.StatusCode(), ShouldEqual, http.StatusOK)
wg.Done()
}(conv)
wg.Add(1)
go func(conv C) {
defer wg.Done()
resp, err := resty.R().Get(destBaseURL + "/v2/" + testImage + "/manifests/" + testImageTag)
conv.So(err, ShouldBeNil)
conv.So(resp.StatusCode(), ShouldEqual, http.StatusOK)
wg.Done()
}(conv)
done := make(chan bool)
@ -4174,7 +4291,7 @@ func TestError(t *testing.T) {
Registries: []syncconf.RegistryConfig{syncRegistryConfig},
}
dctlr, destBaseURL, destDir, client := makeDownstreamServer(t, false, syncConfig)
dctlr, _, destDir, _ := makeDownstreamServer(t, false, syncConfig)
dcm := test.NewControllerManager(dctlr)
dcm.StartAndWait(dctlr.Config.HTTP.Port)
@ -4193,12 +4310,8 @@ func TestError(t *testing.T) {
So(err, ShouldBeNil)
}()
resp, err := client.R().Get(destBaseURL + "/v2/" + testImage + "/manifests/" + testImageTag)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"finished syncing", 15*time.Second)
"finished syncing all repos", 15*time.Second)
if err != nil {
panic(err)
}
@ -4448,21 +4561,6 @@ func TestSignaturesOnDemand(t *testing.T) {
}
So(found, ShouldBeTrue)
found, err = test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"couldn't find upstream referrer", 15*time.Second)
if err != nil {
panic(err)
}
if !found {
data, err := os.ReadFile(dctlr.Config.Log.Output)
So(err, ShouldBeNil)
t.Logf("downstream log: %s", string(data))
}
So(found, ShouldBeTrue)
})
}
@ -4647,7 +4745,7 @@ func TestSyncOnlyDiff(t *testing.T) {
So(resp.StatusCode(), ShouldEqual, http.StatusOK)
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"already synced image", 15*time.Second)
"skipping image because it's already synced", 15*time.Second)
if err != nil {
panic(err)
}
@ -5016,7 +5114,7 @@ func TestSyncSignaturesDiff(t *testing.T) {
So(reflect.DeepEqual(cosignManifest, syncedCosignManifest), ShouldEqual, true)
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"skipping cosign signature", 15*time.Second)
"skipping syncing cosign signature", 15*time.Second)
if err != nil {
panic(err)
}
@ -5098,7 +5196,7 @@ func TestOnlySignedFlag(t *testing.T) {
defer dcm.StopServer()
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"skipping image without signature", 15*time.Second)
"skipping image without mandatory signature", 15*time.Second)
if err != nil {
panic(err)
}
@ -5147,7 +5245,6 @@ func TestSyncWithDestination(t *testing.T) {
testCases := []struct {
content syncconf.Content
expected string
repo string
}{
{
expected: "zot-test/zot-fold/zot-test",
@ -5183,24 +5280,7 @@ func TestSyncWithDestination(t *testing.T) {
},
}
srcPort := test.GetFreePort()
srcConfig := config.New()
srcBaseURL := test.GetBaseURL(srcPort)
srcConfig.HTTP.Port = srcPort
srcDir := t.TempDir()
srcConfig.Storage.RootDirectory = srcDir
defVal := true
srcConfig.Extensions = &extconf.ExtensionConfig{}
srcConfig.Extensions.Search = &extconf.SearchConfig{
BaseConfig: extconf.BaseConfig{Enable: &defVal},
}
sctlr := api.NewController(srcConfig)
test.CopyTestFiles("../../../test/data", srcDir)
sctlr, srcBaseURL, _, _, _ := makeUpstreamServer(t, false, false)
err := os.MkdirAll(path.Join(sctlr.Config.Storage.RootDirectory, "/zot-fold"), storageConstants.DefaultDirPerms)
So(err, ShouldBeNil)
@ -5213,11 +5293,11 @@ func TestSyncWithDestination(t *testing.T) {
So(err, ShouldBeNil)
scm := test.NewControllerManager(sctlr)
scm.StartAndWait(srcPort)
scm.StartAndWait(sctlr.Config.HTTP.Port)
defer scm.StopServer()
splittedURL := strings.SplitAfter(srcBaseURL, ":")
srcPort = splittedURL[len(splittedURL)-1]
srcPort := splittedURL[len(splittedURL)-1]
cwd, err := os.Getwd()
So(err, ShouldBeNil)
@ -5262,7 +5342,13 @@ func TestSyncWithDestination(t *testing.T) {
defer dcm.StopServer()
// give it time to set up sync
waitSyncFinish(dctlr.Config.Log.Output)
found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
"finished syncing repo", 60*time.Second)
if err != nil {
panic(err)
}
So(found, ShouldBeTrue)
resp, err := destClient.R().Get(destBaseURL + "/v2/" + testCase.expected + "/manifests/0.0.1")
t.Logf("testcase: %#v", testCase)
@ -5518,6 +5604,23 @@ func generateKeyPairs(tdir string) {
}
}
// attachSBOM writes a tiny placeholder SPDX SBOM file under tdir and attaches
// it (via cosign's legacy referrers mode) to the image identified by
// localhost:<port>/<repoName>@<digest>. Test helper: any failure panics.
func attachSBOM(tdir, port, repoName string, digest godigest.Digest) {
	sbomFilePath := path.Join(tdir, "sbom.spdx")

	// the SBOM content itself is irrelevant for these tests; only its presence matters
	err := os.WriteFile(sbomFilePath, []byte("sbom example"), storageConstants.DefaultFilePerms)
	if err != nil {
		panic(err)
	}

	// AllowInsecure: the test registries run plain HTTP on localhost
	err = attach.SBOMCmd(context.Background(), options.RegistryOptions{AllowInsecure: true},
		options.RegistryExperimentalOptions{RegistryReferrersMode: options.RegistryReferrersModeLegacy},
		sbomFilePath, "text/spdx", fmt.Sprintf("localhost:%s/%s@%s", port, repoName, digest.String()),
	)
	if err != nil {
		panic(err)
	}
}
func signImage(tdir, port, repoName string, digest godigest.Digest) {
annotations := []string{fmt.Sprintf("tag=%s", testImageTag)}
@ -5820,7 +5923,7 @@ func pushBlob(url string, repoName string, buf []byte) godigest.Digest {
func waitSyncFinish(logPath string) bool {
found, err := test.ReadLogFileAndSearchString(logPath,
"finished syncing", 60*time.Second)
"finished syncing all repos", 60*time.Second)
if err != nil {
panic(err)
}

View file

@ -1,254 +1,35 @@
//go:build sync
// +build sync
package sync
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path"
"regexp"
"strings"
"time"
"github.com/Masterminds/semver"
glob "github.com/bmatcuk/doublestar/v4"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/types"
guuid "github.com/gofrs/uuid"
godigest "github.com/opencontainers/go-digest"
"github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
artifactspec "github.com/oras-project/artifacts-spec/specs-go/v1"
"github.com/sigstore/cosign/v2/pkg/oci/static"
zerr "zotregistry.io/zot/errors"
"zotregistry.io/zot/pkg/common"
syncconf "zotregistry.io/zot/pkg/extensions/config/sync"
"zotregistry.io/zot/pkg/extensions/monitoring"
"zotregistry.io/zot/pkg/log"
"zotregistry.io/zot/pkg/meta/repodb"
storageCommon "zotregistry.io/zot/pkg/storage/common"
storageConstants "zotregistry.io/zot/pkg/storage/constants"
"zotregistry.io/zot/pkg/storage/local"
storageTypes "zotregistry.io/zot/pkg/storage/types"
"zotregistry.io/zot/pkg/test/inject"
)
type ReferenceList struct {
References []artifactspec.Descriptor `json:"references"`
}
// getTagFromRef returns the tagged form of an image reference.
// NOTE: when the reference carries no tag (e.g. it is digest-only), only a
// warning is logged and the returned interface is nil — callers must check
// the result before use.
func getTagFromRef(ref types.ImageReference, log log.Logger) reference.Tagged {
	tagged, isTagged := ref.DockerReference().(reference.Tagged)
	if !isTagged {
		log.Warn().Str("reference", ref.DockerReference().String()).
			Msg("internal server error, reference does not have a tag, skipping")
	}

	return tagged
}
// getImageTags enumerates every tag published in the given upstream repository.
// It returns the tag list and any transport error encountered while listing.
func getImageTags(ctx context.Context, sysCtx *types.SystemContext, repoRef reference.Named) ([]string, error) {
	taggedRef, refErr := docker.NewReference(reference.TagNameOnly(repoRef))
	// hard to reach test case, injected error, see pkg/test/dev.go
	if refErr = inject.Error(refErr); refErr != nil {
		return nil, refErr // Should never happen for a reference with tag and no digest
	}

	tagList, listErr := docker.GetRepositoryTags(ctx, sysCtx, taggedRef)
	if listErr != nil {
		return nil, listErr
	}

	return tagList, nil
}
// filterTagsByRegex keeps only the tags matching the regular expression given
// in the config. An empty tag list or an empty regex yields an empty result;
// a regex that fails to compile is reported via the returned error.
func filterTagsByRegex(tags []string, regex string, log log.Logger) ([]string, error) {
	matching := []string{}

	if regex == "" || len(tags) == 0 {
		return matching, nil
	}

	log.Info().Str("regex", regex).Msg("start filtering using the regular expression")

	compiled, err := regexp.Compile(regex)
	if err != nil {
		log.Error().Err(err).Str("regex", regex).Msg("couldn't compile regex")

		return matching, err
	}

	for _, candidate := range tags {
		if compiled.MatchString(candidate) {
			matching = append(matching, candidate)
		}
	}

	return matching, nil
}
// filterTagsBySemver retains only the tags that parse as valid semantic
// versions; everything else is silently dropped.
func filterTagsBySemver(tags []string, log log.Logger) []string {
	semverTags := []string{}

	log.Info().Msg("start filtering using semver compliant rule")

	for _, candidate := range tags {
		if _, parseErr := semver.NewVersion(candidate); parseErr == nil {
			semverTags = append(semverTags, candidate)
		}
	}

	return semverTags
}
// parseRepositoryReference parses input into a reference.Named and verifies
// that it names a repository rather than a specific image (no tag or digest).
func parseRepositoryReference(input string) (reference.Named, error) {
	named, parseErr := reference.ParseNormalizedNamed(input)

	switch {
	case parseErr != nil:
		return nil, parseErr
	case !reference.IsNameOnly(named):
		// a tag or digest was supplied, which is not a plain repository name
		return nil, zerr.ErrInvalidRepositoryName
	default:
		return named, nil
	}
}
// filterRepos groups upstream repos by the content entry whose glob prefix
// matches them. The result maps a content index (position in contentList) to
// the repos it captured; each repo is assigned to the first matching entry
// only. Entries with an invalid glob pattern are skipped with an error log.
func filterRepos(repos []string, contentList []syncconf.Content, log log.Logger) map[int][]string {
	filtered := make(map[int][]string)

	for _, repo := range repos {
		for contentID, content := range contentList {
			// handle prefixes starting with '/' (idiomatic TrimPrefix instead of
			// a manual HasPrefix check plus slicing; trims at most one leading '/')
			prefix := strings.TrimPrefix(content.Prefix, "/")

			matched, err := glob.Match(prefix, repo)
			if err != nil {
				log.Error().Str("errorType", common.TypeOf(err)).
					Err(err).Str("pattern",
					prefix).Msg("error while parsing glob pattern, skipping it...")

				continue
			}

			if matched {
				filtered[contentID] = append(filtered[contentID], repo)

				break
			}
		}
	}

	return filtered
}
// findRepoMatchingContentID returns the index of the content entry (within
// contentList) whose destination/prefix pattern matches the given localRepo
// path, or zerr.ErrRegistryNoContent when none matches. The first matching
// entry wins.
func findRepoMatchingContentID(localRepo string, contentList []syncconf.Content) (int, error) {
	contentID := -1
	localRepo = strings.Trim(localRepo, "/")

	for cID, content := range contentList {
		// make sure prefix ends in "/" to extract the meta characters
		prefix := strings.Trim(content.Prefix, "/") + "/"
		destination := strings.Trim(content.Destination, "/")

		var patternSlice []string

		if content.StripPrefix {
			// with StripPrefix, only the glob meta-character portion of the
			// prefix survives in the local path, so match against that
			_, metaCharacters := glob.SplitPattern(prefix)
			patternSlice = append(patternSlice, destination, metaCharacters)
		} else {
			patternSlice = append(patternSlice, destination, prefix)
		}

		pattern := strings.Trim(strings.Join(patternSlice, "/"), "/")

		// an invalid pattern is simply skipped rather than failing the lookup
		matched, err := glob.Match(pattern, localRepo)
		if err != nil {
			continue
		}

		if matched {
			contentID = cID

			break
		}
	}

	if contentID == -1 {
		return -1, zerr.ErrRegistryNoContent
	}

	return contentID, nil
}
// getRepoSource maps a local (downstream) repo path back to its upstream
// source path, undoing the destination/prefix rewriting described by the
// content entry.
func getRepoSource(localRepo string, content syncconf.Content) string {
	trimmedDest := strings.Trim(content.Destination, "/")
	// NOTE: "/*" is a trim cutset, so both '/' and '*' runes are stripped
	// from either end of the configured prefix.
	trimmedPrefix := strings.Trim(content.Prefix, "/*")

	remainder := strings.Trim(localRepo, "/")
	remainder = strings.TrimPrefix(remainder, trimmedDest)
	remainder = strings.Trim(remainder, "/")

	var parts []string
	if content.StripPrefix {
		// the prefix was stripped on sync, so re-attach it in front
		parts = []string{trimmedPrefix, remainder}
	} else {
		parts = []string{remainder}
	}

	joined := strings.Join(parts, "/")
	if joined == "/" {
		return joined
	}

	return strings.Trim(joined, "/")
}
// getRepoDestination returns the local storage path of the synced repo based on the specified destination.
func getRepoDestination(remoteRepo string, content syncconf.Content) string {
	upstream := strings.Trim(remoteRepo, "/")
	dest := strings.Trim(content.Destination, "/")
	// NOTE: "/*" is a trim cutset: '/' and '*' runes are stripped from both
	// ends of the configured prefix.
	pfx := strings.Trim(content.Prefix, "/*")

	if content.StripPrefix {
		// drop the configured prefix from the upstream path before joining
		upstream = strings.TrimPrefix(upstream, pfx)
		upstream = strings.Trim(upstream, "/")
	}

	joined := strings.Join([]string{dest, upstream}, "/")
	if joined == "/" {
		return "/"
	}

	return strings.Trim(joined, "/")
}
const (
SyncBlobUploadDir = ".sync"
)
// Get sync.FileCredentials from file.
func getFileCredentials(filepath string) (syncconf.CredentialsFile, error) {
@ -267,188 +48,27 @@ func getFileCredentials(filepath string) (syncconf.CredentialsFile, error) {
return creds, nil
}
func pushSyncedLocalImage(localRepo, reference, localCachePath string,
repoDB repodb.RepoDB, imageStore storageTypes.ImageStore, log log.Logger,
) error {
log.Info().Str("image", localCachePath+"/"+localRepo+":"+reference).Msg("pushing synced local image to local registry")
func getUpstreamContext(certDir, username, password string, tlsVerify bool) *types.SystemContext {
upstreamCtx := &types.SystemContext{}
upstreamCtx.DockerCertPath = certDir
upstreamCtx.DockerDaemonCertPath = certDir
var lockLatency time.Time
metrics := monitoring.NewMetricsServer(false, log)
cacheImageStore := local.NewImageStore(localCachePath, false,
storageConstants.DefaultGCDelay, false, false, log, metrics, nil, nil)
manifestBlob, manifestDigest, mediaType, err := cacheImageStore.GetImageManifest(localRepo, reference)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Str("dir", path.Join(cacheImageStore.RootDir(), localRepo)).
Str("manifest", reference).Msg("couldn't find manifest")
return err
if tlsVerify {
upstreamCtx.DockerDaemonInsecureSkipTLSVerify = false
upstreamCtx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(false)
} else {
upstreamCtx.DockerDaemonInsecureSkipTLSVerify = true
upstreamCtx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(true)
}
// is image manifest
switch mediaType {
case ispec.MediaTypeImageManifest:
if err := copyManifest(localRepo, manifestBlob, reference, repoDB, cacheImageStore, imageStore, log); err != nil {
if errors.Is(err, zerr.ErrImageLintAnnotations) {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("couldn't upload manifest because of missing annotations")
return nil
}
return err
}
case ispec.MediaTypeImageIndex:
// is image index
var indexManifest ispec.Index
if err := json.Unmarshal(manifestBlob, &indexManifest); err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Str("dir", path.Join(cacheImageStore.RootDir(), localRepo)).
Msg("invalid JSON")
return err
}
for _, manifest := range indexManifest.Manifests {
cacheImageStore.RLock(&lockLatency)
manifestBuf, err := cacheImageStore.GetBlobContent(localRepo, manifest.Digest)
cacheImageStore.RUnlock(&lockLatency)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Str("dir", path.Join(cacheImageStore.RootDir(), localRepo)).Str("digest", manifest.Digest.String()).
Msg("couldn't find manifest which is part of an image index")
return err
}
if err := copyManifest(localRepo, manifestBuf, manifest.Digest.String(), repoDB,
cacheImageStore, imageStore, log); err != nil {
if errors.Is(err, zerr.ErrImageLintAnnotations) {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("couldn't upload manifest because of missing annotations")
return nil
}
return err
if username != "" && password != "" {
upstreamCtx.DockerAuthConfig = &types.DockerAuthConfig{
Username: username,
Password: password,
}
}
_, _, err = imageStore.PutImageManifest(localRepo, reference, mediaType, manifestBlob)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("couldn't upload manifest")
return err
}
if repoDB != nil {
err = repodb.SetImageMetaFromInput(localRepo, reference, mediaType,
manifestDigest, manifestBlob, imageStore, repoDB, log)
if err != nil {
return fmt.Errorf("failed to set metadata for image '%s %s': %w", localRepo, reference, err)
}
log.Debug().Str("repository", localRepo).Str("reference", reference).
Msg("successfully set metadata for image")
}
}
return nil
}
// copyManifest copies an OCI image manifest from the cache image store into
// the destination image store: first every distributable layer blob, then the
// config blob, then the manifest itself. When a repoDB is provided, the image
// metadata is recorded after a successful upload.
func copyManifest(localRepo string, manifestContent []byte, reference string, repoDB repodb.RepoDB,
	cacheImageStore, imageStore storageTypes.ImageStore, log log.Logger,
) error {
	var manifest ispec.Manifest

	var err error

	if err := json.Unmarshal(manifestContent, &manifest); err != nil {
		log.Error().Str("errorType", common.TypeOf(err)).
			Err(err).Str("dir", path.Join(cacheImageStore.RootDir(), localRepo)).
			Msg("invalid JSON")

		return err
	}

	for _, blob := range manifest.Layers {
		// non-distributable (foreign) layers are intentionally not copied
		if storageCommon.IsNonDistributable(blob.MediaType) {
			continue
		}

		err = copyBlob(localRepo, blob.Digest, blob.MediaType,
			cacheImageStore, imageStore, log)
		if err != nil {
			return err
		}
	}

	// the config blob is copied after the layers, before the manifest upload
	err = copyBlob(localRepo, manifest.Config.Digest, manifest.Config.MediaType,
		cacheImageStore, imageStore, log)
	if err != nil {
		return err
	}

	digest, _, err := imageStore.PutImageManifest(localRepo, reference,
		ispec.MediaTypeImageManifest, manifestContent)
	if err != nil {
		log.Error().Str("errorType", common.TypeOf(err)).
			Err(err).Msg("couldn't upload manifest")

		return err
	}

	if repoDB != nil {
		// keep the metadata DB in sync with the freshly uploaded manifest
		err = repodb.SetImageMetaFromInput(localRepo, reference, ispec.MediaTypeImageManifest,
			digest, manifestContent, imageStore, repoDB, log)
		if err != nil {
			log.Error().Str("errorType", common.TypeOf(err)).
				Err(err).Msg("couldn't set metadata from input")

			return err
		}

		log.Debug().Str("repository", localRepo).Str("reference", reference).
			Msg("successfully set metadata for image")
	}

	return nil
}
// Copy a blob from one image store to another image store.
func copyBlob(localRepo string, blobDigest godigest.Digest, blobMediaType string,
souceImageStore, destinationImageStore storageTypes.ImageStore, log log.Logger,
) error {
if found, _, _ := destinationImageStore.CheckBlob(localRepo, blobDigest); found {
// Blob is already at destination, nothing to do
return nil
}
blobReadCloser, _, err := souceImageStore.GetBlob(localRepo, blobDigest, blobMediaType)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("dir", path.Join(souceImageStore.RootDir(), localRepo)).
Str("blob digest", blobDigest.String()).Str("media type", blobMediaType).
Msg("couldn't read blob")
return err
}
defer blobReadCloser.Close()
_, _, err = destinationImageStore.FullBlobUpload(localRepo, blobReadCloser, blobDigest)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("blob digest", blobDigest.String()).Str("media type", blobMediaType).
Msg("couldn't upload blob")
}
return err
return upstreamCtx
}
// sync needs transport to be stripped to not be wrongly interpreted as an image reference
@ -457,122 +77,47 @@ func StripRegistryTransport(url string) string {
return strings.Replace(strings.Replace(url, "http://", "", 1), "https://", "", 1)
}
// get an ImageReference given the registry, repo and tag.
func getImageRef(registryDomain, repo, ref string) (types.ImageReference, error) {
repoRef, err := parseRepositoryReference(fmt.Sprintf("%s/%s", registryDomain, repo))
// getRepoTags lists all tags in a repository.
// It returns a string slice of tags and any error encountered.
func getRepoTags(ctx context.Context, sysCtx *types.SystemContext, host, repo string) ([]string, error) {
repoRef, err := parseRepositoryReference(fmt.Sprintf("%s/%s", host, repo))
if err != nil {
return nil, err
return []string{}, err
}
var namedRepoRef reference.Named
digest, ok := parseReference(ref)
if ok {
namedRepoRef, err = reference.WithDigest(repoRef, digest)
if err != nil {
return nil, err
}
} else {
namedRepoRef, err = reference.WithTag(repoRef, ref)
if err != nil {
return nil, err
}
}
imageRef, err := docker.NewReference(namedRepoRef)
if err != nil {
return nil, err
}
return imageRef, err
}
// get a local ImageReference used to temporary store one synced image.
func getLocalImageRef(localCachePath, repo, reference string) (types.ImageReference, error) {
if _, err := os.ReadDir(localCachePath); err != nil {
return nil, err
}
localRepo := path.Join(localCachePath, repo)
_, refIsDigest := parseReference(reference)
if !refIsDigest {
localRepo = fmt.Sprintf("%s:%s", localRepo, reference)
}
localImageRef, err := layout.ParseReference(localRepo)
if err != nil {
return nil, err
}
return localImageRef, nil
}
// Returns the localCachePath with an UUID at the end. Only to be called once per repo.
func getLocalCachePath(imageStore storageTypes.ImageStore, repo string) (string, error) {
localRepoPath := path.Join(imageStore.RootDir(), repo, SyncBlobUploadDir)
// check if SyncBlobUploadDir exists, create if not
var err error
if _, err = os.ReadDir(localRepoPath); os.IsNotExist(err) {
if err = os.MkdirAll(localRepoPath, storageConstants.DefaultDirPerms); err != nil {
return "", err
}
}
if err != nil {
return "", err
}
// create uuid folder
uuid, err := guuid.NewV4()
dockerRef, err := docker.NewReference(reference.TagNameOnly(repoRef))
// hard to reach test case, injected error, see pkg/test/dev.go
if err := inject.Error(err); err != nil {
return "", err
if err = inject.Error(err); err != nil {
return nil, err // Should never happen for a reference with tag and no digest
}
localCachePath := path.Join(localRepoPath, uuid.String())
cachedRepoPath := path.Join(localCachePath, repo)
if err = os.MkdirAll(cachedRepoPath, storageConstants.DefaultDirPerms); err != nil {
return "", err
tags, err := docker.GetRepositoryTags(ctx, sysCtx, dockerRef)
if err != nil {
return nil, err
}
return localCachePath, nil
return tags, nil
}
// canSkipImage returns whether or not we already synced this image.
func canSkipImage(repo, tag string, digest godigest.Digest, imageStore storageTypes.ImageStore, log log.Logger,
) (bool, error) {
// check image already synced
_, localImageManifestDigest, _, err := imageStore.GetImageManifest(repo, tag)
// parseRepositoryReference parses input into a reference.Named, and verifies that it names a repository, not an image.
func parseRepositoryReference(input string) (reference.Named, error) {
ref, err := reference.ParseNormalizedNamed(input)
if err != nil {
if errors.Is(err, zerr.ErrRepoNotFound) || errors.Is(err, zerr.ErrManifestNotFound) {
return false, nil
return nil, err
}
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Str("repository", repo).Str("tag", tag).
Msg("couldn't get local image manifest")
return false, err
if !reference.IsNameOnly(ref) {
return nil, zerr.ErrInvalidRepositoryName
}
if localImageManifestDigest != digest {
log.Info().Str("repository", repo).Str("tag", tag).
Msg("upstream image digest changed, syncing again")
return false, nil
}
return true, nil
return ref, nil
}
// parse a reference, return its digest and if it's valid.
func parseReference(reference string) (godigest.Digest, bool) {
func parseReference(reference string) (digest.Digest, bool) {
var ok bool
d, err := godigest.Parse(reference)
d, err := digest.Parse(reference)
if err == nil {
ok = true
}
@ -580,212 +125,48 @@ func parseReference(reference string) (godigest.Digest, bool) {
return d, ok
}
func manifestsEqual(manifest1, manifest2 ispec.Manifest) bool {
if manifest1.Config.Digest == manifest2.Config.Digest &&
manifest1.Config.MediaType == manifest2.Config.MediaType &&
manifest1.Config.Size == manifest2.Config.Size {
if descriptorsEqual(manifest1.Layers, manifest2.Layers) {
return true
}
func getCopyOptions(upstreamCtx, localCtx *types.SystemContext) copy.Options {
options := copy.Options{
DestinationCtx: localCtx,
SourceCtx: upstreamCtx,
ReportWriter: io.Discard,
ForceManifestMIMEType: ispec.MediaTypeImageManifest, // force only oci manifest MIME type
ImageListSelection: copy.CopyAllImages,
}
return false
return options
}
func artifactDescriptorsEqual(desc1, desc2 []artifactspec.Descriptor) bool {
if len(desc1) != len(desc2) {
return false
func getPolicyContext(log log.Logger) (*signature.PolicyContext, error) {
policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
policyContext, err := signature.NewPolicyContext(policy)
if err := inject.Error(err); err != nil {
log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Msg("couldn't create policy context")
return nil, err
}
for id, desc := range desc1 {
if desc.Digest != desc2[id].Digest ||
desc.Size != desc2[id].Size ||
desc.MediaType != desc2[id].MediaType ||
desc.ArtifactType != desc2[id].ArtifactType {
return false
}
}
return true
return policyContext, nil
}
func descriptorsEqual(desc1, desc2 []ispec.Descriptor) bool {
if len(desc1) != len(desc2) {
return false
func getSupportedMediaType() []string {
return []string{
ispec.MediaTypeImageIndex,
ispec.MediaTypeImageManifest,
manifest.DockerV2ListMediaType,
manifest.DockerV2Schema2MediaType,
}
for id, desc := range desc1 {
if !descriptorEqual(desc, desc2[id]) {
return false
}
}
return true
}
func descriptorEqual(desc1, desc2 ispec.Descriptor) bool {
if desc1.Size == desc2.Size &&
desc1.Digest == desc2.Digest &&
desc1.MediaType == desc2.MediaType &&
desc1.Annotations[static.SignatureAnnotationKey] == desc2.Annotations[static.SignatureAnnotationKey] {
return true
}
return false
}
func isSupportedMediaType(mediaType string) bool {
return mediaType == ispec.MediaTypeImageIndex ||
mediaType == ispec.MediaTypeImageManifest ||
mediaType == manifest.DockerV2ListMediaType ||
mediaType == manifest.DockerV2Schema2MediaType
}
func getImageRefManifest(ctx context.Context, upstreamCtx *types.SystemContext, imageRef types.ImageReference,
log log.Logger,
) ([]byte, string, error) {
imageSource, err := imageRef.NewImageSource(ctx, upstreamCtx)
if err != nil {
log.Error().Err(err).Str("image", imageRef.DockerReference().String()).
Msg("couldn't get upstream image manifest details")
return []byte{}, "", err
}
defer imageSource.Close()
manifestBuf, mediaType, err := imageSource.GetManifest(ctx, nil)
if err != nil {
log.Error().Err(err).Str("image", imageRef.DockerReference().String()).
Msg("couldn't get upstream image manifest mediaType")
return []byte{}, "", err
}
return manifestBuf, mediaType, nil
}
func syncImageWithRefs(ctx context.Context, localRepo, upstreamRepo, reference string,
upstreamImageRef types.ImageReference, utils syncContextUtils, sig *signaturesCopier,
localCachePath string, log log.Logger,
) (bool, error) {
var skipped bool
imageStore := sig.storeController.GetImageStore(localRepo)
manifestBuf, mediaType, err := getImageRefManifest(ctx, utils.upstreamCtx, upstreamImageRef, log)
if err != nil {
return skipped, err
}
upstreamImageDigest := godigest.FromBytes(manifestBuf)
if !isSupportedMediaType(mediaType) {
return skipped, nil
}
// get upstream signatures
cosignManifest, err := sig.getCosignManifest(upstreamRepo, upstreamImageDigest.String())
if err != nil {
log.Error().Err(err).Str("image", upstreamImageRef.DockerReference().String()).
Msg("couldn't get upstream imagecosign manifest")
}
index, err := sig.getOCIRefs(upstreamRepo, upstreamImageDigest.String())
if err != nil {
log.Error().Err(err).Str("image", upstreamImageRef.DockerReference().String()).
Msg("couldn't get upstream image OCI references")
}
// check if upstream image is signed
if cosignManifest == nil && len(getNotationManifestsFromOCIRefs(index)) == 0 {
// upstream image not signed
if utils.enforceSignatures {
// skip unsigned images
log.Info().Str("image", upstreamImageRef.DockerReference().String()).
Msg("skipping image without signature")
skipped = true
return skipped, nil
}
}
skipImage, err := canSkipImage(localRepo, upstreamImageDigest.String(), upstreamImageDigest, imageStore, log)
if err != nil {
log.Error().Err(err).Str("image", upstreamImageRef.DockerReference().String()).
Msg("couldn't check if the upstream image can be skipped")
}
if !skipImage {
// sync image
localImageRef, err := getLocalImageRef(localCachePath, localRepo, reference)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("reference", localCachePath+"/"+localRepo+":"+reference).
Msg("couldn't obtain a valid image reference for reference")
return skipped, err
}
log.Info().Str("image", upstreamImageRef.DockerReference().String()).
Str("path", localCachePath).Msg("copying image to path")
if err = retry.RetryIfNecessary(ctx, func() error {
_, err = copy.Image(ctx, utils.policyCtx, localImageRef, upstreamImageRef, &utils.copyOptions)
return err
}, utils.retryOptions); err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("image", upstreamImageRef.DockerReference().String()).Str("path", localCachePath).
Msg("error while copying image to path")
return skipped, err
}
// push from cache to repo
err = pushSyncedLocalImage(localRepo, reference, localCachePath, sig.repoDB, imageStore, log)
if err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("image", localCachePath+"/"+localRepo+":"+reference).Msg("error while pushing synced cached image")
return skipped, err
}
} else {
log.Info().Str("image", upstreamImageRef.DockerReference().String()).
Msg("already synced image, checking its signatures")
}
// sync signatures
if err = retry.RetryIfNecessary(ctx, func() error {
err = sig.syncOCIRefs(localRepo, upstreamRepo, upstreamImageDigest.String(), index)
if err != nil {
return err
}
refs, err := sig.getORASRefs(upstreamRepo, upstreamImageDigest.String())
if err != nil && !errors.Is(err, zerr.ErrSyncReferrerNotFound) {
return err
}
err = sig.syncORASRefs(localRepo, upstreamRepo, upstreamImageDigest.String(), refs)
if err != nil {
return err
}
err = sig.syncCosignSignature(localRepo, upstreamRepo, upstreamImageDigest.String(), cosignManifest)
if err != nil {
return err
}
return nil
}, utils.retryOptions); err != nil {
log.Error().Str("errorType", common.TypeOf(err)).Err(err).
Str("image", upstreamImageRef.DockerReference().String()).Msg("couldn't copy referrer for image")
return skipped, err
}
log.Info().Str("image", upstreamImageRef.DockerReference().String()).Msg("successfully synced image")
return skipped, nil
mediaTypes := getSupportedMediaType()
for _, m := range mediaTypes {
if m == mediaType {
return true
}
}
return false
}

View file

@ -1503,8 +1503,9 @@ func (is *ImageStoreLocal) garbageCollect(dir string, repo string) error {
case ispec.MediaTypeImageManifest:
tag, ok := desc.Annotations[ispec.AnnotationRefName]
if ok {
// gather cosign signatures
if strings.HasPrefix(tag, "sha256-") && strings.HasSuffix(tag, remote.SignatureTagSuffix) {
// gather cosign references
if strings.HasPrefix(tag, "sha256-") && (strings.HasSuffix(tag, remote.SignatureTagSuffix) ||
strings.HasSuffix(tag, remote.SBOMTagSuffix)) {
cosignDescriptors = append(cosignDescriptors, desc)
continue
@ -1536,9 +1537,9 @@ func (is *ImageStoreLocal) garbageCollect(dir string, repo string) error {
return err
}
is.log.Info().Msg("gc: cosign signatures")
is.log.Info().Msg("gc: cosign references")
if err := gcCosignSignatures(is, oci, &index, repo, cosignDescriptors); err != nil {
if err := gcCosignReferences(is, oci, &index, repo, cosignDescriptors); err != nil {
return err
}
@ -1628,23 +1629,30 @@ func gcUntaggedManifests(imgStore *ImageStoreLocal, oci casext.Engine, index *is
return nil
}
func gcCosignSignatures(imgStore *ImageStoreLocal, oci casext.Engine, index *ispec.Index, repo string,
func gcCosignReferences(imgStore *ImageStoreLocal, oci casext.Engine, index *ispec.Index, repo string,
cosignDescriptors []ispec.Descriptor,
) error {
for _, cosignDesc := range cosignDescriptors {
foundSubject := false
// check if we can find the manifest which the signature points to
// check if we can find the manifest which the reference points to
for _, desc := range index.Manifests {
// signature
subject := fmt.Sprintf("sha256-%s.%s", desc.Digest.Encoded(), remote.SignatureTagSuffix)
if subject == cosignDesc.Annotations[ispec.AnnotationRefName] {
foundSubject = true
}
// sbom
subject = fmt.Sprintf("sha256-%s.%s", desc.Digest.Encoded(), remote.SBOMTagSuffix)
if subject == cosignDesc.Annotations[ispec.AnnotationRefName] {
foundSubject = true
}
}
if !foundSubject {
// remove manifest
imgStore.log.Info().Str("repository", repo).Str("digest", cosignDesc.Digest.String()).
Msg("gc: removing cosign signature without subject")
Msg("gc: removing cosign reference without subject")
// no need to check for manifest conflict, if one doesn't have a subject, then none with same digest will have
_, _ = common.RemoveManifestDescByReference(index, cosignDesc.Digest.String(), false)

View file

@ -258,7 +258,7 @@ type Controller interface {
type ControllerManager struct {
controller Controller
// used to stop background tasks(goroutines) - task scheduler
// used to stop background tasks(goroutines)
cancelRoutinesFunc context.CancelFunc
}
@ -283,7 +283,7 @@ func (cm *ControllerManager) StartServer() {
}
func (cm *ControllerManager) StopServer() {
// stop background tasks - task scheduler
// stop background tasks
if cm.cancelRoutinesFunc != nil {
cm.cancelRoutinesFunc()
}

View file

@ -39,17 +39,33 @@ function zot_stop() {
pkill zot
}
function wait_str() {
    # Wait until $2 appears in file $1, giving up after $3 (default 2m).
    # Returns 0 on match, 1 on timeout.
    local filepath="$1"
    local search_term="$2"
    local wait_time="${3:-2m}"

    # Follow the file from its FIRST line (-n +1), not from EOF (-n0):
    # with -n0 a line logged before this watcher starts is never seen,
    # so a fast server could log the term and the caller would still
    # burn the full timeout. The backgrounded subshell lets the pipeline
    # return as soon as grep matches.
    (timeout $wait_time tail -F -n +1 "$filepath" &) | grep -q "$search_term" && return 0

    echo "timeout of $wait_time reached. unable to find '$search_term' in '$filepath'"
    return 1
}
function wait_for_string() {
string=$1
filepath=$2
local search_term="$1"
local filepath="$2"
local wait_time="${3:-2m}"
while [ ! -f $filepath ]
do sleep 2;
done
wait_file "$filepath" 60 || { echo "server log file missing: '$filepath'"; return 1; }
while ! grep "${string}" $filepath
do sleep 10;
done
wait_str "$filepath" "$search_term" "$wait_time"
}
function wait_file() {
    # Wait up to $2 seconds (default 60) for file $1 to exist.
    # Returns 0 if the file appeared, 1 on timeout.
    local file="$1"
    local wait_seconds="${2:-60}"

    # Poll once per second until the file exists or the budget runs out.
    until [ "$((wait_seconds--))" -eq 0 ] || [ -f "$file" ]; do sleep 1; done

    # Report the actual outcome. The previous version ended on the
    # `until ... done` loop, whose status is that of the last `sleep`
    # (or 0 if the loop never ran), so a timeout still returned 0 and
    # callers' `wait_file ... || { ...; return 1; }` guards never fired.
    [ -f "$file" ]
}
function wait_zot_reachable() {

View file

@ -3,7 +3,9 @@ TEST_DATA_DIR=${ROOT_DIR}/test/data/
OS="${OS:-linux}"
ARCH="${ARCH:-amd64}"
ZOT_PATH=${ROOT_DIR}/bin/zot-${OS}-${ARCH}
ZOT_MINIMAL_PATH=${ROOT_DIR}/bin/zot-${OS}-${ARCH}-minimal
ZB_PATH=${ROOT_DIR}/bin/zb-${OS}-${ARCH}
mkdir -p ${TEST_DATA_DIR}
@ -18,6 +20,11 @@ function verify_prerequisites {
return 1
fi
if [ ! -f ${ZB_PATH} ]; then
echo "you need to build ${ZB_PATH} before running the tests" >&3
return 1
fi
if [ ! -f ${ZOT_MINIMAL_PATH} ]; then
echo "you need to build ${ZOT_MINIMAL_PATH} before running the tests" >&3
return 1
@ -45,9 +52,44 @@ function zot_serve() {
local config_file=${2}
local pid_dir=${3}
${zot_path} serve ${config_file} &
echo $! >>${pid_dir}/zot.pid
}
function zb_run() {
    # Push benchmark images with zb against the given zot address
    # (10 concurrent clients, 100 requests, results on stdout),
    # keeping the pushed images afterwards so the sync test can mirror them.
    local target_address=${1}

    ${ZB_PATH} -c 10 -n 100 -o stdout ${target_address} --skip-cleanup
}
function wait_str() {
    # Wait until $2 appears in file $1, giving up after $3 (default 2m).
    # Returns 0 on match, 1 on timeout.
    local filepath="$1"
    local search_term="$2"
    local wait_time="${3:-2m}"

    # Follow the file from its FIRST line (-n +1), not from EOF (-n0):
    # with -n0 a line logged before this watcher starts is never seen,
    # so a fast server could log the term and the caller would still
    # burn the full timeout. The backgrounded subshell lets the pipeline
    # return as soon as grep matches.
    (timeout $wait_time tail -F -n +1 "$filepath" &) | grep -q "$search_term" && return 0

    echo "timeout of $wait_time reached. unable to find '$search_term' in '$filepath'"
    return 1
}
function wait_for_string() {
    # Block until $1 shows up in log file $2, waiting at most $3 (default 2m).
    # Fails early if the log file itself never appears within 60s.
    local pattern="$1"
    local log_file="$2"
    local max_wait="${3:-2m}"

    if ! wait_file "$log_file" 60; then
        echo "server log file missing: '$log_file'"
        return 1
    fi

    wait_str "$log_file" "$pattern" "$max_wait"
}
function wait_file() {
    # Wait up to $2 seconds (default 60) for file $1 to exist.
    # Returns 0 if the file appeared, 1 on timeout.
    local file="$1"
    local wait_seconds="${2:-60}"

    # Poll once per second until the file exists or the budget runs out.
    until [ "$((wait_seconds--))" -eq 0 ] || [ -f "$file" ]; do sleep 1; done

    # Report the actual outcome. The previous version ended on the
    # `until ... done` loop, whose status is that of the last `sleep`
    # (or 0 if the loop never ran), so a timeout still returned 0 and
    # callers' `wait_file ... || { ...; return 1; }` guards never fired.
    [ -f "$file" ]
}
function zot_stop() {
local pid_dir=${1}
cat ${pid_dir}/zot.pid

View file

@ -223,7 +223,7 @@ function teardown_file() {
# attach signature
echo "{\"artifact\": \"\", \"signature\": \"pat hancock\"}" > signature.json
start=`date +%s`
run oras attach --plain-http 127.0.0.1:8080/golang:1.20 --artifact-type 'signature/example' ./signature.json:application/json
run oras attach --plain-http 127.0.0.1:8080/golang:1.20 --image-spec v1.1-image --artifact-type 'signature/example' ./signature.json:application/json
[ "$status" -eq 0 ]
end=`date +%s`
runtime=$((end-start))
@ -232,7 +232,7 @@ function teardown_file() {
# attach sbom
echo "{\"version\": \"0.0.0.0\", \"artifact\": \"'127.0.0.1:8080/golang:1.20'\", \"contents\": \"good\"}" > sbom.json
start=`date +%s`
run oras attach --plain-http 127.0.0.1:8080/golang:1.20 --artifact-type 'sbom/example' ./sbom.json:application/json
run oras attach --plain-http 127.0.0.1:8080/golang:1.20 --image-spec v1.1-image --artifact-type 'sbom/example' ./sbom.json:application/json
[ "$status" -eq 0 ]
end=`date +%s`
runtime=$((end-start))

View file

@ -113,7 +113,9 @@ function teardown_file() {
wait_zot_reachable "http://127.0.0.1:8080/v2/"
start=`date +%s`
echo "waiting for restoring blobs task to finish" >&3
wait_for_string "dedupe rebuild: finished" ${ZOT_LOG_FILE}
run wait_for_string "dedupe rebuild: finished" ${ZOT_LOG_FILE} "5m"
[ "$status" -eq 0 ]
end=`date +%s`
runtime=$((end-start))

View file

@ -46,7 +46,7 @@ function setup_file() {
],
"onDemand": false,
"tlsVerify": false,
"PollInterval": "5s",
"PollInterval": "1s",
"content": [
{
"prefix": "**"
@ -282,7 +282,7 @@ EOF
@test "sync signatures periodically" {
# wait for signatures to be copied
run sleep 5s
run sleep 15s
run notation verify --plain-http localhost:8081/golang:1.20
[ "$status" -eq 0 ]
@ -312,7 +312,7 @@ EOF
@test "sync oras artifact periodically" {
# wait for oras artifact to be copied
run sleep 5s
run sleep 15s
run oras pull --plain-http 127.0.0.1:8081/hello-artifact:v2 -d -v
[ "$status" -eq 0 ]
grep -q "hello world" artifact.txt
@ -337,7 +337,7 @@ EOF
@test "sync helm chart periodically" {
# wait for helm chart to be copied
run sleep 5s
run sleep 15s
local chart_version=$(awk '/version/{printf $2}' ${BATS_FILE_TMPDIR}/helm-charts/charts/zot/Chart.yaml)
run helm pull oci://localhost:8081/zot-chart/zot --version ${chart_version}
@ -364,7 +364,7 @@ EOF
@test "sync OCI artifact (oci image mediatype) periodically" {
# wait for OCI artifact to be copied
run sleep 5s
run sleep 15s
run regctl manifest get localhost:8081/artifact:demo
[ "$status" -eq 0 ]
run regctl artifact get localhost:8081/artifact:demo
@ -389,7 +389,7 @@ EOF
@test "sync OCI artifact (oci artifact mediatype) periodically" {
# wait for OCI artifact to be copied
run sleep 5s
run sleep 15s
run regctl manifest get localhost:8081/newartifact:demo
[ "$status" -eq 0 ]
run regctl artifact get localhost:8081/newartifact:demo
@ -433,7 +433,7 @@ EOF
@test "sync OCI artifact references periodically" {
# wait for OCI artifacts to be copied
run sleep 5
run sleep 20
run regctl artifact get localhost:8081/manifest-ref:demo
[ "$status" -eq 0 ]
[ "${lines[-1]}" == "test artifact" ]

View file

@ -23,19 +23,79 @@ function setup_file() {
"port": "8090"
},
"log": {
"level": "debug"
"level": "debug",
"output": "/tmp/blackbox.log"
},
"extensions": {
"sync": {
"registries": [
{
"urls": [
"https://docker.io/library",
"https://registry.k8s.io",
"https://aws.amazon.com/ecr",
"https://gcr.io",
"https://docker.io/library"
],
"content": [
{
"prefix": "registry"
},
{
"prefix": "archlinux"
}
],
"onDemand": true,
"tlsVerify": true
},
{
"urls": [
"https://registry.k8s.io"
],
"content": [
{
"prefix": "kube-apiserver"
},
{
"prefix": "pause"
},
{
"prefix": "kube-apiserver-amd64"
}
],
"onDemand": true,
"tlsVerify": true
},
{
"urls": [
"https://public.ecr.aws"
],
"content": [
{
"prefix": "amazonlinux/amazonlinux"
}
],
"onDemand": true,
"tlsVerify": true
},
{
"urls": [
"https://gcr.io"
],
"content": [
{
"prefix": "google-containers/kube-proxy-amd64"
}
],
"onDemand": true,
"tlsVerify": true
},
{
"urls": [
"https://mcr.microsoft.com"
],
"content": [
{
"prefix": "azure-cognitive-services/vision/spatial-analysis/diagnostics"
}
],
"onDemand": true,
"tlsVerify": true
}
@ -80,7 +140,35 @@ function teardown_file() {
run curl http://127.0.0.1:8090/v2/_catalog
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}" | jq '.repositories[0]') = '"archlinux"' ]
run curl http://127.0.0.1:8090/v2/registry/tags/list
run curl http://127.0.0.1:8090/v2/archlinux/tags/list
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}" | jq '.tags[]') = '"latest"' ]
}
@test "sync k8s image list on demand" {
    # Pulling through the sync-enabled zot on :8090 should trigger an
    # on-demand mirror of the multi-arch kube-apiserver manifest list
    # (--multi-arch=all copies every platform manifest).
    run skopeo --insecure-policy copy --multi-arch=all --src-tls-verify=false \
        docker://127.0.0.1:8090/kube-apiserver:v1.26.0 \
        oci:${TEST_DATA_DIR}
    [ "$status" -eq 0 ]

    run curl http://127.0.0.1:8090/v2/_catalog
    [ "$status" -eq 0 ]
    # NOTE(review): index [1] assumes a sorted catalog in which only
    # "archlinux" (synced by an earlier test) precedes this repo — the
    # assertion is order-sensitive with respect to prior tests.
    [ $(echo "${lines[-1]}" | jq '.repositories[1]') = '"kube-apiserver"' ]

    run curl http://127.0.0.1:8090/v2/kube-apiserver/tags/list
    [ "$status" -eq 0 ]
    [ $(echo "${lines[-1]}" | jq '.tags[]') = '"v1.26.0"' ]
}
@test "sync k8s image on demand" {
    # Pulling the single-arch "pause" image through :8090 should trigger an
    # on-demand sync from registry.k8s.io (no tag given, so "latest").
    run skopeo --insecure-policy copy --src-tls-verify=false \
        docker://127.0.0.1:8090/pause \
        oci:${TEST_DATA_DIR}
    [ "$status" -eq 0 ]

    run curl http://127.0.0.1:8090/v2/_catalog
    [ "$status" -eq 0 ]
    # NOTE(review): index [2] depends on the repos synced by the two
    # previous tests ("archlinux", "kube-apiserver") sorting before
    # "pause" — order-sensitive assertion.
    [ $(echo "${lines[-1]}" | jq '.repositories[2]') = '"pause"' ]

    run curl http://127.0.0.1:8090/v2/pause/tags/list
    [ "$status" -eq 0 ]
    [ $(echo "${lines[-1]}" | jq '.tags[]') = '"latest"' ]
}
@ -98,13 +186,13 @@ function teardown_file() {
}
@test "sync image on demand from aws.amazon.com/ecr" {
run skopeo copy docker://127.0.0.1:8090/amazonlinux:latest oci:${TEST_DATA_DIR} --src-tls-verify=false
run skopeo copy docker://127.0.0.1:8090/amazonlinux/amazonlinux:latest oci:${TEST_DATA_DIR} --src-tls-verify=false
[ "$status" -eq 0 ]
run curl http://127.0.0.1:8090/v2/_catalog
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}"| jq '.repositories | map(select(. == "amazonlinux"))' | jq '.[]') = '"amazonlinux"' ]
run curl http://127.0.0.1:8090/v2/amazonlinux/tags/list
[ $(echo "${lines[-1]}"| jq '.repositories | map(select(. == "amazonlinux/amazonlinux"))' | jq '.[]') = '"amazonlinux/amazonlinux"' ]
run curl http://127.0.0.1:8090/v2/amazonlinux/amazonlinux/tags/list
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}" | jq '.tags[]') = '"latest"' ]
}
@ -166,13 +254,13 @@ function teardown_file() {
}
@test "run docker with image synced from aws.amazon.com/ecr" {
run docker run -d 127.0.0.1:8090/amazonlinux:latest
run docker run -d 127.0.0.1:8090/amazonlinux/amazonlinux:latest
[ "$status" -eq 0 ]
run curl http://127.0.0.1:8090/v2/_catalog
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}"| jq '.repositories | map(select(. == "amazonlinux"))' | jq '.[]') = '"amazonlinux"' ]
run curl http://127.0.0.1:8090/v2/amazonlinux/tags/list
[ $(echo "${lines[-1]}"| jq '.repositories | map(select(. == "amazonlinux/amazonlinux"))' | jq '.[]') = '"amazonlinux/amazonlinux"' ]
run curl http://127.0.0.1:8090/v2/amazonlinux/amazonlinux/tags/list
[ "$status" -eq 0 ]
[ $(echo "${lines[-1]}" | jq '.tags[]') = '"latest"' ]
@ -206,4 +294,3 @@ function teardown_file() {
run docker kill $(docker ps -q)
}

View file

@ -0,0 +1,120 @@
load helpers_sync
function setup_file() {
    # Verify prerequisites are available
    if ! verify_prerequisites; then
        exit 1
    fi

    # Setup zot server
    # Two servers are prepared:
    #   - a minimal zot on :8080 acting as the upstream registry, and
    #   - a sync-enabled zot on :8081 that periodically mirrors it.
    local zot_sync_per_root_dir=${BATS_FILE_TMPDIR}/zot-per
    local zot_sync_per_config_file=${BATS_FILE_TMPDIR}/zot_sync_per_config.json
    local zot_sync_ondemand_config_file=${BATS_FILE_TMPDIR}/zot_sync_ondemand_config.json
    local zot_minimal_root_dir=${BATS_FILE_TMPDIR}/zot-minimal
    local zot_minimal_config_file=${BATS_FILE_TMPDIR}/zot_minimal_config.json
    local oci_data_dir=${BATS_FILE_TMPDIR}/oci

    mkdir -p ${zot_sync_per_root_dir}
    mkdir -p ${zot_minimal_root_dir}
    mkdir -p ${oci_data_dir}

    local ZOT_LOG_FILE=${zot_sync_per_root_dir}/zot.log

    # Sync server config: mirror everything ("prefix": "**") from the
    # minimal server every 5m; onDemand is off so only the periodic task
    # copies images. The log file is watched by the test to detect when
    # syncing finishes.
    cat >${zot_sync_per_config_file} <<EOF
{
    "distSpecVersion": "1.1.0",
    "storage": {
        "rootDirectory": "${zot_sync_per_root_dir}"
    },
    "http": {
        "address": "0.0.0.0",
        "port": "8081"
    },
    "log": {
        "level": "debug",
        "output": "${ZOT_LOG_FILE}"
    },
    "extensions": {
        "sync": {
            "registries": [
                {
                    "urls": [
                        "http://localhost:8080"
                    ],
                    "onDemand": false,
                    "tlsVerify": false,
                    "PollInterval": "5m",
                    "content": [
                        {
                            "prefix": "**"
                        }
                    ]
                }
            ]
        }
    }
}
EOF

    # Upstream (minimal build, no extensions) server config on :8080.
    cat >${zot_minimal_config_file} <<EOF
{
    "distSpecVersion": "1.1.0",
    "storage": {
        "rootDirectory": "${zot_minimal_root_dir}"
    },
    "http": {
        "address": "0.0.0.0",
        "port": "8080"
    },
    "log": {
        "level": "debug",
        "output": "${zot_minimal_root_dir}/zot.log"
    }
}
EOF

    # Only the upstream server is started here; the sync server is started
    # inside the test, after zb has populated the upstream.
    setup_zot_minimal_file_level ${zot_minimal_config_file}
    wait_zot_reachable "http://127.0.0.1:8080/v2/_catalog"
}
function teardown_file() {
    # Mirror the directories created in setup_file, then stop the zot
    # servers and remove everything the suite wrote under the tmpdir.
    local sync_root=${BATS_FILE_TMPDIR}/zot-per
    local oci_dir=${BATS_FILE_TMPDIR}/oci
    local minimal_root=${BATS_FILE_TMPDIR}/zot-minimal

    teardown_zot_file_level

    rm -rf ${sync_root} ${minimal_root} ${oci_dir}
}
# sync zb images
@test "run zb benchmark and let zot sync all repos" {
    local zot_sync_per_root_dir=${BATS_FILE_TMPDIR}/zot-per
    local zot_sync_per_config_file=${BATS_FILE_TMPDIR}/zot_sync_per_config.json
    local zot_minimal_root_dir=${BATS_FILE_TMPDIR}/zot-minimal
    local ZOT_LOG_FILE=${zot_sync_per_root_dir}/zot.log

    # Populate the upstream (minimal) zot on :8080 with benchmark images.
    zb_run "http://127.0.0.1:8080"

    # start zot sync server
    setup_zot_file_level ${zot_sync_per_config_file}
    wait_zot_reachable "http://127.0.0.1:8081/v2/_catalog"

    start=`date +%s`
    echo "waiting for sync to finish" >&3

    # Watch the sync server's log for the completion marker, up to 3m.
    run wait_for_string "sync: finished syncing all repos" ${ZOT_LOG_FILE} "3m"
    [ "$status" -eq 0 ]

    end=`date +%s`
    runtime=$((end-start))
    echo "sync finished in $runtime sec" >&3

    sleep 10 # wait a bit more because sync runs in background.

    # diff, but exclude log files, .sync subdirs and cache.db
    # Storage trees of the two servers must be identical after a full sync.
    run diff -r -x "*.db" -x ".sync" -x "*.log" ${zot_sync_per_root_dir} ${zot_minimal_root_dir}
    [ "$status" -eq 0 ]
}