0
Fork 0
mirror of https://github.com/project-zot/zot.git synced 2024-12-30 22:34:13 -05:00

Merge pull request from GHSA-55r9-5mx9-qq7r

When a client pushes an image, zot's inline dedupe tries to find the blob path
corresponding to the blob digest that is currently being pushed; if it is found
in the cache, zot makes a symbolic link to that cache entry and reports to the
client that the blob already exists at that location.

Before this patch, authorization was not applied to this process, meaning
that a user could copy blobs without having permissions on the source repo.

Added a rule which says that the client should have read permissions on the source repo
before deduping, otherwise just Stat() the blob and return the corresponding status code.

Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>
Co-authored-by: Petu Eusebiu <peusebiu@cisco.com>
This commit is contained in:
Ramkumar Chinchani 2024-07-08 11:35:44 -07:00 committed by GitHub
parent 002ff05f6e
commit aaee0220e4
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 492 additions and 10 deletions

View file

@ -5142,6 +5142,122 @@ func TestGetUsername(t *testing.T) {
})
}
// TestAuthorizationMountBlob verifies that cross-repo blob mounting (inline
// dedupe) honors authorization: a user may only dedupe/mount a blob from a
// cached location if they have read permission on a repo that holds it.
// Covers the fix for GHSA-55r9-5mx9-qq7r.
func TestAuthorizationMountBlob(t *testing.T) {
	Convey("Make a new controller", t, func() {
		port := test.GetFreePort()
		baseURL := test.GetBaseURL(port)
		conf := config.New()
		conf.HTTP.Port = port
		// two distinct users, each granted access only to repos under their own namespace
		username1, _ := test.GenerateRandomString()
		password1, _ := test.GenerateRandomString()
		username2, _ := test.GenerateRandomString()
		password2, _ := test.GenerateRandomString()
		username1 = strings.ToLower(username1)
		username2 = strings.ToLower(username2)
		content := test.GetCredString(username1, password1) + test.GetCredString(username2, password2)
		htpasswdPath := test.MakeHtpasswdFileFromString(content)
		defer os.Remove(htpasswdPath)
		conf.HTTP.Auth = &config.AuthConfig{
			HTPasswd: config.AuthHTPasswd{
				Path: htpasswdPath,
			},
		}
		// glob patterns granting each user access only to "<username>/**" repos
		user1Repo := fmt.Sprintf("%s/**", username1)
		user2Repo := fmt.Sprintf("%s/**", username2)
		// config with all policy types, to test that the correct one is applied in each case
		conf.HTTP.AccessControl = &config.AccessControlConfig{
			Repositories: config.Repositories{
				user1Repo: config.PolicyGroup{
					Policies: []config.Policy{
						{
							Users: []string{username1},
							Actions: []string{
								constants.ReadPermission,
								constants.CreatePermission,
							},
						},
					},
				},
				user2Repo: config.PolicyGroup{
					Policies: []config.Policy{
						{
							Users: []string{username2},
							Actions: []string{
								constants.ReadPermission,
								constants.CreatePermission,
							},
						},
					},
				},
			},
		}
		dir := t.TempDir()
		ctlr := api.NewController(conf)
		ctlr.Config.Storage.RootDirectory = dir
		cm := test.NewControllerManager(ctlr)
		cm.StartAndWait(port)
		defer cm.StopServer()
		// one authenticated HTTP client per user
		userClient1 := resty.New()
		userClient1.SetBasicAuth(username1, password1)
		userClient2 := resty.New()
		userClient2.SetBasicAuth(username2, password2)
		img := CreateImageWith().RandomLayers(1, 2).DefaultConfig().Build()
		repoName1 := username1 + "/" + "myrepo"
		tag := "1.0"
		// upload image with user1 on repoName1; its layer blob is now in the dedupe cache
		err := UploadImageWithBasicAuth(img, baseURL, repoName1, tag, username1, password1)
		So(err, ShouldBeNil)
		repoName2 := username2 + "/" + "myrepo"
		blobDigest := img.Manifest.Layers[0].Digest
		/* a HEAD request by user2 on blob digest (found in user1Repo) should return 404
		because user2 doesn't have permissions to read user1Repo */
		resp, err := userClient2.R().Head(baseURL + fmt.Sprintf("/v2/%s/blobs/%s", repoName2, blobDigest))
		So(err, ShouldBeNil)
		So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)
		params := make(map[string]string)
		params["mount"] = blobDigest.String()
		// trying to mount a blob which can be found in cache, but user doesn't have permission
		// should return 202 (new upload session started) instead of 201 (mounted)
		resp, err = userClient2.R().SetQueryParams(params).Post(baseURL + "/v2/" + repoName2 + "/blobs/uploads/")
		So(err, ShouldBeNil)
		So(resp.StatusCode(), ShouldEqual, http.StatusAccepted)
		/* a HEAD request by user1 on blob digest (found in user1Repo) should return 200
		because user1 has permission to read user1Repo; note the target repo does not
		exist yet — the 200 comes from the dedupe cache lookup */
		resp, err = userClient1.R().Head(baseURL + fmt.Sprintf("/v2/%s/blobs/%s", username1+"/"+"mysecondrepo", blobDigest))
		So(err, ShouldBeNil)
		So(resp.StatusCode(), ShouldEqual, http.StatusOK)
		// user2 can upload without dedupe; afterwards the blob exists in a repo user2 can read
		err = UploadImageWithBasicAuth(img, baseURL, repoName2, tag, username2, password2)
		So(err, ShouldBeNil)
		// trying to mount a blob which can be found in cache and user has permission should return 201 instead of 202
		resp, err = userClient2.R().SetQueryParams(params).Post(baseURL + "/v2/" + repoName2 + "/blobs/uploads/")
		So(err, ShouldBeNil)
		So(resp.StatusCode(), ShouldEqual, http.StatusCreated)
	})
}
func TestAuthorizationWithOnlyAnonymousPolicy(t *testing.T) {
Convey("Make a new controller", t, func() {
const TestRepo = "my-repos/repo"

View file

@ -873,6 +873,37 @@ func (rh *RouteHandler) DeleteManifest(response http.ResponseWriter, request *ht
response.WriteHeader(http.StatusAccepted)
}
// canMount checks whether the user has read permission on at least one repo
// that already contains a cached blob with this digest.
// It returns true if the user is allowed to copy (dedupe/mount) the blob from cache,
// and an error when the cache lookup itself fails (e.g. first write of the blob).
func canMount(userAc *reqCtx.UserAccessControl, imgStore storageTypes.ImageStore, digest godigest.Digest,
) (bool, error) {
	// authz disabled: any cached blob may be mounted
	if userAc == nil {
		return true, nil
	}

	repos, err := imgStore.GetAllDedupeReposCandidates(digest)
	if err != nil {
		// first write
		return false, err
	}

	// allow the mount only if the user can read at least one repo holding this blob;
	// an empty candidate list naturally falls through to false
	for _, repo := range repos {
		if userAc.Can(constants.ReadPermission, repo) {
			return true, nil
		}
	}

	return false, nil
}
// CheckBlob godoc
// @Summary Check image blob/layer
// @Description Check an image's blob/layer given a digest
@ -905,7 +936,31 @@ func (rh *RouteHandler) CheckBlob(response http.ResponseWriter, request *http.Re
digest := godigest.Digest(digestStr)
ok, blen, err := imgStore.CheckBlob(name, digest)
userAc, err := reqCtx.UserAcFromContext(request.Context())
if err != nil {
response.WriteHeader(http.StatusInternalServerError)
return
}
userCanMount, err := canMount(userAc, imgStore, digest)
if err != nil {
rh.c.Log.Error().Err(err).Msg("unexpected error")
}
var blen int64
if userCanMount {
ok, blen, err = imgStore.CheckBlob(name, digest)
} else {
var lockLatency time.Time
imgStore.RLock(&lockLatency)
defer imgStore.RUnlock(&lockLatency)
ok, blen, _, err = imgStore.StatBlob(name, digest)
}
if err != nil {
details := zerr.GetDetails(err)
if errors.Is(err, zerr.ErrBadBlobDigest) { //nolint:gocritic // errorslint conflicts with gocritic:IfElseChain
@ -1191,11 +1246,27 @@ func (rh *RouteHandler) CreateBlobUpload(response http.ResponseWriter, request *
}
mountDigest := godigest.Digest(mountDigests[0])
userAc, err := reqCtx.UserAcFromContext(request.Context())
if err != nil {
response.WriteHeader(http.StatusInternalServerError)
return
}
userCanMount, err := canMount(userAc, imgStore, mountDigest)
if err != nil {
rh.c.Log.Error().Err(err).Msg("unexpected error")
}
// zot does not support cross mounting directly and do a workaround creating using hard link.
// check blob looks for actual path (name+mountDigests[0]) first then look for cache and
// if found in cache, will do hard link and if fails we will start new upload.
_, _, err := imgStore.CheckBlob(name, mountDigest)
if err != nil {
if userCanMount {
_, _, err = imgStore.CheckBlob(name, mountDigest)
}
if err != nil || !userCanMount {
upload, err := imgStore.NewBlobUpload(name)
if err != nil {
details := zerr.GetDetails(err)

View file

@ -174,6 +174,57 @@ func (d *BoltDBDriver) PutBlob(digest godigest.Digest, path string) error {
return nil
}
// GetAllBlobs returns every cached path for the blob matching the provided
// digest: the original path first, followed by any deduped duplicate paths.
// Returns zerr.ErrCacheMiss when the digest has no cache entry at all.
func (d *BoltDBDriver) GetAllBlobs(digest godigest.Digest) ([]string, error) {
	blobPaths := []string{}

	if err := d.db.View(func(tx *bbolt.Tx) error {
		root := tx.Bucket([]byte(constants.BlobsCache))
		if root == nil {
			// this is a serious failure
			err := zerr.ErrCacheRootBucket
			d.log.Error().Err(err).Msg("failed to access root bucket")

			return err
		}

		bucket := root.Bucket([]byte(digest.String()))
		if bucket == nil {
			return zerr.ErrCacheMiss
		}

		// string(...) copies the bolt-owned bytes, which must not escape the transaction
		origin := bucket.Bucket([]byte(constants.OriginalBucket))
		originBlob := string(d.getOne(origin))
		blobPaths = append(blobPaths, originBlob)

		deduped := bucket.Bucket([]byte(constants.DuplicatesBucket))
		if deduped == nil {
			// the origin path alone is a valid result; previously this case was
			// wrongly reported as a cache miss even though the blob was found
			return nil
		}

		cursor := deduped.Cursor()
		for k, _ := cursor.First(); k != nil; k, _ = cursor.Next() {
			duplicateBlob := string(k)
			if duplicateBlob != originBlob {
				blobPaths = append(blobPaths, duplicateBlob)
			}
		}

		return nil
	}); err != nil {
		return nil, err
	}

	return blobPaths, nil
}
func (d *BoltDBDriver) GetBlob(digest godigest.Digest) (string, error) {
var blobPath strings.Builder

View file

@ -122,4 +122,47 @@ func TestBoltDBCache(t *testing.T) {
So(err, ShouldNotBeNil)
So(val, ShouldBeEmpty)
})
Convey("Test cache.GetAllBlos()", t, func() {
dir := t.TempDir()
log := log.NewLogger("debug", "")
So(log, ShouldNotBeNil)
_, err := storage.Create("boltdb", "failTypeAssertion", log)
So(err, ShouldNotBeNil)
cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{dir, "cache_test", false}, log)
So(cacheDriver, ShouldNotBeNil)
err = cacheDriver.PutBlob("digest", "first")
So(err, ShouldBeNil)
err = cacheDriver.PutBlob("digest", "second")
So(err, ShouldBeNil)
err = cacheDriver.PutBlob("digest", "third")
So(err, ShouldBeNil)
blobs, err := cacheDriver.GetAllBlobs("digest")
So(err, ShouldBeNil)
So(blobs, ShouldResemble, []string{"first", "second", "third"})
err = cacheDriver.DeleteBlob("digest", "first")
So(err, ShouldBeNil)
blobs, err = cacheDriver.GetAllBlobs("digest")
So(err, ShouldBeNil)
So(blobs, ShouldResemble, []string{"second", "third"})
err = cacheDriver.DeleteBlob("digest", "third")
So(err, ShouldBeNil)
blobs, err = cacheDriver.GetAllBlobs("digest")
So(err, ShouldBeNil)
So(blobs, ShouldResemble, []string{"second"})
})
}

View file

@ -11,6 +11,8 @@ type Cache interface {
// Retrieves the blob matching provided digest.
GetBlob(digest godigest.Digest) (string, error)
GetAllBlobs(digest godigest.Digest) ([]string, error)
// Uploads blob to cachedb.
PutBlob(digest godigest.Digest, path string) error

View file

@ -141,6 +141,42 @@ func (d *DynamoDBDriver) GetBlob(digest godigest.Digest) (string, error) {
return out.OriginalBlobPath, nil
}
// GetAllBlobs returns every cached path for the blob matching the provided
// digest: the original path first, followed by any deduped duplicate paths.
// Returns zerr.ErrCacheMiss when the digest has no cache entry.
func (d *DynamoDBDriver) GetAllBlobs(digest godigest.Digest) ([]string, error) {
	blobPaths := []string{}

	resp, err := d.client.GetItem(context.TODO(), &dynamodb.GetItemInput{
		TableName: aws.String(d.tableName),
		Key: map[string]types.AttributeValue{
			"Digest": &types.AttributeValueMemberS{Value: digest.String()},
		},
	})
	if err != nil {
		d.log.Error().Err(err).Str("tableName", d.tableName).Msg("failed to get blob")

		return nil, err
	}

	if resp.Item == nil {
		d.log.Debug().Err(zerr.ErrCacheMiss).Str("digest", string(digest)).Msg("failed to find blob in cache")

		return nil, zerr.ErrCacheMiss
	}

	out := Blob{}

	// a malformed item must surface as an error, not be silently treated as empty
	if err := attributevalue.UnmarshalMap(resp.Item, &out); err != nil {
		d.log.Error().Err(err).Str("digest", string(digest)).Msg("failed to unmarshal blob record")

		return nil, err
	}

	blobPaths = append(blobPaths, out.OriginalBlobPath)

	for _, item := range out.DuplicateBlobPath {
		if item != out.OriginalBlobPath {
			blobPaths = append(blobPaths, item)
		}
	}

	return blobPaths, nil
}
func (d *DynamoDBDriver) PutBlob(digest godigest.Digest, path string) error {
if path == "" {
d.log.Error().Err(zerr.ErrEmptyValue).Str("digest", digest.String()).

View file

@ -144,6 +144,48 @@ func TestDynamoDB(t *testing.T) {
So(err, ShouldNotBeNil)
So(val, ShouldBeEmpty)
})
Convey("Test dynamoDB", t, func(c C) {
log := log.NewLogger("debug", "")
cacheDriver, err := storage.Create("dynamodb", cache.DynamoDBDriverParameters{
Endpoint: os.Getenv("DYNAMODBMOCK_ENDPOINT"),
TableName: "BlobTable",
Region: "us-east-2",
}, log)
So(cacheDriver, ShouldNotBeNil)
So(err, ShouldBeNil)
err = cacheDriver.PutBlob("digest", "first")
So(err, ShouldBeNil)
err = cacheDriver.PutBlob("digest", "second")
So(err, ShouldBeNil)
err = cacheDriver.PutBlob("digest", "third")
So(err, ShouldBeNil)
blobs, err := cacheDriver.GetAllBlobs("digest")
So(err, ShouldBeNil)
So(blobs, ShouldResemble, []string{"first", "second", "third"})
err = cacheDriver.DeleteBlob("digest", "first")
So(err, ShouldBeNil)
blobs, err = cacheDriver.GetAllBlobs("digest")
So(err, ShouldBeNil)
So(blobs, ShouldResemble, []string{"second", "third"})
err = cacheDriver.DeleteBlob("digest", "third")
So(err, ShouldBeNil)
blobs, err = cacheDriver.GetAllBlobs("digest")
So(err, ShouldBeNil)
So(blobs, ShouldResemble, []string{"second"})
})
}
func TestDynamoDBError(t *testing.T) {

View file

@ -1114,6 +1114,37 @@ func (is *ImageStore) BlobPath(repo string, digest godigest.Digest) string {
return path.Join(is.rootDir, repo, "blobs", digest.Algorithm().String(), digest.Encoded())
}
// GetAllDedupeReposCandidates returns the repositories that hold a blob with
// the given digest, derived from the dedupe cache paths. It is used by the
// authorization layer to decide whether a user may mount/dedupe this blob.
func (is *ImageStore) GetAllDedupeReposCandidates(digest godigest.Digest) ([]string, error) {
	var lockLatency time.Time

	if err := digest.Validate(); err != nil {
		return nil, err
	}

	is.RLock(&lockLatency)
	defer is.RUnlock(&lockLatency)

	blobsPaths, err := is.cache.GetAllBlobs(digest)
	if err != nil {
		return nil, err
	}

	repos := []string{}

	for _, blobPath := range blobsPaths {
		// these can be both full paths or relative paths depending on the cache options
		if !is.cache.UsesRelativePaths() && path.IsAbs(blobPath) {
			relPath, err := filepath.Rel(is.rootDir, blobPath)
			if err != nil {
				// path cannot be made relative to this store's root; not a candidate
				continue
			}

			blobPath = relPath
		}

		blobsDirIndex := strings.LastIndex(blobPath, "/blobs/")
		if blobsDirIndex < 0 {
			// malformed cache entry; skipping avoids a slice-bounds panic on [:-1]
			continue
		}

		repos = append(repos, blobPath[:blobsDirIndex])
	}

	return repos, nil
}
/*
CheckBlob verifies a blob and returns true if the blob is correct

View file

@ -10,6 +10,7 @@ import (
"io"
"os"
"path"
"slices"
"strings"
"sync"
"testing"
@ -132,6 +133,75 @@ func TestStorageNew(t *testing.T) {
})
}
// TestGetAllDedupeReposCandidates checks that GetAllDedupeReposCandidates
// recovers the exact set of repositories holding a deduped blob, including
// repo names that themselves contain "blobs" path segments, across the
// configured storage backends (local and, when enabled, S3).
func TestGetAllDedupeReposCandidates(t *testing.T) {
	for _, testcase := range testCases {
		testcase := testcase
		t.Run(testcase.testCaseName, func(t *testing.T) {
			var imgStore storageTypes.ImageStore
			if testcase.storageType == storageConstants.S3StorageDriverName {
				tskip.SkipS3(t)
				uuid, err := guuid.NewV4()
				if err != nil {
					panic(err)
				}
				testDir := path.Join("/oci-repo-test", uuid.String())
				tdir := t.TempDir()
				var store driver.StorageDriver
				store, imgStore, _ = createObjectsStore(testDir, tdir)
				defer cleanupStorage(store, testDir)
			} else {
				dir := t.TempDir()
				log := zlog.Logger{Logger: zerolog.New(os.Stdout)}
				metrics := monitoring.NewMetricsServer(false, log)
				// relative-path boltdb cache: candidate extraction must handle both modes
				cacheDriver, _ := storage.Create("boltdb", cache.BoltDBDriverParameters{
					RootDir:     dir,
					Name:        "cache",
					UseRelPaths: true,
				}, log)
				driver := local.New(true)
				imgStore = imagestore.NewImageStore(dir, dir, true, true, log, metrics, nil, driver, cacheDriver)
			}
			Convey("Push repos with deduped blobs", t, func(c C) {
				// repo names chosen to stress the "/blobs/" path parsing:
				// nested repos and repos whose names contain "blobs"
				repoNames := []string{
					"first",
					"second",
					"repo/a",
					"repo/a/b/c/d/e/f",
					"repo/repo-b/blobs",
					"foo/bar/baz",
					"blobs/foo/bar/blobs",
					"blobs",
					"blobs/foo",
				}
				storeController := storage.StoreController{DefaultStore: imgStore}
				image := CreateRandomImage()
				// the same image in every repo means every layer blob is deduped across all of them
				for _, repoName := range repoNames {
					err := WriteImageToFileSystem(image, repoName, tag, storeController)
					So(err, ShouldBeNil)
				}
				randomBlobDigest := image.Manifest.Layers[0].Digest
				repos, err := imgStore.GetAllDedupeReposCandidates(randomBlobDigest)
				So(err, ShouldBeNil)
				// sort both sides for a deterministic comparison
				// NOTE(review): args look swapped vs the So(actual, ..., expected)
				// convention; harmless for equality but worth confirming
				slices.Sort(repoNames)
				slices.Sort(repos)
				So(repoNames, ShouldResemble, repos)
			})
		})
	}
}
func TestStorageAPIs(t *testing.T) {
for _, testcase := range testCases {
testcase := testcase

View file

@ -63,6 +63,7 @@ type ImageStore interface { //nolint:interfacebloat
GetAllBlobs(repo string) ([]string, error)
PopulateStorageMetrics(interval time.Duration, sch *scheduler.Scheduler)
VerifyBlobDigestValue(repo string, digest godigest.Digest) error
GetAllDedupeReposCandidates(digest godigest.Digest) ([]string, error)
}
type Driver interface { //nolint:interfacebloat

View file

@ -6,6 +6,8 @@ type CacheMock struct {
// Returns the human-readable "name" of the driver.
NameFn func() string
GetAllBlobsFn func(digest godigest.Digest) ([]string, error)
// Retrieves the blob matching provided digest.
GetBlobFn func(digest godigest.Digest) (string, error)
@ -68,3 +70,11 @@ func (cacheMock CacheMock) DeleteBlob(digest godigest.Digest, path string) error
return nil
}
// GetAllBlobs delegates to GetAllBlobsFn when one is configured; otherwise it
// reports no known paths for the digest.
func (cacheMock CacheMock) GetAllBlobs(digest godigest.Digest) ([]string, error) {
	if cacheMock.GetAllBlobsFn == nil {
		return []string{}, nil
	}

	return cacheMock.GetAllBlobsFn(digest)
}

View file

@ -57,6 +57,7 @@ type MockedImageStore struct {
PopulateStorageMetricsFn func(interval time.Duration, sch *scheduler.Scheduler)
StatIndexFn func(repo string) (bool, int64, time.Time, error)
VerifyBlobDigestValueFn func(repo string, digest godigest.Digest) error
GetAllDedupeReposCandidatesFn func(digest godigest.Digest) ([]string, error)
}
func (is MockedImageStore) StatIndex(repo string) (bool, int64, time.Time, error) {
@ -419,3 +420,11 @@ func (is MockedImageStore) VerifyBlobDigestValue(repo string, digest godigest.Di
return nil
}
// GetAllDedupeReposCandidates delegates to GetAllDedupeReposCandidatesFn when
// one is configured; otherwise it returns an empty candidate list.
// Fix: the guard previously checked GetAllBlobsFn (copy-paste), so a mock with
// only GetAllBlobsFn set would call a nil function and panic.
func (is MockedImageStore) GetAllDedupeReposCandidates(digest godigest.Digest) ([]string, error) {
	if is.GetAllDedupeReposCandidatesFn != nil {
		return is.GetAllDedupeReposCandidatesFn(digest)
	}

	return []string{}, nil
}