
dedupe: use hard links to dedupe blobs

As the number of repositories and layers increases, so does the probability
that layers are duplicated. We dedupe using hard links when the content is
the same. This is intended to be purely a storage-layer optimization.
Access control, when available, is orthogonal to this optimization.
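A minimal sketch of the idea (illustrative helper names only; the actual implementation is the DedupeBlob code in pkg/storage/storage.go below): if a blob with the same digest is already on disk, hard-link the new path to it instead of storing a second copy.

```go
// Sketch of hard-link dedupe; existingPathFor is a hypothetical stand-in
// for the cache lookup described below, not part of this change.
package dedupe

import (
    "fmt"
    "os"
)

// existingPathFor returns the path of an already-stored blob with the same
// digest, or "" if none is known yet (stand-in for the durable cache).
func existingPathFor(digest string) string { return "" }

// storeBlob moves an uploaded blob into its final location, hard-linking
// it to an existing copy when the content (digest) is identical.
func storeBlob(uploadPath, digest, dstPath string) error {
    if existing := existingPathFor(digest); existing != "" {
        // Same content is already on disk: link instead of copying,
        // then drop the now-redundant upload.
        if err := os.Link(existing, dstPath); err != nil {
            return fmt.Errorf("hard link failed: %w", err)
        }
        return os.Remove(uploadPath)
    }
    // First time this digest is seen: just move the upload into place.
    return os.Rename(uploadPath, dstPath)
}
```

Hard links only work within a single filesystem, which is why the optimization applies across repositories sharing the same storage root directory.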

Add a durable cache to help speed up layer lookups.
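As a sketch, a lookup against that cache might look like the following, assuming the bucket layout introduced in pkg/storage/cache.go below (a root "blobs" bucket containing one sub-bucket per digest, whose keys are the on-disk blob paths):

```go
// Illustrative digest -> path lookup backed by bbolt, mirroring the bucket
// layout of pkg/storage/cache.go; names here are for illustration only.
package dedupe

import (
    "errors"

    "go.etcd.io/bbolt"
)

var errCacheMiss = errors.New("cache: miss")

// lookupBlob returns one known on-disk path for a digest, or errCacheMiss
// if the digest has never been recorded in the cache.
func lookupBlob(db *bbolt.DB, digest string) (string, error) {
    var found string
    err := db.View(func(tx *bbolt.Tx) error {
        root := tx.Bucket([]byte("blobs"))
        if root == nil {
            return errCacheMiss
        }
        b := root.Bucket([]byte(digest))
        if b == nil {
            return errCacheMiss
        }
        // Keys of the per-digest bucket are blob paths; return any one.
        k, _ := b.Cursor().First()
        found = string(k)
        return nil
    })
    return found, err
}
```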

Update README.

Add more unit tests.
Ramkumar Chinchani 2020-02-17 13:57:15 -08:00
parent 365145d2cd
commit 25f5a45296
13 changed files with 767 additions and 88 deletions


@ -7,8 +7,14 @@
* Uses [OCI storage layout](https://github.com/opencontainers/image-spec/blob/master/image-layout.md) for storage layout
* Currently suitable for on-prem deployments (e.g. colocated with Kubernetes)
* TLS support
* Authentication via TLS mutual authentication and HTTP *BASIC* (local _htpasswd_ and LDAP)
* Authentication via:
* TLS mutual authentication
* HTTP *Basic* (local _htpasswd_ and LDAP)
* HTTP *Bearer* token
* Doesn't require _root_ privileges
* Storage optimizations:
* Automatic garbage collection of orphaned blobs
* Layer deduplication using hard links when content is identical
* Swagger based documentation
* Released under Apache 2.0 License
* ```go get -u github.com/anuvu/zot/cmd/zot```


@ -872,8 +872,8 @@ go_repository(
go_repository(
name = "io_etcd_go_bbolt",
importpath = "go.etcd.io/bbolt",
sum = "h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=",
version = "v1.3.2",
sum = "h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=",
version = "v1.3.4",
)
go_repository(
@ -935,8 +935,8 @@ go_repository(
go_repository(
name = "org_golang_x_sys",
importpath = "golang.org/x/sys",
sum = "h1:wYqz/tQaWUgGKyx+B/rssSE6wkIKdY5Ee6ryOmzarIg=",
version = "v0.0.0-20190913121621-c3b328c6e5a7",
sum = "h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=",
version = "v0.0.0-20200223170610-d5e6a3e2c0ae",
)
go_repository(
@ -1198,6 +1198,20 @@ go_repository(
version = "v1.51.0",
)
go_repository(
name = "com_github_boltdb_bolt",
importpath = "github.com/boltdb/bolt",
sum = "h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=",
version = "v1.3.1",
)
go_repository(
name = "com_github_etcd_io_bbolt",
importpath = "github.com/etcd-io/bbolt",
sum = "h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=",
version = "v1.3.3",
)
go_repository(
name = "com_github_apex_log",
importpath = "github.com/apex/log",


@ -21,4 +21,7 @@ var (
ErrLDAPEmptyPassphrase = errors.New("ldap: empty passphrase")
ErrLDAPBadConn = errors.New("ldap: bad connection")
ErrLDAPConfig = errors.New("config: invalid LDAP configuration")
ErrCacheRootBucket = errors.New("cache: unable to create/update root bucket")
ErrCacheNoBucket = errors.New("cache: unable to find bucket")
ErrCacheMiss = errors.New("cache: miss")
)

go.mod (2 lines changed)

@ -26,6 +26,8 @@ require (
github.com/spf13/viper v1.6.1
github.com/swaggo/http-swagger v0.0.0-20190614090009-c2865af9083e
github.com/swaggo/swag v1.6.3
go.etcd.io/bbolt v1.3.4
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae // indirect
gopkg.in/resty.v1 v1.12.0
)

go.sum (10 lines changed)

@ -160,6 +160,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nmcclain/asn1-ber v0.0.0-20170104154839-2661553a0484 h1:D9EvfGQvlkKaDr2CRKN++7HbSXbefUNDrPq60T+g24s=
@ -171,8 +172,6 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/openSUSE/umoci v0.4.5 h1:MZgFLy5Jl3VKe5uEOU9c25FoySbx3vUXeXLw4Jf6aRs=
github.com/openSUSE/umoci v0.4.5/go.mod h1:3p4KA5nwyY65lVmQZxv7tm0YEylJ+t1fY91ORsVXv58=
github.com/openSUSE/umoci v0.4.6-0.20200320140503-9aa268eeb258 h1:/8Yu54FufyHHQgIZ/wLy+BLQyzk0gbOG24xf5suWOOI=
github.com/openSUSE/umoci v0.4.6-0.20200320140503-9aa268eeb258/go.mod h1:3p4KA5nwyY65lVmQZxv7tm0YEylJ+t1fY91ORsVXv58=
github.com/opencontainers/distribution-spec v1.0.0-rc0 h1:xMzwhweo1gjvEo74mQjGTLau0TD3ACyTEC1310NbuSQ=
@ -283,6 +282,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@ -329,8 +330,10 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae h1:xiXzMMEQdQcric9hXtr1QU98MHunKK7OTtsoU6bYWs4=
golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190913121621-c3b328c6e5a7 h1:wYqz/tQaWUgGKyx+B/rssSE6wkIKdY5Ee6ryOmzarIg=
golang.org/x/sys v0.0.0-20190913121621-c3b328c6e5a7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@ -355,6 +358,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=


@ -7,6 +7,7 @@ import (
"io/ioutil"
"net"
"net/http"
"os"
"github.com/anuvu/zot/errors"
"github.com/anuvu/zot/pkg/log"
@ -41,12 +42,16 @@ func (c *Controller) Run() error {
engine.Use(log.SessionLogger(c.Log), handlers.RecoveryHandler(handlers.RecoveryLogger(c.Log),
handlers.PrintRecoveryStack(false)))
c.ImageStore = storage.NewImageStore(c.Config.Storage.RootDirectory, c.Log)
if c.ImageStore == nil {
// we can't proceed without at least an image store
os.Exit(1)
}
c.Router = engine
c.Router.UseEncodedPath()
_ = NewRouteHandler(c)
c.ImageStore = storage.NewImageStore(c.Config.Storage.RootDirectory, c.Log)
addr := fmt.Sprintf("%s:%s", c.Config.HTTP.Address, c.Config.HTTP.Port)
server := &http.Server{Addr: addr, Handler: c.Router}
c.Server = server


@ -20,7 +20,6 @@ import (
"sort"
"strconv"
"strings"
"sync"
_ "github.com/anuvu/zot/docs" // nolint (golint) - as required by swaggo
"github.com/anuvu/zot/errors"
@ -42,11 +41,10 @@ const (
type RouteHandler struct {
c *Controller
blobLock sync.RWMutex
}
func NewRouteHandler(c *Controller) *RouteHandler {
rh := &RouteHandler{c: c, blobLock: sync.RWMutex{}}
rh := &RouteHandler{c: c}
rh.SetupRoutes()
return rh
@ -56,9 +54,9 @@ func NewRouteHandler(c *Controller) *RouteHandler {
func (rh *RouteHandler) blobRLockWrapper(f func(w http.ResponseWriter,
r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
rh.blobLock.RLock()
rh.c.ImageStore.RLock()
f(w, r)
rh.blobLock.RUnlock()
rh.c.ImageStore.RUnlock()
}
}
@ -66,9 +64,9 @@ func (rh *RouteHandler) blobRLockWrapper(f func(w http.ResponseWriter,
func (rh *RouteHandler) blobLockWrapper(f func(w http.ResponseWriter,
r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
rh.blobLock.Lock()
rh.c.ImageStore.Lock()
f(w, r)
rh.blobLock.Unlock()
rh.c.ImageStore.Unlock()
}
}


@ -135,7 +135,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
}
// without a "?digest=<>" should fail
content := []byte("this is a blob")
content := []byte("this is a blob1")
digest := godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
resp, err = resty.R().Put(loc)
@ -172,7 +172,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
Convey("Monolithic blob upload with body", func() {
Print("\nMonolithic blob upload")
// create content
content := []byte("this is a blob")
content := []byte("this is a blob2")
digest := godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
// setting invalid URL params should fail
@ -228,7 +228,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
}
// without a "?digest=<>" should fail
content := []byte("this is a blob")
content := []byte("this is a blob3")
digest := godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
resp, err = resty.R().Put(loc)
@ -271,7 +271,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(loc, ShouldNotBeEmpty)
var buf bytes.Buffer
chunk1 := []byte("this is the first chunk")
chunk1 := []byte("this is the first chunk1")
n, err := buf.Write(chunk1)
So(n, ShouldEqual, len(chunk1))
So(err, ShouldBeNil)
@ -299,7 +299,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(resp.StatusCode(), ShouldEqual, 416)
So(resp.String(), ShouldNotBeEmpty)
chunk2 := []byte("this is the second chunk")
chunk2 := []byte("this is the second chunk1")
n, err = buf.Write(chunk2)
So(n, ShouldEqual, len(chunk2))
So(err, ShouldBeNil)
@ -339,7 +339,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(loc, ShouldNotBeEmpty)
var buf bytes.Buffer
chunk1 := []byte("this is the first chunk")
chunk1 := []byte("this is the first chunk2")
n, err := buf.Write(chunk1)
So(n, ShouldEqual, len(chunk1))
So(err, ShouldBeNil)
@ -367,7 +367,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(resp.StatusCode(), ShouldEqual, 416)
So(resp.String(), ShouldNotBeEmpty)
chunk2 := []byte("this is the second chunk")
chunk2 := []byte("this is the second chunk2")
n, err = buf.Write(chunk2)
So(n, ShouldEqual, len(chunk2))
So(err, ShouldBeNil)
@ -422,7 +422,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
loc := Location(baseURL, resp)
So(loc, ShouldNotBeEmpty)
content := []byte("this is a blob")
content := []byte("this is a blob4")
digest := godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
// monolithic blob upload
@ -461,7 +461,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
resp, err = resty.R().Get(loc)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 204)
content := []byte("this is a blob")
content := []byte("this is a blob5")
digest := godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
// monolithic blob upload: success
@ -507,7 +507,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(d, ShouldNotBeEmpty)
So(d, ShouldEqual, digest.String())
content = []byte("this is a blob")
content = []byte("this is a blob5")
digest = godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
// create a manifest with same blob but a different tag
@ -611,7 +611,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
resp, err = resty.R().Get(loc)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 204)
content := []byte("this is a blob")
content := []byte("this is a blob7")
digest := godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
// monolithic blob upload: success


@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["storage.go"],
srcs = [
"cache.go",
"storage.go",
],
importpath = "github.com/anuvu/zot/pkg/storage",
visibility = ["//visibility:public"],
deps = [
@ -13,16 +16,21 @@ go_library(
"@com_github_opencontainers_image_spec//specs-go/v1:go_default_library",
"@com_github_opensuse_umoci//:go_default_library",
"@com_github_rs_zerolog//:go_default_library",
"@io_etcd_go_bbolt//:go_default_library",
],
)
go_test(
name = "go_default_test",
timeout = "short",
srcs = ["storage_test.go"],
srcs = [
"cache_test.go",
"storage_test.go",
],
embed = [":go_default_library"],
race = "on",
deps = [
"//errors:go_default_library",
"//pkg/log:go_default_library",
"@com_github_opencontainers_go_digest//:go_default_library",
"@com_github_opencontainers_image_spec//specs-go/v1:go_default_library",

pkg/storage/cache.go (new file, 163 lines)

@ -0,0 +1,163 @@
package storage
import (
"path"
"strings"
"github.com/anuvu/zot/errors"
zlog "github.com/anuvu/zot/pkg/log"
"go.etcd.io/bbolt"
)
const (
BlobsCache = "blobs"
)
type Cache struct {
db *bbolt.DB
log zlog.Logger
}
// Blob is a blob record
type Blob struct {
Path string
}
func NewCache(rootDir string, name string, log zlog.Logger) *Cache {
dbPath := path.Join(rootDir, name+".db")
db, err := bbolt.Open(dbPath, 0600, nil)
if err != nil {
log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create cache db")
return nil
}
if err := db.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucketIfNotExists([]byte(BlobsCache)); err != nil {
// this is a serious failure
log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create a root bucket")
return err
}
return nil
}); err != nil {
// something went wrong
log.Error().Err(err).Msg("unable to create a cache")
return nil
}
return &Cache{db: db, log: log}
}
func (c *Cache) PutBlob(digest string, path string) error {
if err := c.db.Update(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
b, err := root.CreateBucketIfNotExists([]byte(digest))
if err != nil {
// this is a serious failure
c.log.Error().Err(err).Str("bucket", digest).Msg("unable to create a bucket")
return err
}
if err := b.Put([]byte(path), nil); err != nil {
c.log.Error().Err(err).Str("bucket", digest).Str("value", path).Msg("unable to put record")
return err
}
return nil
}); err != nil {
return err
}
return nil
}
func (c *Cache) GetBlob(digest string) (string, error) {
var blobPath strings.Builder
if err := c.db.View(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
b := root.Bucket([]byte(digest))
if b != nil {
// get first key
c := b.Cursor()
k, _ := c.First()
blobPath.WriteString(string(k))
return nil
}
return errors.ErrCacheMiss
}); err != nil {
return "", err
}
if len(blobPath.String()) == 0 {
return "", nil
}
return blobPath.String(), nil
}
func (c *Cache) HasBlob(digest string, blob string) bool {
if err := c.db.View(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
b := root.Bucket([]byte(digest))
if b == nil {
return errors.ErrCacheMiss
}
if b.Get([]byte(blob)) == nil {
return errors.ErrCacheMiss
}
return nil
}); err != nil {
return false
}
return true
}
func (c *Cache) DeleteBlob(digest string, path string) error {
if err := c.db.Update(func(tx *bbolt.Tx) error {
root := tx.Bucket([]byte(BlobsCache))
if root == nil {
// this is a serious failure
err := errors.ErrCacheRootBucket
c.log.Error().Err(err).Msg("unable to access root bucket")
return err
}
b := root.Bucket([]byte(digest))
if b == nil {
return errors.ErrCacheMiss
}
if err := b.Delete([]byte(path)); err != nil {
c.log.Error().Err(err).Str("digest", digest).Str("path", path).Msg("unable to delete")
return err
}
return nil
}); err != nil {
return err
}
return nil
}

pkg/storage/cache_test.go (new file, 52 lines)

@ -0,0 +1,52 @@
package storage_test
import (
"io/ioutil"
"os"
"testing"
"github.com/anuvu/zot/errors"
"github.com/anuvu/zot/pkg/log"
"github.com/anuvu/zot/pkg/storage"
. "github.com/smartystreets/goconvey/convey"
)
func TestCache(t *testing.T) {
Convey("Make a new cache", t, func() {
dir, err := ioutil.TempDir("", "cache_test")
So(err, ShouldBeNil)
So(dir, ShouldNotBeEmpty)
defer os.RemoveAll(dir)
log := log.NewLogger("debug", "")
So(log, ShouldNotBeNil)
So(storage.NewCache("/deadBEEF", "cache_test", log), ShouldBeNil)
c := storage.NewCache(dir, "cache_test", log)
So(c, ShouldNotBeNil)
v, err := c.GetBlob("key")
So(err, ShouldEqual, errors.ErrCacheMiss)
So(v, ShouldBeEmpty)
b := c.HasBlob("key", "value")
So(b, ShouldBeFalse)
err = c.PutBlob("key", "value")
So(err, ShouldBeNil)
b = c.HasBlob("key", "value")
So(b, ShouldBeTrue)
v, err = c.GetBlob("key")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
err = c.DeleteBlob("bogusKey", "bogusValue")
So(err, ShouldEqual, errors.ErrCacheMiss)
err = c.DeleteBlob("key", "bogusValue")
So(err, ShouldBeNil)
})
}


@ -37,22 +37,48 @@ type ImageStore struct {
rootDir string
lock *sync.RWMutex
blobUploads map[string]BlobUpload
cache *Cache
log zerolog.Logger
}
// NewImageStore returns a new image store backed by a file storage.
func NewImageStore(rootDir string, log zlog.Logger) *ImageStore {
is := &ImageStore{rootDir: rootDir,
if _, err := os.Stat(rootDir); os.IsNotExist(err) {
if err := os.MkdirAll(rootDir, 0700); err != nil {
log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir")
return nil
}
}
is := &ImageStore{
rootDir: rootDir,
lock: &sync.RWMutex{},
blobUploads: make(map[string]BlobUpload),
cache: NewCache(rootDir, "cache", log),
log: log.With().Caller().Logger(),
}
if _, err := os.Stat(rootDir); os.IsNotExist(err) {
_ = os.MkdirAll(rootDir, 0700)
return is
}
return is
// RLock read-lock
func (is *ImageStore) RLock() {
is.lock.RLock()
}
// RUnlock read-unlock
func (is *ImageStore) RUnlock() {
is.lock.RUnlock()
}
// Lock write-lock
func (is *ImageStore) Lock() {
is.lock.Lock()
}
// Unlock write-unlock
func (is *ImageStore) Unlock() {
is.lock.Unlock()
}
// InitRepo creates an image repository under this store.
@ -63,16 +89,10 @@ func (is *ImageStore) InitRepo(name string) error {
return nil
}
// create repo dir
ensureDir(repoDir)
// create "blobs" subdir
dir := path.Join(repoDir, "blobs")
ensureDir(dir)
ensureDir(path.Join(repoDir, "blobs"), is.log)
// create BlobUploadDir subdir
dir = path.Join(repoDir, BlobUploadDir)
ensureDir(dir)
ensureDir(path.Join(repoDir, BlobUploadDir), is.log)
// "oci-layout" file - create if it doesn't exist
ilPath := path.Join(repoDir, ispec.ImageLayoutFile)
@ -85,7 +105,8 @@ func (is *ImageStore) InitRepo(name string) error {
}
if err := ioutil.WriteFile(ilPath, buf, 0644); err != nil {
is.log.Panic().Err(err).Str("file", ilPath).Msg("unable to write file")
is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file")
return err
}
}
@ -101,7 +122,8 @@ func (is *ImageStore) InitRepo(name string) error {
}
if err := ioutil.WriteFile(indexPath, buf, 0644); err != nil {
is.log.Panic().Err(err).Str("file", indexPath).Msg("unable to write file")
is.log.Error().Err(err).Str("file", indexPath).Msg("unable to write file")
return err
}
}
@ -111,8 +133,8 @@ func (is *ImageStore) InitRepo(name string) error {
// ValidateRepo validates that the repository layout is compliant with the OCI repo layout.
func (is *ImageStore) ValidateRepo(name string) (bool, error) {
// https://github.com/opencontainers/image-spec/blob/master/image-layout.md#content
// at least, expect exactly 4 entries - ["blobs", "oci-layout", "index.json"] and BlobUploadDir
// in each image store
// expect at least 3 entries - ["blobs", "oci-layout", "index.json"]
// and an additional/optional BlobUploadDir in each image store
dir := path.Join(is.rootDir, name)
if !dirExists(dir) {
return false, errors.ErrRepoNotFound
@ -124,15 +146,14 @@ func (is *ImageStore) ValidateRepo(name string) (bool, error) {
return false, errors.ErrRepoNotFound
}
if len(files) != 4 {
return false, nil
if len(files) < 3 {
return false, errors.ErrRepoBadVersion
}
found := map[string]bool{
"blobs": false,
ispec.ImageLayoutFile: false,
"index.json": false,
BlobUploadDir: false,
}
for _, file := range files {
@ -195,7 +216,7 @@ func (is *ImageStore) GetRepositories() ([]string, error) {
return nil
}
is.log.Debug().Str("dir", path).Str("name", info.Name()).Msg("found image store")
//is.log.Debug().Str("dir", path).Str("name", info.Name()).Msg("found image store")
stores = append(stores, rel)
return nil
@ -212,7 +233,6 @@ func (is *ImageStore) GetImageTags(repo string) ([]string, error) {
}
buf, err := ioutil.ReadFile(path.Join(dir, "index.json"))
if err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
return nil, errors.ErrRepoNotFound
@ -290,9 +310,7 @@ func (is *ImageStore) GetImageManifest(repo string, reference string) ([]byte, s
return nil, "", "", errors.ErrManifestNotFound
}
p := path.Join(dir, "blobs")
p = path.Join(p, digest.Algorithm().String())
p = path.Join(p, digest.Encoded())
p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
buf, err = ioutil.ReadFile(p)
@ -319,19 +337,24 @@ func (is *ImageStore) GetImageManifest(repo string, reference string) ([]byte, s
func (is *ImageStore) PutImageManifest(repo string, reference string, mediaType string,
body []byte) (string, error) {
if err := is.InitRepo(repo); err != nil {
is.log.Debug().Err(err).Msg("init repo")
return "", err
}
if mediaType != ispec.MediaTypeImageManifest {
is.log.Debug().Interface("actual", mediaType).
Interface("expected", ispec.MediaTypeImageManifest).Msg("bad manifest media type")
return "", errors.ErrBadManifest
}
if len(body) == 0 {
is.log.Debug().Int("len", len(body)).Msg("invalid body length")
return "", errors.ErrBadManifest
}
var m ispec.Manifest
if err := json.Unmarshal(body, &m); err != nil {
is.log.Error().Err(err).Msg("unable to unmarshal JSON")
return "", errors.ErrBadManifest
}
@ -345,6 +368,7 @@ func (is *ImageStore) PutImageManifest(repo string, reference string, mediaType
blobPath := is.BlobPath(repo, digest)
if _, err := os.Stat(blobPath); err != nil {
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to find blob")
return digest.String(), errors.ErrBlobNotFound
}
}
@ -418,13 +442,12 @@ func (is *ImageStore) PutImageManifest(repo string, reference string, mediaType
}
// write manifest to "blobs"
dir = path.Join(is.rootDir, repo)
dir = path.Join(dir, "blobs")
dir = path.Join(dir, mDigest.Algorithm().String())
_ = os.MkdirAll(dir, 0755)
dir = path.Join(is.rootDir, repo, "blobs", mDigest.Algorithm().String())
ensureDir(dir, is.log)
file := path.Join(dir, mDigest.Encoded())
if err := ioutil.WriteFile(file, body, 0644); err != nil {
is.log.Error().Err(err).Str("file", file).Msg("unable to write")
return "", err
}
@ -435,10 +458,12 @@ func (is *ImageStore) PutImageManifest(repo string, reference string, mediaType
buf, err = json.Marshal(index)
if err != nil {
is.log.Error().Err(err).Str("file", file).Msg("unable to marshal JSON")
return "", err
}
if err := ioutil.WriteFile(file, buf, 0644); err != nil {
is.log.Error().Err(err).Str("file", file).Msg("unable to write")
return "", err
}
@ -530,9 +555,7 @@ func (is *ImageStore) DeleteImageManifest(repo string, reference string) error {
return err
}
p := path.Join(dir, "blobs")
p = path.Join(p, digest.Algorithm().String())
p = path.Join(p, digest.Encoded())
p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
_ = os.Remove(p)
@ -542,8 +565,7 @@ func (is *ImageStore) DeleteImageManifest(repo string, reference string) error {
// BlobUploadPath returns the upload path for a blob in this store.
func (is *ImageStore) BlobUploadPath(repo string, uuid string) string {
dir := path.Join(is.rootDir, repo)
blobUploadPath := path.Join(dir, BlobUploadDir)
blobUploadPath = path.Join(blobUploadPath, uuid)
blobUploadPath := path.Join(dir, BlobUploadDir, uuid)
return blobUploadPath
}
@ -711,14 +733,17 @@ func (is *ImageStore) FinishBlobUpload(repo string, uuid string, body io.Reader,
return errors.ErrBadBlobDigest
}
dir := path.Join(is.rootDir, repo)
dir = path.Join(dir, "blobs")
dir = path.Join(dir, dstDigest.Algorithm().String())
_ = os.MkdirAll(dir, 0755)
dir := path.Join(is.rootDir, repo, "blobs", dstDigest.Algorithm().String())
ensureDir(dir, is.log)
dst := is.BlobPath(repo, dstDigest)
// move the blob from uploads to final dest
_ = os.Rename(src, dst)
if is.cache != nil {
if err := is.DedupeBlob(src, dstDigest, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to dedupe blob")
return err
}
}
return err
}
@ -767,34 +792,80 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, digest string)
return "", -1, errors.ErrBadBlobDigest
}
dir := path.Join(is.rootDir, repo)
dir = path.Join(dir, "blobs")
dir = path.Join(dir, dstDigest.Algorithm().String())
_ = os.MkdirAll(dir, 0755)
dir := path.Join(is.rootDir, repo, "blobs", dstDigest.Algorithm().String())
ensureDir(dir, is.log)
dst := is.BlobPath(repo, dstDigest)
// move the blob from uploads to final dest
_ = os.Rename(src, dst)
if is.cache != nil {
if err := is.DedupeBlob(src, dstDigest, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to dedupe blob")
return "", -1, err
}
}
return uuid, n, err
}
// nolint (interfacer)
func (is *ImageStore) DedupeBlob(src string, dstDigest godigest.Digest, dst string) error {
dstRecord, err := is.cache.GetBlob(dstDigest.String())
if err != nil && err != errors.ErrCacheMiss {
is.log.Error().Err(err).Str("blobPath", dst).Msg("unable to lookup blob record")
return err
}
if dstRecord == "" {
if err := is.cache.PutBlob(dstDigest.String(), dst); err != nil {
is.log.Error().Err(err).Str("blobPath", dst).Msg("unable to insert blob record")
return err
}
// move the blob from uploads to final dest
if err := os.Rename(src, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("unable to rename blob")
return err
}
} else {
dstRecordFi, err := os.Stat(dstRecord)
if err != nil {
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("unable to stat")
return err
}
dstFi, err := os.Stat(dst)
if err != nil && !os.IsNotExist(err) {
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("unable to stat")
return err
}
if !os.SameFile(dstFi, dstRecordFi) {
if err := os.Link(dstRecord, dst); err != nil {
is.log.Error().Err(err).Str("blobPath", dst).Str("link", dstRecord).Msg("unable to hard link")
return err
}
}
if err := os.Remove(src); err != nil {
is.log.Error().Err(err).Str("src", src).Msg("uname to remove blob")
return err
}
}
return nil
}
// DeleteBlobUpload deletes an existing blob upload that is currently in progress.
func (is *ImageStore) DeleteBlobUpload(repo string, uuid string) error {
blobUploadPath := is.BlobUploadPath(repo, uuid)
_ = os.Remove(blobUploadPath)
if err := os.Remove(blobUploadPath); err != nil {
is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("error deleting blob upload")
return err
}
return nil
}
// BlobPath returns the repository path of a blob.
func (is *ImageStore) BlobPath(repo string, digest godigest.Digest) string {
dir := path.Join(is.rootDir, repo)
blobPath := path.Join(dir, "blobs")
blobPath = path.Join(blobPath, digest.Algorithm().String())
blobPath = path.Join(blobPath, digest.Encoded())
return blobPath
return path.Join(is.rootDir, repo, "blobs", digest.Algorithm().String(), digest.Encoded())
}
// CheckBlob verifies a blob and returns true if the blob is correct.
@ -860,7 +931,17 @@ func (is *ImageStore) DeleteBlob(repo string, digest string) error {
return errors.ErrBlobNotFound
}
_ = os.Remove(blobPath)
if is.cache != nil {
if err := is.cache.DeleteBlob(digest, blobPath); err != nil {
is.log.Error().Err(err).Str("digest", digest).Str("blobPath", blobPath).Msg("unable to remove blob path from cache")
return err
}
}
if err := os.Remove(blobPath); err != nil {
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path")
return err
}
return nil
}
@ -888,8 +969,8 @@ func dirExists(d string) bool {
return true
}
func ensureDir(dir string) {
func ensureDir(dir string, log zerolog.Logger) {
if err := os.MkdirAll(dir, 0755); err != nil {
panic(err)
log.Panic().Err(err).Str("dir", dir).Msg("unable to create dir")
}
}


@ -6,6 +6,9 @@ import (
"encoding/json"
"io/ioutil"
"os"
"path"
"strings"
"sync"
"testing"
"github.com/anuvu/zot/pkg/log"
@ -86,13 +89,103 @@ func TestAPIs(t *testing.T) {
So(err, ShouldBeNil)
So(b, ShouldBeGreaterThanOrEqualTo, 0)
content := []byte("test-data")
content := []byte("test-data1")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
b, err = il.PutBlobChunk("test", v, 0, int64(l), buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
blobDigest := d
err = il.FinishBlobUpload("test", v, buf, d.String())
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
_, _, err = il.CheckBlob("test", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
_, _, err = il.GetBlob("test", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
m := ispec.Manifest{}
m.SchemaVersion = 2
mb, _ := json.Marshal(m)
Convey("Bad image manifest", func() {
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb)
So(err, ShouldNotBeNil)
_, _, _, err = il.GetImageManifest("test", d.String())
So(err, ShouldNotBeNil)
})
Convey("Good image manifest", func() {
m := ispec.Manifest{
Config: ispec.Descriptor{
Digest: d,
Size: int64(l),
},
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
},
},
}
m.SchemaVersion = 2
mb, _ = json.Marshal(m)
d := godigest.FromBytes(mb)
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb)
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("test", d.String())
So(err, ShouldBeNil)
err = il.DeleteImageManifest("test", "1.0")
So(err, ShouldNotBeNil)
err = il.DeleteBlob("test", blobDigest.String())
So(err, ShouldBeNil)
err = il.DeleteImageManifest("test", d.String())
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("test", d.String())
So(err, ShouldNotBeNil)
})
})
err = il.DeleteBlobUpload("test", v)
So(err, ShouldNotBeNil)
})
Convey("New blob upload streamed", func() {
v, err := il.NewBlobUpload("test")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
Convey("Get blob upload", func() {
b, err := il.GetBlobUpload("test", "invalid")
So(err, ShouldNotBeNil)
So(b, ShouldEqual, -1)
b, err = il.GetBlobUpload("test", v)
So(err, ShouldBeNil)
So(b, ShouldBeGreaterThanOrEqualTo, 0)
b, err = il.BlobUploadInfo("test", v)
So(err, ShouldBeNil)
So(b, ShouldBeGreaterThanOrEqualTo, 0)
content := []byte("test-data2")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
b, err = il.PutBlobChunkStreamed("test", v, buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
err = il.FinishBlobUpload("test", v, buf, d.String())
So(err, ShouldBeNil)
@ -151,7 +244,257 @@ func TestAPIs(t *testing.T) {
})
err = il.DeleteBlobUpload("test", v)
So(err, ShouldNotBeNil)
})
Convey("Dedupe", func() {
blobDigest1 := ""
blobDigest2 := ""
// manifest1
v, err := il.NewBlobUpload("dedupe1")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
content := []byte("test-data3")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
b, err := il.PutBlobChunkStreamed("dedupe1", v, buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
blobDigest1 = strings.Split(d.String(), ":")[1]
So(blobDigest1, ShouldNotBeEmpty)
err = il.FinishBlobUpload("dedupe1", v, buf, d.String())
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
_, _, err = il.CheckBlob("dedupe1", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
_, _, err = il.GetBlob("dedupe1", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
m := ispec.Manifest{}
m.SchemaVersion = 2
m = ispec.Manifest{
Config: ispec.Descriptor{
Digest: d,
Size: int64(l),
},
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
},
},
}
m.SchemaVersion = 2
mb, _ := json.Marshal(m)
d = godigest.FromBytes(mb)
_, err = il.PutImageManifest("dedupe1", d.String(), ispec.MediaTypeImageManifest, mb)
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("dedupe1", d.String())
So(err, ShouldBeNil)
// manifest2
v, err = il.NewBlobUpload("dedupe2")
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
content = []byte("test-data3")
buf = bytes.NewBuffer(content)
l = buf.Len()
d = godigest.FromBytes(content)
b, err = il.PutBlobChunkStreamed("dedupe2", v, buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
blobDigest2 = strings.Split(d.String(), ":")[1]
So(blobDigest2, ShouldNotBeEmpty)
err = il.FinishBlobUpload("dedupe2", v, buf, d.String())
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
_, _, err = il.CheckBlob("dedupe2", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
_, _, err = il.GetBlob("dedupe2", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
So(err, ShouldBeNil)
m = ispec.Manifest{}
m.SchemaVersion = 2
m = ispec.Manifest{
Config: ispec.Descriptor{
Digest: d,
Size: int64(l),
},
Layers: []ispec.Descriptor{
{
MediaType: "application/vnd.oci.image.layer.v1.tar",
Digest: d,
Size: int64(l),
},
},
}
m.SchemaVersion = 2
mb, _ = json.Marshal(m)
d = godigest.FromBytes(mb)
_, err = il.PutImageManifest("dedupe2", "1.0", ispec.MediaTypeImageManifest, mb)
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("dedupe2", d.String())
So(err, ShouldBeNil)
// verify that dedupe with hard links happened
fi1, err := os.Stat(path.Join(dir, "dedupe2", "blobs", "sha256", blobDigest1))
So(err, ShouldBeNil)
fi2, err := os.Stat(path.Join(dir, "dedupe2", "blobs", "sha256", blobDigest2))
So(err, ShouldBeNil)
So(os.SameFile(fi1, fi2), ShouldBeTrue)
})
Convey("Locks", func() {
// in parallel, a mix of read and write locks - mainly for coverage
var wg sync.WaitGroup
for i := 0; i < 1000; i++ {
wg.Add(2)
go func() {
defer wg.Done()
il.Lock()
func() {}()
il.Unlock()
}()
go func() {
defer wg.Done()
il.RLock()
func() {}()
il.RUnlock()
}()
}
wg.Wait()
})
})
}
func TestDedupe(t *testing.T) {
Convey("Dedupe", t, func(c C) {
Convey("Nil ImageStore", func() {
is := &storage.ImageStore{}
So(func() { _ = is.DedupeBlob("", "", "") }, ShouldPanic)
})
Convey("Valid ImageStore", func() {
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
is := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
So(is.DedupeBlob("", "", ""), ShouldNotBeNil)
})
})
}
func TestNegativeCases(t *testing.T) {
Convey("Invalid root dir", t, func(c C) {
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
os.RemoveAll(dir)
So(storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldNotBeNil)
So(storage.NewImageStore("/deadBEEF", log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldBeNil)
})
Convey("Invalid init repo", t, func(c C) {
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
err = os.Chmod(dir, 0000) // remove all perms
So(err, ShouldBeNil)
So(func() { _ = il.InitRepo("test") }, ShouldPanic)
})
Convey("Invalid validate repo", t, func(c C) {
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
files, err := ioutil.ReadDir(path.Join(dir, "test"))
So(err, ShouldBeNil)
for _, f := range files {
os.Remove(path.Join(dir, "test", f.Name()))
}
_, err = il.ValidateRepo("test")
So(err, ShouldNotBeNil)
os.RemoveAll(path.Join(dir, "test"))
_, err = il.ValidateRepo("test")
So(err, ShouldNotBeNil)
err = os.Chmod(dir, 0000) // remove all perms
So(err, ShouldBeNil)
So(func() { _, _ = il.ValidateRepo("test") }, ShouldPanic)
os.RemoveAll(dir)
_, err = il.GetRepositories()
So(err, ShouldNotBeNil)
})
Convey("Invalid get image tags", t, func(c C) {
il := &storage.ImageStore{}
_, err := il.GetImageTags("test")
So(err, ShouldNotBeNil)
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
il = storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil)
_, err = il.GetImageTags("test")
So(err, ShouldNotBeNil)
So(os.RemoveAll(path.Join(dir, "test")), ShouldBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0755), ShouldBeNil)
_, err = il.GetImageTags("test")
So(err, ShouldNotBeNil)
})
Convey("Invalid get image manifest", t, func(c C) {
il := &storage.ImageStore{}
_, _, _, err := il.GetImageManifest("test", "")
So(err, ShouldNotBeNil)
dir, err := ioutil.TempDir("", "oci-repo-test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
il = storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil)
_, _, _, err = il.GetImageManifest("test", "")
So(err, ShouldNotBeNil)
So(os.RemoveAll(path.Join(dir, "test")), ShouldBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0755), ShouldBeNil)
_, _, _, err = il.GetImageManifest("test", "")
So(err, ShouldNotBeNil)
})
}