0
Fork 0
mirror of https://github.com/project-zot/zot.git synced 2024-12-30 22:34:13 -05:00

storage: add s3 backend support (without GC and dedupe)

Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>
This commit is contained in:
Petu Eusebiu 2021-07-17 06:53:05 +03:00 committed by Ramkumar Chinchani
parent 8e4d828867
commit 9c568c0ee2
15 changed files with 3534 additions and 1049 deletions

View file

@ -13,6 +13,14 @@ jobs:
build-test: build-test:
name: Build and test ZOT name: Build and test ZOT
runs-on: ubuntu-latest runs-on: ubuntu-latest
services:
s3mock:
image: localstack/localstack-full
env:
SERVICES: s3
ports:
- 4563-4599:4563-4599
- 9090:8080
steps: steps:
- name: Install go - name: Install go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
@ -36,6 +44,10 @@ jobs:
timeout-minutes: 30 timeout-minutes: 30
run: | run: |
cd $GITHUB_WORKSPACE && make cd $GITHUB_WORKSPACE && make
env:
S3MOCK_ENDPOINT: localhost:4566
AWS_ACCESS_KEY_ID: fake
AWS_SECRET_ACCESS_KEY: fake
- name: Upload code coverage - name: Upload code coverage
uses: codecov/codecov-action@v1 uses: codecov/codecov-action@v1

View file

@ -267,3 +267,72 @@ Set server path on which metrics will be exposed:
``` ```
In order to test the Metrics feature locally in a [Kind](https://kind.sigs.k8s.io/) cluster, folow [this guide](metrics/README.md). In order to test the Metrics feature locally in a [Kind](https://kind.sigs.k8s.io/) cluster, folow [this guide](metrics/README.md).
## Storage Drivers
Beside filesystem storage backend, zot also supports S3 storage backend, check below url to see how to configure it:
- [s3](https://github.com/docker/docker.github.io/blob/master/registry/storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Service (S3) bucket.
For an s3 zot configuration with multiple storage drivers see: [s3-config](config-s3.json).
zot also supports different storage drivers for each subpath.
### Specifying S3 credentials
There are multiple ways to specify S3 credentials:
- Config file:
```
"storageDriver": {
"name": "s3",
"region": "us-east-2",
"bucket": "zot-storage",
"secure": true,
"skipverify": false,
"accesskey": "<YOUR_ACCESS_KEY_ID>",
"secretkey": "<YOUR_SECRET_ACCESS_KEY>"
}
```
- Environment variables:
SDK looks for credentials in the following environment variables:
```
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
AWS_SESSION_TOKEN (optional)
```
- Credentials file:
A credential file is a plaintext file that contains your access keys. The file must be on the same machine on which youre running your application. The file must be named credentials and located in the .aws/ folder in your home directory.
```
[default]
aws_access_key_id = <YOUR_DEFAULT_ACCESS_KEY_ID>
aws_secret_access_key = <YOUR_DEFAULT_SECRET_ACCESS_KEY>
[test-account]
aws_access_key_id = <YOUR_TEST_ACCESS_KEY_ID>
aws_secret_access_key = <YOUR_TEST_SECRET_ACCESS_KEY>
[prod-account]
; work profile
aws_access_key_id = <YOUR_PROD_ACCESS_KEY_ID>
aws_secret_access_key = <YOUR_PROD_SECRET_ACCESS_KEY>
```
The [default] heading defines credentials for the default profile, which the SDK will use unless you configure it to use another profile.
To specify a profile use AWS_PROFILE environment variable:
```
AWS_PROFILE=test-account
```
For more details see https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials

53
examples/config-s3.json Normal file
View file

@ -0,0 +1,53 @@
{
"version": "0.1.0-dev",
"storage": {
"rootDirectory": "/zot",
"storageDriver": {
"name": "s3",
"region": "us-east-2",
"bucket": "zot-storage",
"secure": true,
"skipverify": false
},
"subPaths": {
"/a": {
"rootDirectory": "/zot-a",
"storageDriver": {
"name": "s3",
"region": "us-east-2",
"bucket": "zot-storage",
"secure": true,
"skipverify": false
}
},
"/b": {
"rootDirectory": "/zot-b",
"storageDriver": {
"name": "s3",
"region": "us-east-2",
"bucket": "zot-storage",
"secure": true,
"skipverify": false
}
},
"/c": {
"rootDirectory": "/zot-c",
"storageDriver": {
"name": "s3",
"region": "us-east-2",
"bucket": "zot-storage",
"secure": false,
"skipverify": false
}
}
}
},
"http": {
"address": "127.0.0.1",
"port": "8080",
"ReadOnly": false
},
"log": {
"level": "debug"
}
}

2
go.mod
View file

@ -16,11 +16,13 @@ require (
github.com/containers/common v0.26.0 github.com/containers/common v0.26.0
github.com/containers/image/v5 v5.13.2 github.com/containers/image/v5 v5.13.2
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/docker/distribution v2.7.1+incompatible
github.com/dustin/go-humanize v1.0.0 github.com/dustin/go-humanize v1.0.0
github.com/fsnotify/fsnotify v1.5.1 github.com/fsnotify/fsnotify v1.5.1
github.com/getlantern/deepcopy v0.0.0-20160317154340-7f45deb8130a github.com/getlantern/deepcopy v0.0.0-20160317154340-7f45deb8130a
github.com/go-ldap/ldap/v3 v3.4.1 github.com/go-ldap/ldap/v3 v3.4.1
github.com/gofrs/uuid v4.0.0+incompatible github.com/gofrs/uuid v4.0.0+incompatible
github.com/golang/mock v1.6.0 // indirect
github.com/google/go-containerregistry v0.6.0 github.com/google/go-containerregistry v0.6.0
github.com/gorilla/handlers v1.5.1 github.com/gorilla/handlers v1.5.1
github.com/gorilla/mux v1.8.0 github.com/gorilla/mux v1.8.0

3
go.sum
View file

@ -725,8 +725,9 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=

View file

@ -21,6 +21,7 @@ type StorageConfig struct {
RootDirectory string RootDirectory string
GC bool GC bool
Dedupe bool Dedupe bool
StorageDriver map[string]interface{} `mapstructure:",omitempty"`
} }
type TLSConfig struct { type TLSConfig struct {
@ -81,6 +82,7 @@ type GlobalStorageConfig struct {
RootDirectory string RootDirectory string
Dedupe bool Dedupe bool
GC bool GC bool
StorageDriver map[string]interface{} `mapstructure:",omitempty"`
SubPaths map[string]StorageConfig SubPaths map[string]StorageConfig
} }

View file

@ -15,8 +15,11 @@ import (
"github.com/anuvu/zot/pkg/extensions/monitoring" "github.com/anuvu/zot/pkg/extensions/monitoring"
"github.com/anuvu/zot/pkg/log" "github.com/anuvu/zot/pkg/log"
"github.com/anuvu/zot/pkg/storage" "github.com/anuvu/zot/pkg/storage"
"github.com/anuvu/zot/pkg/storage/s3"
"github.com/gorilla/handlers" "github.com/gorilla/handlers"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/docker/distribution/registry/storage/driver/factory"
) )
const ( const (
@ -62,6 +65,7 @@ func DefaultHeaders() mux.MiddlewareFunc {
} }
} }
// nolint: gocyclo
func (c *Controller) Run() error { func (c *Controller) Run() error {
// validate configuration // validate configuration
if err := c.Config.Validate(c.Log); err != nil { if err := c.Config.Validate(c.Log); err != nil {
@ -107,8 +111,26 @@ func (c *Controller) Run() error {
} }
} }
defaultStore := storage.NewImageStore(c.Config.Storage.RootDirectory, var defaultStore storage.ImageStore
c.Config.Storage.GC, c.Config.Storage.Dedupe, c.Log, c.Metrics) if len(c.Config.Storage.StorageDriver) == 0 {
defaultStore = storage.NewImageStore(c.Config.Storage.RootDirectory,
c.Config.Storage.GC, c.Config.Storage.Dedupe, c.Log, c.Metrics)
} else {
storeName := fmt.Sprintf("%v", c.Config.Storage.StorageDriver["name"])
if storeName != storage.S3StorageDriverName {
c.Log.Fatal().Err(errors.ErrBadConfig).Msgf("unsupported storage driver: %s",
c.Config.Storage.StorageDriver["name"])
}
// Init a Storager from connection string.
store, err := factory.Create(storeName, c.Config.Storage.StorageDriver)
if err != nil {
c.Log.Error().Err(err).Str("rootDir", c.Config.Storage.RootDirectory).Msg("unable to create s3 service")
return err
}
defaultStore = s3.NewImageStore(c.Config.Storage.RootDirectory,
c.Config.Storage.GC, c.Config.Storage.Dedupe, c.Log, c.Metrics, store)
}
c.StoreController.DefaultStore = defaultStore c.StoreController.DefaultStore = defaultStore
@ -141,8 +163,25 @@ func (c *Controller) Run() error {
} }
} }
subImageStore[route] = storage.NewImageStore(storageConfig.RootDirectory, if len(storageConfig.StorageDriver) == 0 {
storageConfig.GC, storageConfig.Dedupe, c.Log, c.Metrics) subImageStore[route] = storage.NewImageStore(storageConfig.RootDirectory,
storageConfig.GC, storageConfig.Dedupe, c.Log, c.Metrics)
} else {
storeName := fmt.Sprintf("%v", storageConfig.StorageDriver["name"])
if storeName != storage.S3StorageDriverName {
c.Log.Fatal().Err(errors.ErrBadConfig).Msgf("unsupported storage driver: %s", storageConfig.StorageDriver["name"])
}
// Init a Storager from connection string.
store, err := factory.Create(storeName, storageConfig.StorageDriver)
if err != nil {
c.Log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("Unable to create s3 service")
return err
}
subImageStore[route] = s3.NewImageStore(storageConfig.RootDirectory,
storageConfig.GC, storageConfig.Dedupe, c.Log, c.Metrics, store)
}
// Enable extensions if extension config is provided // Enable extensions if extension config is provided
if c.Config != nil && c.Config.Extensions != nil { if c.Config != nil && c.Config.Extensions != nil {

View file

@ -27,6 +27,7 @@ import (
"github.com/anuvu/zot/errors" "github.com/anuvu/zot/errors"
"github.com/anuvu/zot/pkg/api" "github.com/anuvu/zot/pkg/api"
"github.com/anuvu/zot/pkg/api/config" "github.com/anuvu/zot/pkg/api/config"
"github.com/anuvu/zot/pkg/storage"
"github.com/chartmuseum/auth" "github.com/chartmuseum/auth"
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
godigest "github.com/opencontainers/go-digest" godigest "github.com/opencontainers/go-digest"
@ -123,6 +124,12 @@ func getCredString(username, password string) string {
return usernameAndHash return usernameAndHash
} }
func skipIt(t *testing.T) {
if os.Getenv("S3MOCK_ENDPOINT") == "" {
t.Skip("Skipping testing without AWS S3 mock server")
}
}
func TestNew(t *testing.T) { func TestNew(t *testing.T) {
Convey("Make a new controller", t, func() { Convey("Make a new controller", t, func() {
conf := config.New() conf := config.New()
@ -131,6 +138,126 @@ func TestNew(t *testing.T) {
}) })
} }
func TestObjectStorageController(t *testing.T) {
skipIt(t)
Convey("Negative make a new object storage controller", t, func() {
port := getFreePort()
conf := config.New()
conf.HTTP.Port = port
storageDriverParams := map[string]interface{}{
"rootDir": "zot",
"name": storage.S3StorageDriverName,
}
conf.Storage.StorageDriver = storageDriverParams
c := api.NewController(conf)
So(c, ShouldNotBeNil)
c.Config.Storage.RootDirectory = "zot"
err := c.Run()
So(err, ShouldNotBeNil)
})
Convey("Make a new object storage controller", t, func() {
port := getFreePort()
baseURL := getBaseURL(port, false)
conf := config.New()
conf.HTTP.Port = port
bucket := "zot-storage-test"
endpoint := os.Getenv("S3MOCK_ENDPOINT")
storageDriverParams := map[string]interface{}{
"rootDir": "zot",
"name": storage.S3StorageDriverName,
"region": "us-east-2",
"bucket": bucket,
"regionendpoint": endpoint,
"secure": false,
"skipverify": false,
}
conf.Storage.StorageDriver = storageDriverParams
c := api.NewController(conf)
So(c, ShouldNotBeNil)
c.Config.Storage.RootDirectory = "/"
go func(controller *api.Controller) {
// this blocks
if err := controller.Run(); err != nil {
return
}
}(c)
// wait till ready
for {
_, err := resty.R().Get(baseURL)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
defer func(controller *api.Controller) {
ctx := context.Background()
_ = controller.Server.Shutdown(ctx)
}(c)
})
}
func TestObjectStorageControllerSubPaths(t *testing.T) {
skipIt(t)
Convey("Make a new object storage controller", t, func() {
port := getFreePort()
baseURL := getBaseURL(port, false)
conf := config.New()
conf.HTTP.Port = port
bucket := "zot-storage-test"
endpoint := os.Getenv("S3MOCK_ENDPOINT")
storageDriverParams := map[string]interface{}{
"rootDir": "zot",
"name": storage.S3StorageDriverName,
"region": "us-east-2",
"bucket": bucket,
"regionendpoint": endpoint,
"secure": false,
"skipverify": false,
}
conf.Storage.StorageDriver = storageDriverParams
c := api.NewController(conf)
So(c, ShouldNotBeNil)
c.Config.Storage.RootDirectory = "zot"
subPathMap := make(map[string]config.StorageConfig)
subPathMap["/a"] = config.StorageConfig{
RootDirectory: "/a",
StorageDriver: storageDriverParams,
}
c.Config.Storage.SubPaths = subPathMap
go func(controller *api.Controller) {
// this blocks
if err := controller.Run(); err != nil {
return
}
}(c)
for {
_, err := resty.R().Get(baseURL)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
defer func(controller *api.Controller) {
ctx := context.Background()
_ = controller.Server.Shutdown(ctx)
}(c)
})
}
func TestHtpasswdSingleCred(t *testing.T) { func TestHtpasswdSingleCred(t *testing.T) {
Convey("Single cred", t, func() { Convey("Single cred", t, func() {
port := getFreePort() port := getFreePort()

View file

@ -53,7 +53,7 @@ func NewRootCmd() *cobra.Command {
// watch for events // watch for events
case event := <-watcher.Events: case event := <-watcher.Events:
if event.Op == fsnotify.Write { if event.Op == fsnotify.Write {
log.Info().Msg("Config file changed, trying to reload accessControl config") log.Info().Msg("config file changed, trying to reload accessControl config")
newConfig := config.New() newConfig := config.New()
LoadConfiguration(newConfig, args[0]) LoadConfiguration(newConfig, args[0])
c.Config.AccessControl = newConfig.AccessControl c.Config.AccessControl = newConfig.AccessControl
@ -67,7 +67,7 @@ func NewRootCmd() *cobra.Command {
}() }()
if err := watcher.Add(args[0]); err != nil { if err := watcher.Add(args[0]); err != nil {
log.Error().Err(err).Msgf("Error adding config file %s to FsNotify watcher", args[0]) log.Error().Err(err).Msgf("error adding config file %s to FsNotify watcher", args[0])
panic(err) panic(err)
} }
<-done <-done
@ -150,18 +150,18 @@ func LoadConfiguration(config *config.Config, configPath string) {
viper.SetConfigFile(configPath) viper.SetConfigFile(configPath)
if err := viper.ReadInConfig(); err != nil { if err := viper.ReadInConfig(); err != nil {
log.Error().Err(err).Msg("Error while reading configuration") log.Error().Err(err).Msg("error while reading configuration")
panic(err) panic(err)
} }
md := &mapstructure.Metadata{} md := &mapstructure.Metadata{}
if err := viper.Unmarshal(&config, metadataConfig(md)); err != nil { if err := viper.Unmarshal(&config, metadataConfig(md)); err != nil {
log.Error().Err(err).Msg("Error while unmarshalling new config") log.Error().Err(err).Msg("error while unmarshalling new config")
panic(err) panic(err)
} }
if len(md.Keys) == 0 || len(md.Unused) > 0 { if len(md.Keys) == 0 || len(md.Unused) > 0 {
log.Error().Err(errors.ErrBadConfig).Msg("Bad configuration, retry writing it") log.Error().Err(errors.ErrBadConfig).Msg("bad configuration, retry writing it")
panic(errors.ErrBadConfig) panic(errors.ErrBadConfig)
} }
@ -174,9 +174,34 @@ func LoadConfiguration(config *config.Config, configPath string) {
} }
} }
// enforce s3 driver in case of using storage driver
if len(config.Storage.StorageDriver) != 0 {
if config.Storage.StorageDriver["name"] != storage.S3StorageDriverName {
log.Error().Err(errors.ErrBadConfig).Msgf("unsupported storage driver: %s", config.Storage.StorageDriver["name"])
panic(errors.ErrBadConfig)
}
}
// enforce s3 driver on subpaths in case of using storage driver
if config.Storage.SubPaths != nil {
if len(config.Storage.SubPaths) > 0 {
subPaths := config.Storage.SubPaths
for route, storageConfig := range subPaths {
if len(storageConfig.StorageDriver) != 0 {
if storageConfig.StorageDriver["name"] != storage.S3StorageDriverName {
log.Error().Err(errors.ErrBadConfig).Str("subpath",
route).Msgf("unsupported storage driver: %s", storageConfig.StorageDriver["name"])
panic(errors.ErrBadConfig)
}
}
}
}
}
err := config.LoadAccessControlConfig() err := config.LoadAccessControlConfig()
if err != nil { if err != nil {
log.Error().Err(errors.ErrBadConfig).Msg("Unable to unmarshal http.accessControl.key.policies") log.Error().Err(errors.ErrBadConfig).Msg("unable to unmarshal http.accessControl.key.policies")
panic(err) panic(err)
} }

720
pkg/storage/s3/s3_test.go Normal file
View file

@ -0,0 +1,720 @@
package s3_test
import (
"bytes"
"context"
_ "crypto/sha256"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strings"
"time"
godigest "github.com/opencontainers/go-digest"
//"strings"
"testing"
"github.com/anuvu/zot/pkg/extensions/monitoring"
"github.com/anuvu/zot/pkg/log"
"github.com/anuvu/zot/pkg/storage"
"github.com/anuvu/zot/pkg/storage/s3"
guuid "github.com/gofrs/uuid"
"github.com/rs/zerolog"
. "github.com/smartystreets/goconvey/convey"
// Add s3 support
storageDriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/factory"
_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
"gopkg.in/resty.v1"
)
// nolint: gochecknoglobals
var (
testImage = "test"
fileWriterSize = 12
fileInfoSize = 10
errorText = "new s3 error"
errS3 = errors.New(errorText)
)
func cleanupStorage(store storageDriver.StorageDriver, name string) {
_ = store.Delete(context.Background(), name)
}
func skipIt(t *testing.T) {
if os.Getenv("S3MOCK_ENDPOINT") == "" {
t.Skip("Skipping testing without AWS S3 mock server")
}
}
func createMockStorage(rootDir string, store storageDriver.StorageDriver) storage.ImageStore {
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := s3.NewImageStore(rootDir, false, false, log, metrics, store)
return il
}
func createObjectsStore(rootDir string) (storageDriver.StorageDriver, storage.ImageStore, error) {
bucket := "zot-storage-test"
endpoint := os.Getenv("S3MOCK_ENDPOINT")
storageDriverParams := map[string]interface{}{
"rootDir": rootDir,
"name": "s3",
"region": "us-east-2",
"bucket": bucket,
"regionendpoint": endpoint,
"secure": false,
"skipverify": false,
}
storeName := fmt.Sprintf("%v", storageDriverParams["name"])
store, err := factory.Create(storeName, storageDriverParams)
if err != nil {
panic(err)
}
// create bucket if it doesn't exists
_, err = resty.R().Put("http://" + endpoint + "/" + bucket)
if err != nil {
panic(err)
}
log := log.Logger{Logger: zerolog.New(os.Stdout)}
metrics := monitoring.NewMetricsServer(false, log)
il := s3.NewImageStore(rootDir, false, false, log, metrics, store)
return store, il, err
}
type FileInfoMock struct {
isDirFn func() bool
}
func (f *FileInfoMock) Path() string {
return ""
}
func (f *FileInfoMock) Size() int64 {
return int64(fileInfoSize)
}
func (f *FileInfoMock) ModTime() time.Time {
return time.Now()
}
func (f *FileInfoMock) IsDir() bool {
if f != nil && f.isDirFn != nil {
return f.isDirFn()
}
return true
}
type FileWriterMock struct {
writeFn func([]byte) (int, error)
cancelFn func() error
commitFn func() error
closeFn func() error
}
func (f *FileWriterMock) Size() int64 {
return int64(fileWriterSize)
}
func (f *FileWriterMock) Cancel() error {
if f != nil && f.cancelFn != nil {
return f.cancelFn()
}
return nil
}
func (f *FileWriterMock) Commit() error {
if f != nil && f.commitFn != nil {
return f.commitFn()
}
return nil
}
func (f *FileWriterMock) Write(p []byte) (int, error) {
if f != nil && f.writeFn != nil {
return f.writeFn(p)
}
return 10, nil
}
func (f *FileWriterMock) Close() error {
if f != nil && f.closeFn != nil {
return f.closeFn()
}
return nil
}
type StorageDriverMock struct {
nameFn func() string
getContentFn func(ctx context.Context, path string) ([]byte, error)
putContentFn func(ctx context.Context, path string, content []byte) error
readerFn func(ctx context.Context, path string, offset int64) (io.ReadCloser, error)
writerFn func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error)
statFn func(ctx context.Context, path string) (storageDriver.FileInfo, error)
listFn func(ctx context.Context, path string) ([]string, error)
moveFn func(ctx context.Context, sourcePath string, destPath string) error
deleteFn func(ctx context.Context, path string) error
walkFn func(ctx context.Context, path string, f storageDriver.WalkFn) error
}
func (s *StorageDriverMock) Name() string {
if s != nil && s.nameFn != nil {
return s.nameFn()
}
return ""
}
func (s *StorageDriverMock) GetContent(ctx context.Context, path string) ([]byte, error) {
if s != nil && s.getContentFn != nil {
return s.getContentFn(ctx, path)
}
return []byte{}, nil
}
func (s *StorageDriverMock) PutContent(ctx context.Context, path string, content []byte) error {
if s != nil && s.putContentFn != nil {
return s.putContentFn(ctx, path, content)
}
return nil
}
func (s *StorageDriverMock) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
if s != nil && s.readerFn != nil {
return s.readerFn(ctx, path, offset)
}
return ioutil.NopCloser(strings.NewReader("")), nil
}
func (s *StorageDriverMock) Writer(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
if s != nil && s.writerFn != nil {
return s.writerFn(ctx, path, append)
}
return &FileWriterMock{}, nil
}
func (s *StorageDriverMock) Stat(ctx context.Context, path string) (storageDriver.FileInfo, error) {
if s != nil && s.statFn != nil {
return s.statFn(ctx, path)
}
return &FileInfoMock{}, nil
}
func (s *StorageDriverMock) List(ctx context.Context, path string) ([]string, error) {
if s != nil && s.listFn != nil {
return s.listFn(ctx, path)
}
return []string{"a"}, nil
}
func (s *StorageDriverMock) Move(ctx context.Context, sourcePath string, destPath string) error {
if s != nil && s.moveFn != nil {
return s.moveFn(ctx, sourcePath, destPath)
}
return nil
}
func (s *StorageDriverMock) Delete(ctx context.Context, path string) error {
if s != nil && s.deleteFn != nil {
return s.deleteFn(ctx, path)
}
return nil
}
func (s *StorageDriverMock) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
return "", nil
}
func (s *StorageDriverMock) Walk(ctx context.Context, path string, f storageDriver.WalkFn) error {
if s != nil && s.walkFn != nil {
return s.walkFn(ctx, path, f)
}
return nil
}
func TestNegativeCasesObjectsStorage(t *testing.T) {
skipIt(t)
uuid, err := guuid.NewV4()
if err != nil {
panic(err)
}
testDir := path.Join("/oci-repo-test", uuid.String())
store, il, _ := createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
Convey("Invalid validate repo", t, func(c C) {
So(il, ShouldNotBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
objects, err := store.List(context.Background(), path.Join(il.RootDir(), testImage))
So(err, ShouldBeNil)
for _, object := range objects {
t.Logf("Removing object: %s", object)
err := store.Delete(context.Background(), object)
So(err, ShouldBeNil)
}
_, err = il.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
_, err = il.GetRepositories()
So(err, ShouldBeNil)
})
Convey("Invalid get image tags", t, func(c C) {
store, il, err := createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
So(err, ShouldBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.Move(context.Background(), path.Join(testDir, testImage, "index.json"),
path.Join(testDir, testImage, "blobs")), ShouldBeNil)
ok, _ := il.ValidateRepo(testImage)
So(ok, ShouldBeFalse)
_, err = il.GetImageTags(testImage)
So(err, ShouldNotBeNil)
So(store.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.PutContent(context.Background(), path.Join(testDir, testImage, "index.json"), []byte{}), ShouldBeNil)
_, err = il.GetImageTags(testImage)
So(err, ShouldNotBeNil)
})
Convey("Invalid get image manifest", t, func(c C) {
store, il, err := createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
So(err, ShouldBeNil)
So(il, ShouldNotBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.Delete(context.Background(), path.Join(testDir, testImage, "index.json")), ShouldBeNil)
_, _, _, err = il.GetImageManifest(testImage, "")
So(err, ShouldNotBeNil)
So(store.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.PutContent(context.Background(), path.Join(testDir, testImage, "index.json"), []byte{}), ShouldBeNil)
_, _, _, err = il.GetImageManifest(testImage, "")
So(err, ShouldNotBeNil)
})
Convey("Invalid validate repo", t, func(c C) {
store, il, err := createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
So(err, ShouldBeNil)
So(il, ShouldNotBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.Delete(context.Background(), path.Join(testDir, testImage, "index.json")), ShouldBeNil)
_, err = il.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
So(store.Delete(context.Background(), path.Join(testDir, testImage)), ShouldBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
So(store.Move(context.Background(), path.Join(testDir, testImage, "index.json"),
path.Join(testDir, testImage, "_index.json")), ShouldBeNil)
ok, err := il.ValidateRepo(testImage)
So(err, ShouldBeNil)
So(ok, ShouldBeFalse)
})
Convey("Invalid finish blob upload", t, func(c C) {
store, il, err := createObjectsStore(testDir)
defer cleanupStorage(store, testDir)
So(err, ShouldBeNil)
So(il, ShouldNotBeNil)
So(il.InitRepo(testImage), ShouldBeNil)
v, err := il.NewBlobUpload(testImage)
So(err, ShouldBeNil)
So(v, ShouldNotBeEmpty)
content := []byte("test-data1")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
b, err := il.PutBlobChunk(testImage, v, 0, int64(l), buf)
So(err, ShouldBeNil)
So(b, ShouldEqual, l)
src := il.BlobUploadPath(testImage, v)
fw, err := store.Writer(context.Background(), src, true)
So(err, ShouldBeNil)
_, err = fw.Write([]byte("another-chunk-of-data"))
So(err, ShouldBeNil)
err = fw.Close()
So(err, ShouldBeNil)
err = il.FinishBlobUpload(testImage, v, buf, d.String())
So(err, ShouldNotBeNil)
})
Convey("Test storage driver errors", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
listFn: func(ctx context.Context, path string) ([]string, error) {
return []string{testImage}, errS3
},
moveFn: func(ctx context.Context, sourcePath, destPath string) error {
return errS3
},
getContentFn: func(ctx context.Context, path string) ([]byte, error) {
return []byte{}, errS3
},
putContentFn: func(ctx context.Context, path string, content []byte) error {
return errS3
},
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
return &FileWriterMock{}, errS3
},
readerFn: func(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("")), errS3
},
walkFn: func(ctx context.Context, path string, f storageDriver.WalkFn) error {
return errS3
},
statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) {
return &FileInfoMock{}, errS3
},
deleteFn: func(ctx context.Context, path string) error {
return errS3
},
})
So(il, ShouldNotBeNil)
So(il.InitRepo(testImage), ShouldNotBeNil)
_, err := il.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
v, err := il.NewBlobUpload(testImage)
So(err, ShouldNotBeNil)
content := []byte("test-data1")
buf := bytes.NewBuffer(content)
l := buf.Len()
d := godigest.FromBytes(content)
_, err = il.PutBlobChunk(testImage, v, 0, int64(l), buf)
So(err, ShouldNotBeNil)
err = il.FinishBlobUpload(testImage, v, buf, d.String())
So(err, ShouldNotBeNil)
err = il.DeleteBlob(testImage, d.String())
So(err, ShouldNotBeNil)
err = il.DeleteBlobUpload(testImage, v)
So(err, ShouldNotBeNil)
err = il.DeleteImageManifest(testImage, "1.0")
So(err, ShouldNotBeNil)
_, err = il.PutImageManifest(testImage, "1.0", "application/json", []byte{})
So(err, ShouldNotBeNil)
_, err = il.PutBlobChunkStreamed(testImage, v, bytes.NewBuffer([]byte(testImage)))
So(err, ShouldNotBeNil)
_, _, err = il.FullBlobUpload(testImage, bytes.NewBuffer([]byte{}), "inexistent")
So(err, ShouldNotBeNil)
_, _, err = il.CheckBlob(testImage, d.String())
So(err, ShouldNotBeNil)
})
Convey("Test ValidateRepo", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
listFn: func(ctx context.Context, path string) ([]string, error) {
return []string{testImage, testImage}, errS3
},
})
_, err := il.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
})
Convey("Test ValidateRepo2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
listFn: func(ctx context.Context, path string) ([]string, error) {
return []string{"test/test/oci-layout", "test/test/index.json"}, nil
},
statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) {
return &FileInfoMock{}, nil
},
})
_, err := il.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
})
Convey("Test ValidateRepo3", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
listFn: func(ctx context.Context, path string) ([]string, error) {
return []string{"test/test/oci-layout", "test/test/index.json"}, nil
},
statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) {
return &FileInfoMock{}, nil
},
getContentFn: func(ctx context.Context, path string) ([]byte, error) {
return []byte{}, errS3
},
})
_, err := il.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
})
Convey("Test ValidateRepo4", t, func(c C) {
ociLayout := []byte(`{"imageLayoutVersion": "9.9.9"}`)
il = createMockStorage(testDir, &StorageDriverMock{
listFn: func(ctx context.Context, path string) ([]string, error) {
return []string{"test/test/oci-layout", "test/test/index.json"}, nil
},
statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) {
return &FileInfoMock{}, nil
},
getContentFn: func(ctx context.Context, path string) ([]byte, error) {
return ociLayout, nil
},
})
_, err := il.ValidateRepo(testImage)
So(err, ShouldNotBeNil)
})
Convey("Test GetRepositories", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
walkFn: func(ctx context.Context, path string, f storageDriver.WalkFn) error {
return f(new(FileInfoMock))
},
})
repos, err := il.GetRepositories()
So(repos, ShouldBeEmpty)
So(err, ShouldBeNil)
})
Convey("Test DeleteImageManifest", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
getContentFn: func(ctx context.Context, path string) ([]byte, error) {
return []byte{}, errS3
},
})
err := il.DeleteImageManifest(testImage, "1.0")
So(err, ShouldNotBeNil)
})
Convey("Test DeleteImageManifest2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{})
err := il.DeleteImageManifest(testImage, "1.0")
So(err, ShouldNotBeNil)
})
Convey("Test NewBlobUpload", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
putContentFn: func(ctx context.Context, path string, content []byte) error {
return errS3
},
})
_, err := il.NewBlobUpload(testImage)
So(err, ShouldNotBeNil)
})
Convey("Test GetBlobUpload", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
statFn: func(ctx context.Context, path string) (storageDriver.FileInfo, error) {
return &FileInfoMock{}, errS3
},
})
_, err := il.GetBlobUpload(testImage, "uuid")
So(err, ShouldNotBeNil)
})
Convey("Test PutBlobChunkStreamed", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
return &FileWriterMock{}, errS3
},
})
_, err := il.PutBlobChunkStreamed(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")))
So(err, ShouldNotBeNil)
})
Convey("Test PutBlobChunkStreamed2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
return &FileWriterMock{writeFn: func(b []byte) (int, error) {
return 0, errS3
}}, nil
},
})
_, err := il.PutBlobChunkStreamed(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")))
So(err, ShouldNotBeNil)
})
Convey("Test PutBlobChunk", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
return &FileWriterMock{}, errS3
},
})
_, err := il.PutBlobChunk(testImage, "uuid", 0, 100, ioutil.NopCloser(strings.NewReader("")))
So(err, ShouldNotBeNil)
})
Convey("Test PutBlobChunk2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
return &FileWriterMock{
writeFn: func(b []byte) (int, error) {
return 0, errS3
},
cancelFn: func() error {
return errS3
},
}, nil
},
})
_, err := il.PutBlobChunk(testImage, "uuid", 0, 100, ioutil.NopCloser(strings.NewReader("")))
So(err, ShouldNotBeNil)
})
Convey("Test PutBlobChunk3", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
return &FileWriterMock{
writeFn: func(b []byte) (int, error) {
return 0, errS3
},
}, nil
},
})
_, err := il.PutBlobChunk(testImage, "uuid", 12, 100, ioutil.NopCloser(strings.NewReader("")))
So(err, ShouldNotBeNil)
})
Convey("Test FinishBlobUpload", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
return &FileWriterMock{
commitFn: func() error {
return errS3
},
}, nil
},
})
d := godigest.FromBytes([]byte("test"))
err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FinishBlobUpload2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
return &FileWriterMock{
closeFn: func() error {
return errS3
},
}, nil
},
})
d := godigest.FromBytes([]byte("test"))
err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FinishBlobUpload3", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
readerFn: func(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
return nil, errS3
},
})
d := godigest.FromBytes([]byte("test"))
err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FinishBlobUpload4", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
moveFn: func(ctx context.Context, sourcePath, destPath string) error {
return errS3
},
})
d := godigest.FromBytes([]byte(""))
err := il.FinishBlobUpload(testImage, "uuid", ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FullBlobUpload", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
writerFn: func(ctx context.Context, path string, append bool) (storageDriver.FileWriter, error) {
return &FileWriterMock{}, errS3
},
})
d := godigest.FromBytes([]byte(""))
_, _, err := il.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FullBlobUpload2", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{})
d := godigest.FromBytes([]byte(" "))
_, _, err := il.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test FullBlobUpload3", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
moveFn: func(ctx context.Context, sourcePath, destPath string) error {
return errS3
},
})
d := godigest.FromBytes([]byte(""))
_, _, err := il.FullBlobUpload(testImage, ioutil.NopCloser(strings.NewReader("")), d.String())
So(err, ShouldNotBeNil)
})
Convey("Test GetBlob", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
readerFn: func(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("")), errS3
},
})
d := godigest.FromBytes([]byte(""))
_, _, err := il.GetBlob(testImage, d.String(), "")
So(err, ShouldNotBeNil)
})
Convey("Test DeleteBlob", t, func(c C) {
il = createMockStorage(testDir, &StorageDriverMock{
deleteFn: func(ctx context.Context, path string) error {
return errS3
},
})
d := godigest.FromBytes([]byte(""))
err := il.DeleteBlob(testImage, d.String())
So(err, ShouldNotBeNil)
})
}

1104
pkg/storage/s3/storage.go Normal file

File diff suppressed because it is too large Load diff

View file

@ -6,6 +6,10 @@ import (
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
) )
const (
S3StorageDriverName = "s3"
)
type ImageStore interface { type ImageStore interface {
DirExists(d string) bool DirExists(d string) bool
RootDir() string RootDir() string

View file

@ -30,7 +30,7 @@ import (
const ( const (
// BlobUploadDir defines the upload directory for blob uploads. // BlobUploadDir defines the upload directory for blob uploads.
BlobUploadDir = ".uploads" BlobUploadDir = ".uploads"
schemaVersion = 2 SchemaVersion = 2
gcDelay = 1 * time.Hour gcDelay = 1 * time.Hour
) )
@ -463,7 +463,7 @@ func (is *ImageStoreFS) PutImageManifest(repo string, reference string, mediaTyp
return "", errors.ErrBadManifest return "", errors.ErrBadManifest
} }
if m.SchemaVersion != schemaVersion { if m.SchemaVersion != SchemaVersion {
is.log.Error().Int("SchemaVersion", m.SchemaVersion).Msg("invalid manifest") is.log.Error().Int("SchemaVersion", m.SchemaVersion).Msg("invalid manifest")
return "", errors.ErrBadManifest return "", errors.ErrBadManifest
} }

View file

@ -0,0 +1,749 @@
package storage_test
import (
"bytes"
_ "crypto/sha256"
"encoding/json"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path"
"strings"
"testing"
"time"
"github.com/anuvu/zot/errors"
"github.com/anuvu/zot/pkg/extensions/monitoring"
"github.com/anuvu/zot/pkg/log"
"github.com/anuvu/zot/pkg/storage"
godigest "github.com/opencontainers/go-digest"
ispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/rs/zerolog"
. "github.com/smartystreets/goconvey/convey"
)
// TestStorageFSAPIs exercises error paths of the filesystem-backed image
// store by removing and restoring file permissions underneath it while
// pushing and fetching a manifest. Permission-based cases are meaningful
// only when not running as root (root ignores mode bits).
func TestStorageFSAPIs(t *testing.T) {
	dir, err := ioutil.TempDir("", "oci-repo-test")
	if err != nil {
		panic(err)
	}

	defer os.RemoveAll(dir)

	log := log.Logger{Logger: zerolog.New(os.Stdout)}
	metrics := monitoring.NewMetricsServer(false, log)
	il := storage.NewImageStore(dir, true, true, log, metrics)

	Convey("Repo layout", t, func(c C) {
		repoName := "test"

		Convey("Bad image manifest", func() {
			// Upload a small blob so the manifest below has something to reference.
			v, err := il.NewBlobUpload("test")
			So(err, ShouldBeNil)
			So(v, ShouldNotBeEmpty)

			content := []byte("test-data1")
			buf := bytes.NewBuffer(content)
			l := buf.Len()
			d := godigest.FromBytes(content)

			b, err := il.PutBlobChunk(repoName, v, 0, int64(l), buf)
			So(err, ShouldBeNil)
			So(b, ShouldEqual, l)

			err = il.FinishBlobUpload("test", v, buf, d.String())
			So(err, ShouldBeNil)

			// Build a minimal OCI manifest whose config and single layer both
			// point at the blob uploaded above.
			annotationsMap := make(map[string]string)
			annotationsMap[ispec.AnnotationRefName] = "1.0"
			m := ispec.Manifest{
				Config: ispec.Descriptor{
					Digest: d,
					Size:   int64(l),
				},
				Layers: []ispec.Descriptor{
					{
						MediaType: "application/vnd.oci.image.layer.v1.tar",
						Digest:    d,
						Size:      int64(l),
					},
				},
				Annotations: annotationsMap,
			}

			m.SchemaVersion = 2
			mb, _ := json.Marshal(m)
			d = godigest.FromBytes(mb)

			// Make index.json unwritable so PutImageManifest fails.
			err = os.Chmod(path.Join(il.RootDir(), repoName, "index.json"), 0000)
			if err != nil {
				panic(err)
			}

			_, err = il.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, mb)
			So(err, ShouldNotBeNil)

			// Restore permissions; the same push must now succeed.
			err = os.Chmod(path.Join(il.RootDir(), repoName, "index.json"), 0755)
			if err != nil {
				panic(err)
			}

			_, err = il.PutImageManifest(repoName, "1.0", ispec.MediaTypeImageManifest, mb)
			So(err, ShouldBeNil)

			// GetImageManifest must fail when the manifest blob is unreadable ...
			manifestPath := path.Join(il.RootDir(), repoName, "blobs", d.Algorithm().String(), d.Encoded())

			err = os.Chmod(manifestPath, 0000)
			if err != nil {
				panic(err)
			}

			_, _, _, err = il.GetImageManifest(repoName, d.String())
			So(err, ShouldNotBeNil)

			// ... and when the manifest blob is missing entirely.
			err = os.Remove(manifestPath)
			if err != nil {
				panic(err)
			}

			_, _, _, err = il.GetImageManifest(repoName, d.String())
			So(err, ShouldNotBeNil)

			// Pushing a new tag must fail while the repo directory is inaccessible.
			err = os.Chmod(path.Join(il.RootDir(), repoName), 0000)
			if err != nil {
				panic(err)
			}

			_, err = il.PutImageManifest(repoName, "2.0", ispec.MediaTypeImageManifest, mb)
			So(err, ShouldNotBeNil)

			err = os.Chmod(path.Join(il.RootDir(), repoName), 0755)
			if err != nil {
				panic(err)
			}

			// invalid DeleteImageManifest: index.json unwritable, so the delete
			// cannot update the index.
			indexPath := path.Join(il.RootDir(), repoName, "index.json")
			err = os.Chmod(indexPath, 0000)
			if err != nil {
				panic(err)
			}

			err = il.DeleteImageManifest(repoName, d.String())
			So(err, ShouldNotBeNil)

			err = os.RemoveAll(path.Join(il.RootDir(), repoName))
			if err != nil {
				panic(err)
			}
		})
	})
}
// TestDedupeLinks pushes the same blob content into two different
// repositories and then verifies that the store deduplicated the data:
// the two on-disk blob files must refer to the same inode (hard links),
// as checked via os.SameFile.
func TestDedupeLinks(t *testing.T) {
	dir, err := ioutil.TempDir("", "oci-repo-test")
	if err != nil {
		panic(err)
	}

	defer os.RemoveAll(dir)

	log := log.Logger{Logger: zerolog.New(os.Stdout)}
	metrics := monitoring.NewMetricsServer(false, log)
	il := storage.NewImageStore(dir, true, true, log, metrics)

	Convey("Dedupe", t, func(c C) {
		blobDigest1 := ""
		blobDigest2 := ""

		// manifest1: upload a blob into "dedupe1" and publish a manifest for it.
		v, err := il.NewBlobUpload("dedupe1")
		So(err, ShouldBeNil)
		So(v, ShouldNotBeEmpty)

		content := []byte("test-data3")
		buf := bytes.NewBuffer(content)
		l := buf.Len()
		d := godigest.FromBytes(content)

		b, err := il.PutBlobChunkStreamed("dedupe1", v, buf)
		So(err, ShouldBeNil)
		So(b, ShouldEqual, l)

		// Keep the hex part of the digest for the on-disk path check below.
		blobDigest1 = strings.Split(d.String(), ":")[1]
		So(blobDigest1, ShouldNotBeEmpty)

		err = il.FinishBlobUpload("dedupe1", v, buf, d.String())
		So(err, ShouldBeNil)
		So(b, ShouldEqual, l)

		_, _, err = il.CheckBlob("dedupe1", d.String())
		So(err, ShouldBeNil)

		_, _, err = il.GetBlob("dedupe1", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
		So(err, ShouldBeNil)

		m := ispec.Manifest{}
		m.SchemaVersion = 2
		m = ispec.Manifest{
			Config: ispec.Descriptor{
				Digest: d,
				Size:   int64(l),
			},
			Layers: []ispec.Descriptor{
				{
					MediaType: "application/vnd.oci.image.layer.v1.tar",
					Digest:    d,
					Size:      int64(l),
				},
			},
		}
		m.SchemaVersion = 2
		mb, _ := json.Marshal(m)
		d = godigest.FromBytes(mb)

		_, err = il.PutImageManifest("dedupe1", d.String(), ispec.MediaTypeImageManifest, mb)
		So(err, ShouldBeNil)

		_, _, _, err = il.GetImageManifest("dedupe1", d.String())
		So(err, ShouldBeNil)

		// manifest2: upload the identical content into a second repo "dedupe2".
		v, err = il.NewBlobUpload("dedupe2")
		So(err, ShouldBeNil)
		So(v, ShouldNotBeEmpty)

		content = []byte("test-data3")
		buf = bytes.NewBuffer(content)
		l = buf.Len()
		d = godigest.FromBytes(content)

		b, err = il.PutBlobChunkStreamed("dedupe2", v, buf)
		So(err, ShouldBeNil)
		So(b, ShouldEqual, l)

		blobDigest2 = strings.Split(d.String(), ":")[1]
		So(blobDigest2, ShouldNotBeEmpty)

		err = il.FinishBlobUpload("dedupe2", v, buf, d.String())
		So(err, ShouldBeNil)
		So(b, ShouldEqual, l)

		_, _, err = il.CheckBlob("dedupe2", d.String())
		So(err, ShouldBeNil)

		_, _, err = il.GetBlob("dedupe2", d.String(), "application/vnd.oci.image.layer.v1.tar+gzip")
		So(err, ShouldBeNil)

		m = ispec.Manifest{}
		m.SchemaVersion = 2
		m = ispec.Manifest{
			Config: ispec.Descriptor{
				Digest: d,
				Size:   int64(l),
			},
			Layers: []ispec.Descriptor{
				{
					MediaType: "application/vnd.oci.image.layer.v1.tar",
					Digest:    d,
					Size:      int64(l),
				},
			},
		}
		m.SchemaVersion = 2
		mb, _ = json.Marshal(m)
		d = godigest.FromBytes(mb)

		_, err = il.PutImageManifest("dedupe2", "1.0", ispec.MediaTypeImageManifest, mb)
		So(err, ShouldBeNil)

		_, _, _, err = il.GetImageManifest("dedupe2", d.String())
		So(err, ShouldBeNil)

		// verify that dedupe with hard links happened: both blob paths must
		// resolve to the same underlying file.
		fi1, err := os.Stat(path.Join(dir, "dedupe2", "blobs", "sha256", blobDigest1))
		So(err, ShouldBeNil)

		fi2, err := os.Stat(path.Join(dir, "dedupe2", "blobs", "sha256", blobDigest2))
		So(err, ShouldBeNil)

		So(os.SameFile(fi1, fi2), ShouldBeTrue)
	})
}
// TestDedupe covers DedupeBlob on the ImageStore interface: calling through
// a zero-valued store must panic, and a valid store must return an error
// for empty source/destination/digest arguments.
func TestDedupe(t *testing.T) {
	Convey("Dedupe", t, func(c C) {
		Convey("Nil ImageStore", func() {
			// A zero-valued interface has no backing implementation.
			var store storage.ImageStore
			So(func() { _ = store.DedupeBlob("", "", "") }, ShouldPanic)
		})

		Convey("Valid ImageStore", func() {
			rootDir, err := ioutil.TempDir("", "oci-repo-test")
			if err != nil {
				panic(err)
			}
			defer os.RemoveAll(rootDir)

			logger := log.Logger{Logger: zerolog.New(os.Stdout)}
			metrics := monitoring.NewMetricsServer(false, logger)
			imgStore := storage.NewImageStore(rootDir, true, true, logger, metrics)

			// Empty arguments are invalid and must be rejected.
			So(imgStore.DedupeBlob("", "", ""), ShouldNotBeNil)
		})
	})
}
// nolint: gocyclo
// TestNegativeCases walks the filesystem image store through a series of
// failure scenarios: bad root dirs, repos that are files, unreadable
// layouts, missing index files, blocked uploads and dedupe collisions.
// Permission-based cases are skipped when running as root, and the dedupe
// scenario shells out to "sudo chattr" to make a blob immutable.
func TestNegativeCases(t *testing.T) {
	Convey("Invalid root dir", t, func(c C) {
		dir, err := ioutil.TempDir("", "oci-repo-test")
		if err != nil {
			panic(err)
		}
		os.RemoveAll(dir)

		log := log.Logger{Logger: zerolog.New(os.Stdout)}
		metrics := monitoring.NewMetricsServer(false, log)
		// A missing root dir is created on demand, so this store is valid.
		So(storage.NewImageStore(dir, true, true, log, metrics), ShouldNotBeNil)
		if os.Geteuid() != 0 {
			// An uncreatable root dir must yield a nil store (non-root only).
			So(storage.NewImageStore("/deadBEEF", true, true, log, metrics), ShouldBeNil)
		}
	})

	Convey("Invalid init repo", t, func(c C) {
		dir, err := ioutil.TempDir("", "oci-repo-test")
		if err != nil {
			panic(err)
		}
		defer os.RemoveAll(dir)

		log := log.Logger{Logger: zerolog.New(os.Stdout)}
		metrics := monitoring.NewMetricsServer(false, log)
		il := storage.NewImageStore(dir, true, true, log, metrics)

		err = os.Chmod(dir, 0000) // remove all perms
		if err != nil {
			panic(err)
		}
		if os.Geteuid() != 0 {
			err = il.InitRepo("test")
			So(err, ShouldNotBeNil)
		}
		err = os.Chmod(dir, 0755)
		if err != nil {
			panic(err)
		}

		// Init repo should fail if repo is a file.
		err = ioutil.WriteFile(path.Join(dir, "file-test"), []byte("this is test file"), 0755) // nolint:gosec
		So(err, ShouldBeNil)
		err = il.InitRepo("file-test")
		So(err, ShouldNotBeNil)

		// Initializing over a pre-existing empty directory is fine.
		err = os.Mkdir(path.Join(dir, "test-dir"), 0755)
		So(err, ShouldBeNil)

		err = il.InitRepo("test-dir")
		So(err, ShouldBeNil)
	})

	Convey("Invalid validate repo", t, func(c C) {
		dir, err := ioutil.TempDir("", "oci-repo-test")
		if err != nil {
			panic(err)
		}
		defer os.RemoveAll(dir)

		log := log.Logger{Logger: zerolog.New(os.Stdout)}
		metrics := monitoring.NewMetricsServer(false, log)
		il := storage.NewImageStore(dir, true, true, log, metrics)
		So(il, ShouldNotBeNil)
		So(il.InitRepo("test"), ShouldBeNil)

		// An unreadable repo dir is reported as not found.
		err = os.MkdirAll(path.Join(dir, "invalid-test"), 0755)
		So(err, ShouldBeNil)
		err = os.Chmod(path.Join(dir, "invalid-test"), 0000) // remove all perms
		if err != nil {
			panic(err)
		}
		_, err = il.ValidateRepo("invalid-test")
		So(err, ShouldNotBeNil)
		So(err, ShouldEqual, errors.ErrRepoNotFound)

		err = os.Chmod(path.Join(dir, "invalid-test"), 0755) // restore perms
		if err != nil {
			panic(err)
		}

		// Layout entries that exist but are regular files: invalid, no error.
		err = ioutil.WriteFile(path.Join(dir, "invalid-test", "blobs"), []byte{}, 0755) // nolint: gosec
		if err != nil {
			panic(err)
		}
		err = ioutil.WriteFile(path.Join(dir, "invalid-test", "index.json"), []byte{}, 0755) // nolint: gosec
		if err != nil {
			panic(err)
		}
		err = ioutil.WriteFile(path.Join(dir, "invalid-test", ispec.ImageLayoutFile), []byte{}, 0755) // nolint: gosec
		if err != nil {
			panic(err)
		}
		isValid, err := il.ValidateRepo("invalid-test")
		So(err, ShouldBeNil)
		So(isValid, ShouldEqual, false)

		// With blobs as a real dir but an empty oci-layout, validation errors.
		err = os.Remove(path.Join(dir, "invalid-test", "blobs"))
		if err != nil {
			panic(err)
		}
		err = os.Mkdir(path.Join(dir, "invalid-test", "blobs"), 0755)
		if err != nil {
			panic(err)
		}
		isValid, err = il.ValidateRepo("invalid-test")
		So(err, ShouldNotBeNil)
		So(isValid, ShouldEqual, false)

		// A syntactically valid oci-layout without a version is a bad version.
		err = ioutil.WriteFile(path.Join(dir, "invalid-test", ispec.ImageLayoutFile), []byte("{}"), 0755) // nolint: gosec
		if err != nil {
			panic(err)
		}
		isValid, err = il.ValidateRepo("invalid-test")
		So(err, ShouldNotBeNil)
		So(err, ShouldEqual, errors.ErrRepoBadVersion)
		So(isValid, ShouldEqual, false)

		// Strip the "test" repo of its contents, then of its directory.
		files, err := ioutil.ReadDir(path.Join(dir, "test"))
		if err != nil {
			panic(err)
		}
		for _, f := range files {
			os.Remove(path.Join(dir, "test", f.Name()))
		}
		_, err = il.ValidateRepo("test")
		So(err, ShouldNotBeNil)

		err = os.RemoveAll(path.Join(dir, "test"))
		if err != nil {
			panic(err)
		}
		_, err = il.ValidateRepo("test")
		So(err, ShouldNotBeNil)

		err = os.Chmod(dir, 0000) // remove all perms
		if err != nil {
			panic(err)
		}
		if os.Geteuid() != 0 {
			So(func() { _, _ = il.ValidateRepo("test") }, ShouldPanic)
		}
		err = os.Chmod(dir, 0755) // restore perms
		if err != nil {
			panic(err)
		}

		// With the root dir gone, listing repositories must fail.
		err = os.RemoveAll(dir)
		if err != nil {
			panic(err)
		}
		_, err = il.GetRepositories()
		So(err, ShouldNotBeNil)
	})

	Convey("Invalid get image tags", t, func(c C) {
		// A zero-valued ImageStoreFS has no root dir and must error.
		var ilfs storage.ImageStoreFS
		_, err := ilfs.GetImageTags("test")
		So(err, ShouldNotBeNil)

		dir, err := ioutil.TempDir("", "oci-repo-test")
		if err != nil {
			panic(err)
		}
		defer os.RemoveAll(dir)

		log := log.Logger{Logger: zerolog.New(os.Stdout)}
		metrics := monitoring.NewMetricsServer(false, log)
		il := storage.NewImageStore(dir, true, true, log, metrics)
		So(il, ShouldNotBeNil)
		So(il.InitRepo("test"), ShouldBeNil)

		// Missing index.json.
		So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil)
		_, err = il.GetImageTags("test")
		So(err, ShouldNotBeNil)

		// Empty (unparseable) index.json.
		So(os.RemoveAll(path.Join(dir, "test")), ShouldBeNil)
		So(il.InitRepo("test"), ShouldBeNil)
		So(ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0600), ShouldBeNil)
		_, err = il.GetImageTags("test")
		So(err, ShouldNotBeNil)
	})

	Convey("Invalid get image manifest", t, func(c C) {
		// A zero-valued ImageStoreFS has no root dir and must error.
		var ilfs storage.ImageStoreFS
		_, _, _, err := ilfs.GetImageManifest("test", "")
		So(err, ShouldNotBeNil)

		dir, err := ioutil.TempDir("", "oci-repo-test")
		if err != nil {
			panic(err)
		}
		defer os.RemoveAll(dir)

		log := log.Logger{Logger: zerolog.New(os.Stdout)}
		metrics := monitoring.NewMetricsServer(false, log)
		il := storage.NewImageStore(dir, true, true, log, metrics)
		So(il, ShouldNotBeNil)
		So(il.InitRepo("test"), ShouldBeNil)

		// Unreadable index.json.
		err = os.Chmod(path.Join(dir, "test", "index.json"), 0000)
		if err != nil {
			panic(err)
		}
		_, _, _, err = il.GetImageManifest("test", "")
		So(err, ShouldNotBeNil)

		// Missing index.json.
		err = os.Remove(path.Join(dir, "test", "index.json"))
		if err != nil {
			panic(err)
		}
		_, _, _, err = il.GetImageManifest("test", "")
		So(err, ShouldNotBeNil)

		err = os.RemoveAll(path.Join(dir, "test"))
		if err != nil {
			panic(err)
		}
		So(il.InitRepo("test"), ShouldBeNil)

		// Empty (unparseable) index.json.
		err = ioutil.WriteFile(path.Join(dir, "test", "index.json"), []byte{}, 0600)
		if err != nil {
			panic(err)
		}
		_, _, _, err = il.GetImageManifest("test", "")
		So(err, ShouldNotBeNil)
	})

	Convey("Invalid new blob upload", t, func(c C) {
		dir, err := ioutil.TempDir("", "oci-repo-test")
		if err != nil {
			panic(err)
		}
		defer os.RemoveAll(dir)

		log := log.Logger{Logger: zerolog.New(os.Stdout)}
		metrics := monitoring.NewMetricsServer(false, log)
		il := storage.NewImageStore(dir, true, true, log, metrics)
		So(il, ShouldNotBeNil)
		So(il.InitRepo("test"), ShouldBeNil)

		// Uploads dir inaccessible.
		err = os.Chmod(path.Join(dir, "test", ".uploads"), 0000)
		if err != nil {
			panic(err)
		}
		_, err = il.NewBlobUpload("test")
		So(err, ShouldNotBeNil)

		// Whole repo dir inaccessible.
		err = os.Chmod(path.Join(dir, "test"), 0000)
		if err != nil {
			panic(err)
		}
		_, err = il.NewBlobUpload("test")
		So(err, ShouldNotBeNil)

		err = os.Chmod(path.Join(dir, "test"), 0755)
		if err != nil {
			panic(err)
		}
		So(il.InitRepo("test"), ShouldBeNil)

		// Uploads dir is still 0000, so the upload must keep failing.
		_, err = il.NewBlobUpload("test")
		So(err, ShouldNotBeNil)

		err = os.Chmod(path.Join(dir, "test", ".uploads"), 0755)
		if err != nil {
			panic(err)
		}

		// Start a valid upload, then block the uploads dir again so that
		// writing chunks to the in-flight session fails.
		v, err := il.NewBlobUpload("test")
		So(err, ShouldBeNil)

		err = os.Chmod(path.Join(dir, "test", ".uploads"), 0000)
		if err != nil {
			panic(err)
		}

		content := []byte("test-data3")
		buf := bytes.NewBuffer(content)
		l := buf.Len()

		_, err = il.PutBlobChunkStreamed("test", v, buf)
		So(err, ShouldNotBeNil)

		_, err = il.PutBlobChunk("test", v, 0, int64(l), buf)
		So(err, ShouldNotBeNil)
	})

	Convey("Invalid dedupe scenarios", t, func() {
		dir, err := ioutil.TempDir("", "oci-repo-test")
		if err != nil {
			panic(err)
		}
		defer os.RemoveAll(dir)

		log := log.Logger{Logger: zerolog.New(os.Stdout)}
		metrics := monitoring.NewMetricsServer(false, log)
		il := storage.NewImageStore(dir, true, true, log, metrics)

		v, err := il.NewBlobUpload("dedupe1")
		So(err, ShouldBeNil)
		So(v, ShouldNotBeEmpty)

		content := []byte("test-data3")
		buf := bytes.NewBuffer(content)
		l := buf.Len()
		d := godigest.FromBytes(content)

		b, err := il.PutBlobChunkStreamed("dedupe1", v, buf)
		So(err, ShouldBeNil)
		So(b, ShouldEqual, l)

		blobDigest1 := strings.Split(d.String(), ":")[1]
		So(blobDigest1, ShouldNotBeEmpty)

		err = il.FinishBlobUpload("dedupe1", v, buf, d.String())
		So(err, ShouldBeNil)
		So(b, ShouldEqual, l)

		// Create a file at the same place where FinishBlobUpload will create
		err = il.InitRepo("dedupe2")
		So(err, ShouldBeNil)

		err = os.MkdirAll(path.Join(dir, "dedupe2", "blobs/sha256"), 0755)
		if err != nil {
			panic(err)
		}

		err = ioutil.WriteFile(path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1), content, 0755) // nolint: gosec
		if err != nil {
			panic(err)
		}

		v, err = il.NewBlobUpload("dedupe2")
		So(err, ShouldBeNil)
		So(v, ShouldNotBeEmpty)

		content = []byte("test-data3")
		buf = bytes.NewBuffer(content)
		l = buf.Len()
		d = godigest.FromBytes(content)

		b, err = il.PutBlobChunkStreamed("dedupe2", v, buf)
		So(err, ShouldBeNil)
		So(b, ShouldEqual, l)

		// Make the colliding blob immutable so FinishBlobUpload cannot
		// replace it; requires passwordless sudo in the test environment.
		cmd := exec.Command("sudo", "chattr", "+i", path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1)) // nolint: gosec
		_, err = cmd.Output()
		if err != nil {
			panic(err)
		}

		err = il.FinishBlobUpload("dedupe2", v, buf, d.String())
		So(err, ShouldNotBeNil)
		So(b, ShouldEqual, l)

		// Clear the immutable bit; the same finish must now succeed.
		cmd = exec.Command("sudo", "chattr", "-i", path.Join(dir, "dedupe2", "blobs/sha256", blobDigest1)) // nolint: gosec
		_, err = cmd.Output()
		if err != nil {
			panic(err)
		}

		err = il.FinishBlobUpload("dedupe2", v, buf, d.String())
		So(err, ShouldBeNil)
		So(b, ShouldEqual, l)
	})
}
// TestHardLink covers storage.ValidateHardLink: it should create a missing
// root directory, reject a root that is a regular file, and probe whether
// the filesystem supports hard links.
func TestHardLink(t *testing.T) {
	Convey("Test that ValidateHardLink creates rootDir if it does not exist", t, func() {
		var randomDir string

		// Keep generating random /tmp paths until one does not exist.
		rand.Seed(time.Now().UnixNano())
		for {
			randomLen := rand.Intn(100)
			randomDir = "/tmp/" + randSeq(randomLen)

			if _, err := os.Stat(randomDir); os.IsNotExist(err) {
				break
			}
		}
		defer os.RemoveAll(randomDir)

		err := storage.ValidateHardLink(randomDir)
		So(err, ShouldBeNil)
	})
	Convey("Test that ValidateHardLink returns error if rootDir is a file", t, func() {
		dir, err := ioutil.TempDir("", "storage-hard-test")
		if err != nil {
			panic(err)
		}
		defer os.RemoveAll(dir)

		filePath := path.Join(dir, "file.txt")
		err = ioutil.WriteFile(filePath, []byte("some dummy file content"), 0644) //nolint: gosec
		if err != nil {
			panic(err)
		}

		err = storage.ValidateHardLink(filePath)
		So(err, ShouldNotBeNil)
	})
	Convey("Test if filesystem supports hardlink", t, func() {
		dir, err := ioutil.TempDir("", "storage-hard-test")
		if err != nil {
			panic(err)
		}
		defer os.RemoveAll(dir)

		err = storage.ValidateHardLink(dir)
		So(err, ShouldBeNil)

		err = ioutil.WriteFile(path.Join(dir, "hardtest.txt"), []byte("testing hard link code"), 0644) //nolint: gosec
		if err != nil {
			panic(err)
		}

		// With the directory read-only, creating a hard link inside it must
		// fail (assumes the test is not running as root — TODO confirm).
		err = os.Chmod(dir, 0400)
		if err != nil {
			panic(err)
		}

		err = os.Link(path.Join(dir, "hardtest.txt"), path.Join(dir, "duphardtest.txt"))
		So(err, ShouldNotBeNil)

		// NOTE(review): 0644 on a directory leaves the execute bit unset;
		// presumably enough for the deferred RemoveAll here — verify.
		err = os.Chmod(dir, 0644)
		if err != nil {
			panic(err)
		}
	})
}
// randSeq returns a string of n pseudo-random ASCII letters ([a-zA-Z]),
// drawn from the package-level math/rand source.
func randSeq(n int) string {
	const alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = alphabet[rand.Intn(len(alphabet))]
	}

	return string(out)
}

File diff suppressed because it is too large Load diff