0
Fork 0
mirror of https://github.com/project-zot/zot.git synced 2024-12-16 21:56:37 -05:00

Merge pull request #83 from rchincha/compl

conformance: align with upstream conformance tests
This commit is contained in:
Tycho Andersen 2020-04-16 17:41:51 -06:00 committed by GitHub
commit b2338b4819
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
8 changed files with 131 additions and 54 deletions

View file

@ -0,0 +1,15 @@
{
"version":"0.1.0-dev",
"storage":{
"rootDirectory":"/tmp/zot",
"gc": false,
"dedupe": false
},
"http": {
"address":"127.0.0.1",
"port":"8080"
},
"log":{
"level":"debug"
}
}

View file

@ -12,6 +12,8 @@ var Commit string
// StorageConfig holds the settings for the on-disk image store, populated
// from the "storage" section of the config file (see the example config).
type StorageConfig struct {
RootDirectory string // filesystem root under which repositories and blobs are stored
GC bool // run OCI layout garbage collection after manifest put/delete
Dedupe bool // enable cache-backed, hard-link based blob deduplication
}
type TLSConfig struct {
@ -77,6 +79,7 @@ func NewConfig() *Config {
return &Config{
Version: dspec.Version,
Commit: Commit,
Storage: StorageConfig{GC: true, Dedupe: true},
HTTP: HTTPConfig{Address: "127.0.0.1", Port: "8080"},
Log: &LogConfig{Level: "debug"},
}

View file

@ -42,7 +42,8 @@ func (c *Controller) Run() error {
engine.Use(log.SessionLogger(c.Log), handlers.RecoveryHandler(handlers.RecoveryLogger(c.Log),
handlers.PrintRecoveryStack(false)))
c.ImageStore = storage.NewImageStore(c.Config.Storage.RootDirectory, c.Log)
c.ImageStore = storage.NewImageStore(c.Config.Storage.RootDirectory, c.Config.Storage.GC,
c.Config.Storage.Dedupe, c.Log)
if c.ImageStore == nil {
// we can't proceed without at least an image store
os.Exit(1)

View file

@ -222,7 +222,12 @@ func (rh *RouteHandler) ListTags(w http.ResponseWriter, r *http.Request) {
pTags.Tags = tags[i+1 : i+1+n]
}
last = pTags.Tags[len(pTags.Tags)-1]
if len(pTags.Tags) == 0 {
last = ""
} else {
last = pTags.Tags[len(pTags.Tags)-1]
}
w.Header().Set("Link", fmt.Sprintf("/v2/%s/tags/list?n=%d&last=%s; rel=\"next\"", name, n, last))
WriteJSON(w, http.StatusOK, pTags)

View file

@ -565,7 +565,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
// delete manifest by digest
resp, err = resty.R().Delete(baseURL + "/v2/repo7/manifests/" + digest.String())
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 202)
So(resp.StatusCode(), ShouldEqual, 404)
// delete again should fail
resp, err = resty.R().Delete(baseURL + "/v2/repo7/manifests/" + digest.String())
So(err, ShouldBeNil)
@ -656,6 +656,10 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
resp, err = resty.R().Get(baseURL + "/v2/page0/tags/list?n=0")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
resp, err = resty.R().Get(baseURL + "/v2/page0/tags/list?n=3")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 200)

View file

@ -154,6 +154,17 @@ func (c *Cache) DeleteBlob(digest string, path string) error {
return err
}
cur := b.Cursor()
k, _ := cur.First()
if k == nil {
c.log.Debug().Str("digest", digest).Str("path", path).Msg("deleting empty bucket")
if err := root.DeleteBucket([]byte(digest)); err != nil {
c.log.Error().Err(err).Str("digest", digest).Str("path", path).Msg("unable to delete")
return err
}
}
return nil
}); err != nil {
return err

View file

@ -38,11 +38,13 @@ type ImageStore struct {
lock *sync.RWMutex
blobUploads map[string]BlobUpload
cache *Cache
gc bool
dedupe bool
log zerolog.Logger
}
// NewImageStore returns a new image store backed by a file storage.
func NewImageStore(rootDir string, log zlog.Logger) *ImageStore {
func NewImageStore(rootDir string, gc bool, dedupe bool, log zlog.Logger) *ImageStore {
if _, err := os.Stat(rootDir); os.IsNotExist(err) {
if err := os.MkdirAll(rootDir, 0700); err != nil {
log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir")
@ -54,10 +56,15 @@ func NewImageStore(rootDir string, log zlog.Logger) *ImageStore {
rootDir: rootDir,
lock: &sync.RWMutex{},
blobUploads: make(map[string]BlobUpload),
cache: NewCache(rootDir, "cache", log),
gc: gc,
dedupe: dedupe,
log: log.With().Caller().Logger(),
}
if dedupe {
is.cache = NewCache(rootDir, "cache", log)
}
return is
}
@ -467,14 +474,16 @@ func (is *ImageStore) PutImageManifest(repo string, reference string, mediaType
return "", err
}
oci, err := umoci.OpenLayout(dir)
if err != nil {
return "", err
}
defer oci.Close()
if is.gc {
oci, err := umoci.OpenLayout(dir)
if err != nil {
return "", err
}
defer oci.Close()
if err := oci.GC(context.Background()); err != nil {
return "", err
if err := oci.GC(context.Background()); err != nil {
return "", err
}
}
return desc.Digest.String(), nil
@ -488,7 +497,7 @@ func (is *ImageStore) DeleteImageManifest(repo string, reference string) error {
}
// as per spec "reference" can only be a digest and not a tag
_, err := godigest.Parse(reference)
digest, err := godigest.Parse(reference)
if err != nil {
is.log.Error().Err(err).Msg("invalid reference")
return errors.ErrBadManifest
@ -509,33 +518,29 @@ func (is *ImageStore) DeleteImageManifest(repo string, reference string) error {
found := false
var digest godigest.Digest
var i int
var m ispec.Descriptor
for i, m = range index.Manifests {
if reference == m.Digest.String() {
digest = m.Digest
found = true
// we are deleting, so keep only those manifests that don't match
outIndex := index
outIndex.Manifests = []ispec.Descriptor{}
break
for _, m = range index.Manifests {
if reference == m.Digest.String() {
found = true
continue
}
outIndex.Manifests = append(outIndex.Manifests, m)
}
if !found {
return errors.ErrManifestNotFound
}
// remove the manifest entry, not preserving order
index.Manifests[i] = index.Manifests[len(index.Manifests)-1]
index.Manifests = index.Manifests[:len(index.Manifests)-1]
// now update "index.json"
dir = path.Join(is.rootDir, repo)
file := path.Join(dir, "index.json")
buf, err = json.Marshal(index)
buf, err = json.Marshal(outIndex)
if err != nil {
return err
@ -545,14 +550,16 @@ func (is *ImageStore) DeleteImageManifest(repo string, reference string) error {
return err
}
oci, err := umoci.OpenLayout(dir)
if err != nil {
return err
}
defer oci.Close()
if is.gc {
oci, err := umoci.OpenLayout(dir)
if err != nil {
return err
}
defer oci.Close()
if err := oci.GC(context.Background()); err != nil {
return err
if err := oci.GC(context.Background()); err != nil {
return err
}
}
p := path.Join(dir, "blobs", digest.Algorithm().String(), digest.Encoded())
@ -737,15 +744,21 @@ func (is *ImageStore) FinishBlobUpload(repo string, uuid string, body io.Reader,
ensureDir(dir, is.log)
dst := is.BlobPath(repo, dstDigest)
if is.cache != nil {
if is.dedupe && is.cache != nil {
if err := is.DedupeBlob(src, dstDigest, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to dedupe blob")
return err
}
} else {
if err := os.Rename(src, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to finish blob")
return err
}
}
return err
return nil
}
// FullBlobUpload handles a full blob upload, and no partial session is created
@ -796,57 +809,72 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, digest string)
ensureDir(dir, is.log)
dst := is.BlobPath(repo, dstDigest)
if is.cache != nil {
if is.dedupe && is.cache != nil {
if err := is.DedupeBlob(src, dstDigest, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to dedupe blob")
return "", -1, err
}
} else {
if err := os.Rename(src, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
Str("dst", dst).Msg("unable to finish blob")
return "", -1, err
}
}
return uuid, n, err
return uuid, n, nil
}
// nolint (interfacer)
func (is *ImageStore) DedupeBlob(src string, dstDigest godigest.Digest, dst string) error {
retry:
is.log.Debug().Str("src", src).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: ENTER")
dstRecord, err := is.cache.GetBlob(dstDigest.String())
if err != nil && err != errors.ErrCacheMiss {
is.log.Error().Err(err).Str("blobPath", dst).Msg("unable to lookup blob record")
is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to lookup blob record")
return err
}
if dstRecord == "" {
if err := is.cache.PutBlob(dstDigest.String(), dst); err != nil {
is.log.Error().Err(err).Str("blobPath", dst).Msg("unable to insert blob record")
is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record")
return err
}
// move the blob from uploads to final dest
if err := os.Rename(src, dst); err != nil {
is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("unable to rename blob")
is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("dedupe: unable to rename blob")
return err
}
is.log.Debug().Str("src", src).Str("dst", dst).Msg("dedupe: rename")
} else {
dstRecordFi, err := os.Stat(dstRecord)
if err != nil {
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("unable to stat")
return err
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to stat")
// the actual blob on disk may have been removed by GC, so sync the cache
if err := is.cache.DeleteBlob(dstDigest.String(), dst); err != nil {
is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: unable to delete blob record")
return err
}
goto retry
}
dstFi, err := os.Stat(dst)
if err != nil && !os.IsNotExist(err) {
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("unable to stat")
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to stat")
return err
}
if !os.SameFile(dstFi, dstRecordFi) {
if err := os.Link(dstRecord, dst); err != nil {
is.log.Error().Err(err).Str("blobPath", dst).Str("link", dstRecord).Msg("unable to hard link")
is.log.Error().Err(err).Str("blobPath", dst).Str("link", dstRecord).Msg("dedupe: unable to hard link")
return err
}
}
if err := os.Remove(src); err != nil {
is.log.Error().Err(err).Str("src", src).Msg("uname to remove blob")
is.log.Error().Err(err).Str("src", src).Msg("dedupe: unable to remove blob")
return err
}
is.log.Debug().Str("src", src).Msg("dedupe: remove")
}
return nil

View file

@ -27,7 +27,7 @@ func TestAPIs(t *testing.T) {
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
Convey("Repo layout", t, func(c C) {
repoName := "test"
@ -113,6 +113,12 @@ func TestAPIs(t *testing.T) {
mb, _ := json.Marshal(m)
Convey("Bad image manifest", func() {
_, err = il.PutImageManifest("test", d.String(), "application/json", mb)
So(err, ShouldNotBeNil)
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, []byte{})
So(err, ShouldNotBeNil)
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb)
So(err, ShouldNotBeNil)
@ -133,6 +139,7 @@ func TestAPIs(t *testing.T) {
Size: int64(l),
},
},
Annotations: map[string]string{ispec.AnnotationRefName: "1.0"},
}
m.SchemaVersion = 2
mb, _ = json.Marshal(m)
@ -140,6 +147,9 @@ func TestAPIs(t *testing.T) {
_, err = il.PutImageManifest("test", d.String(), ispec.MediaTypeImageManifest, mb)
So(err, ShouldBeNil)
_, err = il.GetImageTags("test")
So(err, ShouldBeNil)
_, _, _, err = il.GetImageManifest("test", d.String())
So(err, ShouldBeNil)
@ -394,7 +404,7 @@ func TestDedupe(t *testing.T) {
}
defer os.RemoveAll(dir)
is := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
is := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
So(is.DedupeBlob("", "", ""), ShouldNotBeNil)
})
@ -409,8 +419,8 @@ func TestNegativeCases(t *testing.T) {
}
os.RemoveAll(dir)
So(storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldNotBeNil)
So(storage.NewImageStore("/deadBEEF", log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldBeNil)
So(storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldNotBeNil)
So(storage.NewImageStore("/deadBEEF", true, true, log.Logger{Logger: zerolog.New(os.Stdout)}), ShouldBeNil)
})
Convey("Invalid init repo", t, func(c C) {
@ -419,7 +429,7 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
err = os.Chmod(dir, 0000) // remove all perms
So(err, ShouldBeNil)
So(func() { _ = il.InitRepo("test") }, ShouldPanic)
@ -431,7 +441,7 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
defer os.RemoveAll(dir)
il := storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
il := storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
files, err := ioutil.ReadDir(path.Join(dir, "test"))
@ -462,7 +472,7 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
defer os.RemoveAll(dir)
il = storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
il = storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil)
@ -485,7 +495,7 @@ func TestNegativeCases(t *testing.T) {
panic(err)
}
defer os.RemoveAll(dir)
il = storage.NewImageStore(dir, log.Logger{Logger: zerolog.New(os.Stdout)})
il = storage.NewImageStore(dir, true, true, log.Logger{Logger: zerolog.New(os.Stdout)})
So(il, ShouldNotBeNil)
So(il.InitRepo("test"), ShouldBeNil)
So(os.Remove(path.Join(dir, "test", "index.json")), ShouldBeNil)