mirror of https://github.com/project-zot/zot.git

compliance: be compliant with dist-spec compliance tests

The dist-spec compliance tests are now becoming part of the dist-spec
repo itself, and we want to be compliant with them.

pkg/api/regex.go:
	* revert uppercasing in repository names

pkg/api/routes.go:
	* ListTags() should support the URL params 'n' and 'last'
	  for pagination (a client-side paging sketch follows below)

	* s/uuid/session_id/g to use the dist-spec's naming

	* Fix off-by-one error in GetBlobUpload()'s http response "Range" header

	* DeleteManifest() success status code is 202

	* Fix PatchBlobUpload() to account for the "streamed" use case,
	  where neither the "Content-Length" nor the "Content-Range" header is set

pkg/storage/storage.go:
	* Add a "streamed" version of PutBlobChunk() called PutBlobChunkStreamed()

pkg/compliance/v1_0_0/check.go:
	* fix unit tests to account for changed response status codes
Author: Ramkumar Chinchani, 2020-01-09 16:31:34 -08:00
parent 92d8f7c866
commit 964af6ba51
5 changed files with 295 additions and 97 deletions
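
For illustration, a minimal client-side sketch of the new tag pagination (not from this commit; it assumes a zot instance on localhost:8080 and a repository named "page0", as used by the compliance test below): request the first page with ?n=<count>, then follow the Link header that ListTags() returns until it comes back empty.

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	base := "http://localhost:8080" // assumed local zot instance
	url := base + "/v2/page0/tags/list?n=3"
	for {
		resp, err := http.Get(url)
		if err != nil {
			panic(err)
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(string(body)) // one page: {"name":"page0","tags":[...]}
		// ListTags() sets Link to `/v2/<name>/tags/list?n=<n>&last=<tag>; rel="next"`
		// while more tags remain; an empty Link marks the last page.
		link := resp.Header.Get("Link")
		if link == "" {
			break
		}
		url = base + strings.Split(link, ";")[0]
	}
}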

example zot configuration file (path not shown)

@ -4,7 +4,7 @@
"rootDirectory":"/tmp/zot"
},
"http": {
"address":"0.0.0.0",
"address":"127.0.0.1",
"port":"8080"
},
"log":{

pkg/api/regex.go

@ -6,7 +6,7 @@ import "regexp"
var (
// alphaNumericRegexp defines the alpha numeric atom, typically a
// component of names. This only allows lower case characters and digits.
alphaNumericRegexp = match(`[a-zA-Z0-9]+`)
alphaNumericRegexp = match(`[a-z0-9]+`)
// separatorRegexp defines the separators allowed to be embedded in name
// components. This allow one period, one or two underscore and multiple

pkg/api/routes.go

@ -17,6 +17,7 @@ import (
"io/ioutil"
"net/http"
"path"
"sort"
"strconv"
"strings"
@ -35,6 +36,7 @@ const (
DistContentDigestKey = "Docker-Content-Digest"
BlobUploadUUID = "Blob-Upload-UUID"
DefaultMediaType = "application/json"
BinaryMediaType = "application/octet-stream"
)
type RouteHandler struct {
@ -70,13 +72,13 @@ func (rh *RouteHandler) SetupRoutes() {
rh.DeleteBlob).Methods("DELETE")
g.HandleFunc(fmt.Sprintf("/{name:%s}/blobs/uploads/", NameRegexp.String()),
rh.CreateBlobUpload).Methods("POST")
g.HandleFunc(fmt.Sprintf("/{name:%s}/blobs/uploads/{uuid}", NameRegexp.String()),
g.HandleFunc(fmt.Sprintf("/{name:%s}/blobs/uploads/{session_id}", NameRegexp.String()),
rh.GetBlobUpload).Methods("GET")
g.HandleFunc(fmt.Sprintf("/{name:%s}/blobs/uploads/{uuid}", NameRegexp.String()),
g.HandleFunc(fmt.Sprintf("/{name:%s}/blobs/uploads/{session_id}", NameRegexp.String()),
rh.PatchBlobUpload).Methods("PATCH")
g.HandleFunc(fmt.Sprintf("/{name:%s}/blobs/uploads/{uuid}", NameRegexp.String()),
g.HandleFunc(fmt.Sprintf("/{name:%s}/blobs/uploads/{session_id}", NameRegexp.String()),
rh.UpdateBlobUpload).Methods("PUT")
g.HandleFunc(fmt.Sprintf("/{name:%s}/blobs/uploads/{uuid}", NameRegexp.String()),
g.HandleFunc(fmt.Sprintf("/{name:%s}/blobs/uploads/{session_id}", NameRegexp.String()),
rh.DeleteBlobUpload).Methods("DELETE")
g.HandleFunc("/_catalog",
rh.ListRepositories).Methods("GET")
@ -113,8 +115,11 @@ type ImageTags struct {
// @Accept json
// @Produce json
// @Param name path string true "test"
// @Param n query integer true "limit entries for pagination"
// @Param last query string true "last tag value for pagination"
// @Success 200 {object} api.ImageTags
// @Failure 404 {string} string "not found"
// @Failure 400 {string} string "bad request"
func (rh *RouteHandler) ListTags(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name, ok := vars["name"]
@ -124,12 +129,86 @@ func (rh *RouteHandler) ListTags(w http.ResponseWriter, r *http.Request) {
return
}
paginate := false
n := -1
var err error
nQuery, ok := r.URL.Query()["n"]
if ok {
if len(nQuery) != 1 {
w.WriteHeader(http.StatusBadRequest)
return
}
var n1 int64
if n1, err = strconv.ParseInt(nQuery[0], 10, 0); err != nil {
w.WriteHeader(http.StatusBadRequest)
return
}
n = int(n1)
paginate = true
}
last := ""
lastQuery, ok := r.URL.Query()["last"]
if ok {
if len(lastQuery) != 1 {
w.WriteHeader(http.StatusBadRequest)
return
}
last = lastQuery[0]
}
tags, err := rh.c.ImageStore.GetImageTags(name)
if err != nil {
WriteJSON(w, http.StatusNotFound, NewError(NAME_UNKNOWN, map[string]string{"name": name}))
return
}
if paginate && (n < len(tags)) {
sort.Strings(tags)
pTags := ImageTags{Name: name}
if last == "" {
// first
pTags.Tags = tags[:n]
} else {
// next
i := -1
tag := ""
found := false
for i, tag = range tags {
if tag == last {
found = true
break
}
}
if !found {
w.WriteHeader(http.StatusNotFound)
return
}
if n >= len(tags)-i {
pTags.Tags = tags[i+1:]
WriteJSON(w, http.StatusOK, pTags)
return
}
pTags.Tags = tags[i+1 : i+1+n]
}
last = pTags.Tags[len(pTags.Tags)-1]
w.Header().Set("Link", fmt.Sprintf("/v2/%s/tags/list?n=%d&last=%s; rel=\"next\"", name, n, last))
WriteJSON(w, http.StatusOK, pTags)
return
}
WriteJSON(w, http.StatusOK, ImageTags{Name: name, Tags: tags})
}
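
A self-contained walk-through of the slicing above (an illustrative sketch, not code from this commit): with five tags and n=2, the first page is tags[:n], each later page starts just past the index of last, and the final page returns whatever remains.

package main

import (
	"fmt"
	"sort"
)

// page reproduces the pagination arithmetic used by ListTags(); the handler
// scans for `last` linearly, a binary search works here because tags are sorted.
func page(tags []string, n int, last string) []string {
	sort.Strings(tags)
	if last == "" {
		return tags[:n] // first page
	}
	i := sort.SearchStrings(tags, last) // index of `last`, assumed present
	if n >= len(tags)-i {
		return tags[i+1:] // final page: whatever is left
	}
	return tags[i+1 : i+1+n]
}

func main() {
	tags := []string{"1.0", "2.0", "3.0", "4.0", "5.0"}
	fmt.Println(page(tags, 2, ""))    // [1.0 2.0]
	fmt.Println(page(tags, 2, "2.0")) // [3.0 4.0]
	fmt.Println(page(tags, 2, "4.0")) // [5.0]
}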
@ -336,7 +415,7 @@ func (rh *RouteHandler) DeleteManifest(w http.ResponseWriter, r *http.Request) {
return
}
w.WriteHeader(http.StatusOK)
w.WriteHeader(http.StatusAccepted)
}
// CheckBlob godoc
@ -494,7 +573,7 @@ func (rh *RouteHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) {
// @Produce json
// @Param name path string true "repository name"
// @Success 202 {string} string "accepted"
// @Header 202 {string} Location "/v2/{name}/blobs/uploads/{uuid}"
// @Header 202 {string} Location "/v2/{name}/blobs/uploads/{session_id}"
// @Header 202 {string} Range "bytes=0-0"
// @Failure 404 {string} string "not found"
// @Failure 500 {string} string "internal server error"
@ -540,17 +619,17 @@ func (rh *RouteHandler) CreateBlobUpload(w http.ResponseWriter, r *http.Request)
// GetBlobUpload godoc
// @Summary Get image blob/layer upload
// @Description Get an image's blob/layer upload given a uuid
// @Description Get an image's blob/layer upload given a session_id
// @Accept json
// @Produce json
// @Param name path string true "repository name"
// @Param uuid path string true "upload uuid"
// @Param session_id path string true "upload session_id"
// @Success 204 {string} string "no content"
// @Header 202 {string} Location "/v2/{name}/blobs/uploads/{uuid}"
// @Header 202 {string} Location "/v2/{name}/blobs/uploads/{session_id}"
// @Header 202 {string} Range "bytes=0-128"
// @Failure 404 {string} string "not found"
// @Failure 500 {string} string "internal server error"
// @Router /v2/{name}/blobs/uploads/{uuid} [get]
// @Router /v2/{name}/blobs/uploads/{session_id} [get]
func (rh *RouteHandler) GetBlobUpload(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name, ok := vars["name"]
@ -560,23 +639,23 @@ func (rh *RouteHandler) GetBlobUpload(w http.ResponseWriter, r *http.Request) {
return
}
uuid, ok := vars["uuid"]
if !ok || uuid == "" {
sessionID, ok := vars["session_id"]
if !ok || sessionID == "" {
w.WriteHeader(http.StatusNotFound)
return
}
size, err := rh.c.ImageStore.GetBlobUpload(name, uuid)
size, err := rh.c.ImageStore.GetBlobUpload(name, sessionID)
if err != nil {
switch err {
case errors.ErrBadUploadRange:
WriteJSON(w, http.StatusBadRequest, NewError(BLOB_UPLOAD_INVALID, map[string]string{"uuid": uuid}))
WriteJSON(w, http.StatusBadRequest, NewError(BLOB_UPLOAD_INVALID, map[string]string{"session_id": sessionID}))
case errors.ErrBadBlobDigest:
WriteJSON(w, http.StatusBadRequest, NewError(BLOB_UPLOAD_INVALID, map[string]string{"uuid": uuid}))
WriteJSON(w, http.StatusBadRequest, NewError(BLOB_UPLOAD_INVALID, map[string]string{"session_id": sessionID}))
case errors.ErrRepoNotFound:
WriteJSON(w, http.StatusNotFound, NewError(NAME_UNKNOWN, map[string]string{"name": name}))
case errors.ErrUploadNotFound:
WriteJSON(w, http.StatusNotFound, NewError(BLOB_UPLOAD_UNKNOWN, map[string]string{"uuid": uuid}))
WriteJSON(w, http.StatusNotFound, NewError(BLOB_UPLOAD_UNKNOWN, map[string]string{"session_id": sessionID}))
default:
rh.c.Log.Error().Err(err).Msg("unexpected error")
w.WriteHeader(http.StatusInternalServerError)
@ -585,27 +664,27 @@ func (rh *RouteHandler) GetBlobUpload(w http.ResponseWriter, r *http.Request) {
return
}
w.Header().Set("Location", path.Join(r.URL.String(), uuid))
w.Header().Set("Range", fmt.Sprintf("bytes=0-%d", size))
w.Header().Set("Location", path.Join(r.URL.String(), sessionID))
w.Header().Set("Range", fmt.Sprintf("bytes=0-%d", size-1))
w.WriteHeader(http.StatusNoContent)
}
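
A tiny illustration of the Range fix above (editor's sketch): HTTP byte ranges are inclusive, so an upload that currently holds size bytes spans offsets 0 through size-1.

package main

import "fmt"

func main() {
	size := int64(128) // bytes received so far for this upload session
	fmt.Printf("bytes=0-%d\n", size)   // previous header: "bytes=0-128", one past the end
	fmt.Printf("bytes=0-%d\n", size-1) // fixed header: "bytes=0-127"
}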
// PatchBlobUpload godoc
// @Summary Resume image blob/layer upload
// @Description Resume an image's blob/layer upload given an uuid
// @Description Resume an image's blob/layer upload given an session_id
// @Accept json
// @Produce json
// @Param name path string true "repository name"
// @Param uuid path string true "upload uuid"
// @Param session_id path string true "upload session_id"
// @Success 202 {string} string "accepted"
// @Header 202 {string} Location "/v2/{name}/blobs/uploads/{uuid}"
// @Header 202 {string} Location "/v2/{name}/blobs/uploads/{session_id}"
// @Header 202 {string} Range "bytes=0-128"
// @Header 200 {object} api.BlobUploadUUID
// @Failure 400 {string} string "bad request"
// @Failure 404 {string} string "not found"
// @Failure 416 {string} string "range not satisfiable"
// @Failure 500 {string} string "internal server error"
// @Router /v2/{name}/blobs/uploads/{uuid} [patch]
// @Router /v2/{name}/blobs/uploads/{session_id} [patch]
func (rh *RouteHandler) PatchBlobUpload(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name, ok := vars["name"]
@ -615,53 +694,63 @@ func (rh *RouteHandler) PatchBlobUpload(w http.ResponseWriter, r *http.Request)
return
}
uuid, ok := vars["uuid"]
if !ok || uuid == "" {
sessionID, ok := vars["session_id"]
if !ok || sessionID == "" {
w.WriteHeader(http.StatusNotFound)
return
}
var err error
var contentLength int64
if contentLength, err = strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64); err != nil {
rh.c.Log.Warn().Str("actual", r.Header.Get("Content-Length")).Msg("invalid content length")
w.WriteHeader(http.StatusBadRequest)
return
}
contentRange := r.Header.Get("Content-Range")
if contentRange == "" {
rh.c.Log.Warn().Str("actual", r.Header.Get("Content-Range")).Msg("invalid content range")
w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
return
}
var from, to int64
if from, to, err = getContentRange(r); err != nil || (to-from) != contentLength {
w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
return
}
if contentType := r.Header.Get("Content-Type"); contentType != "application/octet-stream" {
rh.c.Log.Warn().Str("actual", contentType).Str("expected", "application/octet-stream").Msg("invalid media type")
if contentType := r.Header.Get("Content-Type"); contentType != BinaryMediaType {
rh.c.Log.Warn().Str("actual", contentType).Str("expected", BinaryMediaType).Msg("invalid media type")
w.WriteHeader(http.StatusUnsupportedMediaType)
return
}
clen, err := rh.c.ImageStore.PutBlobChunk(name, uuid, from, to, r.Body)
var err error
var clen int64
if r.Header.Get("Content-Length") == "" || r.Header.Get("Content-Range") == "" {
// streamed blob upload
clen, err = rh.c.ImageStore.PutBlobChunkStreamed(name, sessionID, r.Body)
} else {
// chunked blob upload
var contentLength int64
if contentLength, err = strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64); err != nil {
rh.c.Log.Warn().Str("actual", r.Header.Get("Content-Length")).Msg("invalid content length")
w.WriteHeader(http.StatusBadRequest)
return
}
contentRange := r.Header.Get("Content-Range")
if contentRange == "" {
rh.c.Log.Warn().Str("actual", r.Header.Get("Content-Range")).Msg("invalid content range")
w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
return
}
var from, to int64
if from, to, err = getContentRange(r); err != nil || (to-from)+1 != contentLength {
w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
return
}
clen, err = rh.c.ImageStore.PutBlobChunk(name, sessionID, from, to, r.Body)
}
if err != nil {
switch err {
case errors.ErrBadUploadRange:
WriteJSON(w, http.StatusBadRequest, NewError(BLOB_UPLOAD_INVALID, map[string]string{"uuid": uuid}))
WriteJSON(w, http.StatusBadRequest, NewError(BLOB_UPLOAD_INVALID, map[string]string{"session_id": sessionID}))
case errors.ErrRepoNotFound:
WriteJSON(w, http.StatusNotFound, NewError(NAME_UNKNOWN, map[string]string{"name": name}))
case errors.ErrUploadNotFound:
WriteJSON(w, http.StatusNotFound, NewError(BLOB_UPLOAD_UNKNOWN, map[string]string{"uuid": uuid}))
WriteJSON(w, http.StatusNotFound, NewError(BLOB_UPLOAD_UNKNOWN, map[string]string{"session_id": sessionID}))
default:
rh.c.Log.Error().Err(err).Msg("unexpected error")
w.WriteHeader(http.StatusInternalServerError)
@ -670,10 +759,10 @@ func (rh *RouteHandler) PatchBlobUpload(w http.ResponseWriter, r *http.Request)
return
}
w.Header().Set("Location", path.Join(r.URL.String(), uuid))
w.Header().Set("Range", fmt.Sprintf("bytes=0-%d", clen))
w.Header().Set("Location", r.URL.String())
w.Header().Set("Range", fmt.Sprintf("bytes=0-%d", clen-1))
w.Header().Set("Content-Length", "0")
w.Header().Set(BlobUploadUUID, uuid)
w.Header().Set(BlobUploadUUID, sessionID)
w.WriteHeader(http.StatusAccepted)
}
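
The two request shapes the handler above now distinguishes, as a hedged sketch (the upload URL is an assumption standing in for the Location returned by POST /v2/<name>/blobs/uploads/): a streamed PATCH omits Content-Range, while a chunked PATCH supplies an inclusive Content-Range.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// loc stands in for the upload session URL returned in the Location
	// header of the initial POST /v2/<name>/blobs/uploads/ request.
	loc := "http://localhost:8080/v2/repo/blobs/uploads/abc123"
	chunk := []byte("some layer bytes")

	// Streamed upload: no Content-Range header, so PatchBlobUpload() takes
	// the PutBlobChunkStreamed() branch.
	streamed, _ := http.NewRequest(http.MethodPatch, loc, bytes.NewReader(chunk))
	streamed.Header.Set("Content-Type", "application/octet-stream")

	// Chunked upload: an inclusive Content-Range selects the PutBlobChunk()
	// path; net/http derives the matching Content-Length from the body.
	chunked, _ := http.NewRequest(http.MethodPatch, loc, bytes.NewReader(chunk))
	chunked.Header.Set("Content-Type", "application/octet-stream")
	chunked.Header.Set("Content-Range", fmt.Sprintf("%d-%d", 0, len(chunk)-1))

	for _, req := range []*http.Request{streamed, chunked} {
		fmt.Println(req.Method, req.URL.Path, req.Header)
	}
}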
@ -683,15 +772,16 @@ func (rh *RouteHandler) PatchBlobUpload(w http.ResponseWriter, r *http.Request)
// @Accept json
// @Produce json
// @Param name path string true "repository name"
// @Param uuid path string true "upload uuid"
// @Param session_id path string true "upload session_id"
// @Param digest query string true "blob/layer digest"
// @Success 201 {string} string "created"
// @Header 202 {string} Location "/v2/{name}/blobs/{digest}"
// @Header 202 {string} Location "/v2/{name}/blobs/uploads/{digest}"
// @Header 200 {object} api.DistContentDigestKey
// @Failure 404 {string} string "not found"
// @Failure 500 {string} string "internal server error"
// @Router /v2/{name}/blobs/uploads/{uuid} [put]
// @Router /v2/{name}/blobs/uploads/{session_id} [put]
func (rh *RouteHandler) UpdateBlobUpload(w http.ResponseWriter, r *http.Request) {
rh.c.Log.Info().Interface("headers", r.Header).Msg("HEADERS")
vars := mux.Vars(r)
name, ok := vars["name"]
@ -700,8 +790,8 @@ func (rh *RouteHandler) UpdateBlobUpload(w http.ResponseWriter, r *http.Request)
return
}
uuid, ok := vars["uuid"]
if !ok || uuid == "" {
sessionID, ok := vars["session_id"]
if !ok || sessionID == "" {
w.WriteHeader(http.StatusNotFound)
return
}
@ -714,10 +804,12 @@ func (rh *RouteHandler) UpdateBlobUpload(w http.ResponseWriter, r *http.Request)
digest := digests[0]
rh.c.Log.Info().Int64("r.ContentLength", r.ContentLength).Msg("DEBUG")
contentPresent := true
contentLen, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64)
if err != nil || contentLen == 0 {
if err != nil {
contentPresent = false
}
@ -737,18 +829,12 @@ func (rh *RouteHandler) UpdateBlobUpload(w http.ResponseWriter, r *http.Request)
var from, to int64
if contentPresent {
if r.Header.Get("Content-Type") != "application/octet-stream" {
w.WriteHeader(http.StatusUnsupportedMediaType)
return
}
contentRange := r.Header.Get("Content-Range")
if contentRange == "" { // monolithic upload
from = 0
if contentLen == 0 {
w.WriteHeader(http.StatusBadRequest)
return
goto finish // FIXME:
}
to = contentLen
@ -757,15 +843,20 @@ func (rh *RouteHandler) UpdateBlobUpload(w http.ResponseWriter, r *http.Request)
return
}
_, err = rh.c.ImageStore.PutBlobChunk(name, uuid, from, to, r.Body)
if r.Header.Get("Content-Type") != BinaryMediaType {
w.WriteHeader(http.StatusUnsupportedMediaType)
return
}
_, err = rh.c.ImageStore.PutBlobChunk(name, sessionID, from, to, r.Body)
if err != nil {
switch err {
case errors.ErrBadUploadRange:
WriteJSON(w, http.StatusBadRequest, NewError(BLOB_UPLOAD_INVALID, map[string]string{"uuid": uuid}))
WriteJSON(w, http.StatusBadRequest, NewError(BLOB_UPLOAD_INVALID, map[string]string{"session_id": sessionID}))
case errors.ErrRepoNotFound:
WriteJSON(w, http.StatusNotFound, NewError(NAME_UNKNOWN, map[string]string{"name": name}))
case errors.ErrUploadNotFound:
WriteJSON(w, http.StatusNotFound, NewError(BLOB_UPLOAD_UNKNOWN, map[string]string{"uuid": uuid}))
WriteJSON(w, http.StatusNotFound, NewError(BLOB_UPLOAD_UNKNOWN, map[string]string{"session_id": sessionID}))
default:
rh.c.Log.Error().Err(err).Msg("unexpected error")
w.WriteHeader(http.StatusInternalServerError)
@ -775,17 +866,22 @@ func (rh *RouteHandler) UpdateBlobUpload(w http.ResponseWriter, r *http.Request)
}
}
finish:
if r.Header.Get("Content-Type") != BinaryMediaType {
w.WriteHeader(http.StatusUnsupportedMediaType)
return
}
// blob chunks already transferred, just finish
if err := rh.c.ImageStore.FinishBlobUpload(name, uuid, r.Body, digest); err != nil {
if err := rh.c.ImageStore.FinishBlobUpload(name, sessionID, r.Body, digest); err != nil {
switch err {
case errors.ErrBadBlobDigest:
WriteJSON(w, http.StatusBadRequest, NewError(DIGEST_INVALID, map[string]string{"digest": digest}))
case errors.ErrBadUploadRange:
WriteJSON(w, http.StatusBadRequest, NewError(BLOB_UPLOAD_INVALID, map[string]string{"uuid": uuid}))
WriteJSON(w, http.StatusBadRequest, NewError(BLOB_UPLOAD_INVALID, map[string]string{"session_id": sessionID}))
case errors.ErrRepoNotFound:
WriteJSON(w, http.StatusNotFound, NewError(NAME_UNKNOWN, map[string]string{"name": name}))
case errors.ErrUploadNotFound:
WriteJSON(w, http.StatusNotFound, NewError(BLOB_UPLOAD_UNKNOWN, map[string]string{"uuid": uuid}))
WriteJSON(w, http.StatusNotFound, NewError(BLOB_UPLOAD_UNKNOWN, map[string]string{"session_id": sessionID}))
default:
rh.c.Log.Error().Err(err).Msg("unexpected error")
w.WriteHeader(http.StatusInternalServerError)
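
The matching PUT side, under the same assumptions: a monolithic upload carries the whole blob plus ?digest=..., while a session already filled by PATCH chunks is closed with an empty-bodied PUT that only names the digest, which UpdateBlobUpload() hands to FinishBlobUpload().

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	loc := "http://localhost:8080/v2/repo/blobs/uploads/abc123" // assumed session URL
	blob := []byte("entire blob contents")
	digest := "sha256:placeholder" // the blob digest, computed by the client

	// Monolithic upload: one PUT with the full body and ?digest=<digest>.
	mono, _ := http.NewRequest(http.MethodPut, loc+"?digest="+digest, bytes.NewReader(blob))
	mono.Header.Set("Content-Type", "application/octet-stream")

	// Finish a chunked/streamed session: same URL and digest, empty body.
	finish, _ := http.NewRequest(http.MethodPut, loc+"?digest="+digest, nil)
	finish.Header.Set("Content-Type", "application/octet-stream")

	fmt.Println(mono.Method, mono.URL.String(), mono.ContentLength)
	fmt.Println(finish.Method, finish.URL.String(), finish.ContentLength)
}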
@ -806,11 +902,11 @@ func (rh *RouteHandler) UpdateBlobUpload(w http.ResponseWriter, r *http.Request)
// @Accept json
// @Produce json
// @Param name path string true "repository name"
// @Param uuid path string true "upload uuid"
// @Param session_id path string true "upload session_id"
// @Success 200 {string} string "ok"
// @Failure 404 {string} string "not found"
// @Failure 500 {string} string "internal server error"
// @Router /v2/{name}/blobs/uploads/{uuid} [delete]
// @Router /v2/{name}/blobs/uploads/{session_id} [delete]
func (rh *RouteHandler) DeleteBlobUpload(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name, ok := vars["name"]
@ -820,18 +916,18 @@ func (rh *RouteHandler) DeleteBlobUpload(w http.ResponseWriter, r *http.Request)
return
}
uuid, ok := vars["uuid"]
if !ok || uuid == "" {
sessionID, ok := vars["session_id"]
if !ok || sessionID == "" {
w.WriteHeader(http.StatusNotFound)
return
}
if err := rh.c.ImageStore.DeleteBlobUpload(name, uuid); err != nil {
if err := rh.c.ImageStore.DeleteBlobUpload(name, sessionID); err != nil {
switch err {
case errors.ErrRepoNotFound:
WriteJSON(w, http.StatusNotFound, NewError(NAME_UNKNOWN, map[string]string{"name": name}))
case errors.ErrUploadNotFound:
WriteJSON(w, http.StatusNotFound, NewError(BLOB_UPLOAD_UNKNOWN, map[string]string{"uuid": uuid}))
WriteJSON(w, http.StatusNotFound, NewError(BLOB_UPLOAD_UNKNOWN, map[string]string{"session_id": sessionID}))
default:
rh.c.Log.Error().Err(err).Msg("unexpected error")
w.WriteHeader(http.StatusInternalServerError)

pkg/compliance/v1_0_0/check.go

@ -144,7 +144,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
// without the Content-Length should fail
resp, err = resty.R().SetQueryParam("digest", digest.String()).Put(loc)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 400)
So(resp.StatusCode(), ShouldEqual, 415)
// without any data to send, should fail
resp, err = resty.R().SetQueryParam("digest", digest.String()).
SetHeader("Content-Type", "application/octet-stream").Put(loc)
@ -199,7 +199,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
// without the Content-Length should fail
resp, err = resty.R().SetQueryParam("digest", digest.String()).Put(loc)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 400)
So(resp.StatusCode(), ShouldEqual, 415)
// without any data to send, should fail
resp, err = resty.R().SetQueryParam("digest", digest.String()).
SetHeader("Content-Type", "application/octet-stream").Put(loc)
@ -239,7 +239,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(err, ShouldBeNil)
// write first chunk
contentRange := fmt.Sprintf("%d-%d", 0, len(chunk1))
contentRange := fmt.Sprintf("%d-%d", 0, len(chunk1)-1)
resp, err = resty.R().SetHeader("Content-Type", "application/octet-stream").
SetHeader("Content-Range", contentRange).SetBody(chunk1).Patch(loc)
So(err, ShouldBeNil)
@ -254,7 +254,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(r, ShouldEqual, "bytes="+contentRange)
// write same chunk should fail
contentRange = fmt.Sprintf("%d-%d", 0, len(chunk1))
contentRange = fmt.Sprintf("%d-%d", 0, len(chunk1)-1)
resp, err = resty.R().SetHeader("Content-Type", "application/octet-stream").
SetHeader("Content-Range", contentRange).SetBody(chunk1).Patch(loc)
So(err, ShouldBeNil)
@ -270,7 +270,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(digest, ShouldNotBeNil)
// write final chunk
contentRange = fmt.Sprintf("%d-%d", len(chunk1), len(buf.Bytes()))
contentRange = fmt.Sprintf("%d-%d", len(chunk1), len(buf.Bytes())-1)
resp, err = resty.R().SetQueryParam("digest", digest.String()).
SetHeader("Content-Range", contentRange).
SetHeader("Content-Type", "application/octet-stream").SetBody(chunk2).Put(loc)
@ -307,7 +307,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(err, ShouldBeNil)
// write first chunk
contentRange := fmt.Sprintf("%d-%d", 0, len(chunk1))
contentRange := fmt.Sprintf("%d-%d", 0, len(chunk1)-1)
resp, err = resty.R().SetHeader("Content-Type", "application/octet-stream").
SetHeader("Content-Range", contentRange).SetBody(chunk1).Patch(loc)
So(err, ShouldBeNil)
@ -322,7 +322,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(r, ShouldEqual, "bytes="+contentRange)
// write same chunk should fail
contentRange = fmt.Sprintf("%d-%d", 0, len(chunk1))
contentRange = fmt.Sprintf("%d-%d", 0, len(chunk1)-1)
resp, err = resty.R().SetHeader("Content-Type", "application/octet-stream").
SetHeader("Content-Range", contentRange).SetBody(chunk1).Patch(loc)
So(err, ShouldBeNil)
@ -338,7 +338,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(digest, ShouldNotBeNil)
// write final chunk
contentRange = fmt.Sprintf("%d-%d", len(chunk1), len(buf.Bytes()))
contentRange = fmt.Sprintf("%d-%d", len(chunk1), len(buf.Bytes())-1)
resp, err = resty.R().SetQueryParam("digest", digest.String()).
SetHeader("Content-Range", contentRange).
SetHeader("Content-Type", "application/octet-stream").SetBody(chunk2).Put(loc)
@ -470,7 +470,7 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
// delete manifest
resp, err = resty.R().Delete(baseURL + "/v2/repo7/manifests/test:1.0")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
So(resp.StatusCode(), ShouldEqual, 202)
// delete again should fail
resp, err = resty.R().Delete(baseURL + "/v2/repo7/manifests/" + digest.String())
So(err, ShouldBeNil)
@ -494,6 +494,67 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
So(resp.Body(), ShouldNotBeEmpty)
})
// pagination
Convey("Pagination", func() {
Print("\nPagination")
for i := 0; i <= 4; i++ {
// create a blob/layer
resp, err := resty.R().Post(baseURL + "/v2/page0/blobs/uploads/")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 202)
loc := Location(baseURL, resp)
So(loc, ShouldNotBeEmpty)
resp, err = resty.R().Get(loc)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 204)
content := []byte("this is a blob")
digest := godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
// monolithic blob upload: success
resp, err = resty.R().SetQueryParam("digest", digest.String()).
SetHeader("Content-Type", "application/octet-stream").SetBody(content).Put(loc)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 201)
blobLoc := resp.Header().Get("Location")
So(blobLoc, ShouldNotBeEmpty)
So(resp.Header().Get("Content-Length"), ShouldEqual, "0")
So(resp.Header().Get(api.DistContentDigestKey), ShouldNotBeEmpty)
// create a manifest
m := ispec.Manifest{Layers: []ispec.Descriptor{{Digest: digest}}}
content, err = json.Marshal(m)
So(err, ShouldBeNil)
digest = godigest.FromBytes(content)
So(digest, ShouldNotBeNil)
resp, err = resty.R().SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json").
SetBody(content).Put(baseURL + fmt.Sprintf("/v2/page0/manifests/test:%d.0", i))
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 201)
d := resp.Header().Get(api.DistContentDigestKey)
So(d, ShouldNotBeEmpty)
So(d, ShouldEqual, digest.String())
}
resp, err := resty.R().Get(baseURL + "/v2/page0/tags/list")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
resp, err = resty.R().Get(baseURL + "/v2/page0/tags/list?n=3")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
next := resp.Header().Get("Link")
So(next, ShouldNotBeEmpty)
u := baseURL + strings.Split(next, ";")[0]
resp, err = resty.R().Get(u)
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 200)
next = resp.Header().Get("Link")
So(next, ShouldBeEmpty)
})
// this is an additional test for repository names (alphanumeric)
Convey("Repository names", func() {
Print("\nRepository names")
@ -504,9 +565,6 @@ func CheckWorkflows(t *testing.T, config *compliance.Config) {
resp, err = resty.R().Post(baseURL + "/v2/repotest123/blobs/uploads/")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 202)
resp, err = resty.R().Post(baseURL + "/v2/repoTest123/blobs/uploads/")
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 202)
})
})
}

pkg/storage/storage.go

@ -243,6 +243,11 @@ func (is *ImageStore) GetImageManifest(repo string, reference string) ([]byte, s
if err != nil {
is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
if os.IsNotExist(err) {
return nil, "", "", errors.ErrRepoNotFound
}
return nil, "", "", err
}
@ -286,8 +291,14 @@ func (is *ImageStore) GetImageManifest(repo string, reference string) ([]byte, s
p = path.Join(p, digest.Encoded())
buf, err = ioutil.ReadFile(p)
if err != nil {
is.log.Error().Err(err).Str("blob", p).Msg("failed to read manifest")
if os.IsNotExist(err) {
return nil, "", "", errors.ErrManifestNotFound
}
return nil, "", "", err
}
@ -548,6 +559,39 @@ func (is *ImageStore) GetBlobUpload(repo string, uuid string) (int64, error) {
return fi.Size(), nil
}
// PutBlobChunkStreamed appends another chunk of data to the specified blob. It returns
// the number of actual bytes to the blob.
func (is *ImageStore) PutBlobChunkStreamed(repo string, uuid string, body io.Reader) (int64, error) {
if err := is.InitRepo(repo); err != nil {
return -1, err
}
blobUploadPath := is.BlobUploadPath(repo, uuid)
_, err := os.Stat(blobUploadPath)
if err != nil {
return -1, errors.ErrUploadNotFound
}
file, err := os.OpenFile(
blobUploadPath,
os.O_WRONLY|os.O_CREATE,
0600,
)
if err != nil {
is.log.Fatal().Err(err).Msg("failed to open file")
}
defer file.Close()
if _, err := file.Seek(0, io.SeekEnd); err != nil {
is.log.Fatal().Err(err).Msg("failed to seek file")
}
n, err := io.Copy(file, body)
return n, err
}
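
A standalone sketch of the append pattern PutBlobChunkStreamed() relies on (illustration only; it uses a throwaway temp file rather than a real blob upload path): every call seeks to the end of the session file, copies the reader onto it, and reports how many bytes that call wrote, so successive streamed PATCHes simply accumulate.

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// appendStream mirrors the body of PutBlobChunkStreamed() above: open the
// session file, seek to its end, and copy the incoming body onto it.
func appendStream(path string, body io.Reader) (int64, error) {
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o600)
	if err != nil {
		return -1, err
	}
	defer f.Close()
	if _, err := f.Seek(0, io.SeekEnd); err != nil {
		return -1, err
	}
	return io.Copy(f, body)
}

func main() {
	tmp, _ := os.CreateTemp("", "blob-upload-*")
	tmp.Close()
	defer os.Remove(tmp.Name())

	n1, _ := appendStream(tmp.Name(), strings.NewReader("first "))
	n2, _ := appendStream(tmp.Name(), strings.NewReader("second"))
	fi, _ := os.Stat(tmp.Name())
	fmt.Println(n1, n2, fi.Size()) // 6 6 12: each call reports its own bytes, the file keeps growing
}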
// PutBlobChunk writes another chunk of data to the specified blob. It returns
// the number of actual bytes to the blob.
func (is *ImageStore) PutBlobChunk(repo string, uuid string, from int64, to int64,
@ -579,7 +623,7 @@ func (is *ImageStore) PutBlobChunk(repo string, uuid string, from int64, to int6
}
defer file.Close()
if _, err := file.Seek(from, 0); err != nil {
if _, err := file.Seek(from, io.SeekStart); err != nil {
is.log.Fatal().Err(err).Msg("failed to seek file")
}