diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 9187831a..8f5431c3 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -24,7 +24,7 @@ jobs: uses: golangci/golangci-lint-action@v2 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version - version: v1.43.0 + version: v1.45.2 # Optional: working directory, useful for monorepos # working-directory: somedir diff --git a/Makefile b/Makefile index 4d3f14fc..296229ed 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,7 @@ TOOLSDIR := $(shell pwd)/hack/tools PATH := bin:$(TOOLSDIR)/bin:$(PATH) STACKER := $(shell which stacker) GOLINTER := $(TOOLSDIR)/bin/golangci-lint +GOLINTER_VERSION := v1.45.2 NOTATION := $(TOOLSDIR)/bin/notation BATS := $(TOOLSDIR)/bin/bats TESTDATA := $(TOP_LEVEL)/test/data @@ -89,7 +90,7 @@ covhtml: $(GOLINTER): mkdir -p $(TOOLSDIR)/bin - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(TOOLSDIR)/bin v1.43.0 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(TOOLSDIR)/bin $(GOLINTER_VERSION) $(GOLINTER) version .PHONY: check diff --git a/cmd/zb/helper.go b/cmd/zb/helper.go index b66ea5a6..3571686b 100644 --- a/cmd/zb/helper.go +++ b/cmd/zb/helper.go @@ -38,7 +38,8 @@ func deleteTestRepo(repos []string, url string, client *resty.Client) error { } func pullAndCollect(url string, repos []string, manifestItem manifestStruct, - config testConfig, client *resty.Client, statsCh chan statsRecord) []string { + config testConfig, client *resty.Client, statsCh chan statsRecord, +) []string { manifestHash := manifestItem.manifestHash manifestBySizeHash := manifestItem.manifestBySizeHash @@ -252,7 +253,8 @@ func pullAndCollect(url string, repos []string, manifestItem manifestStruct, } func pushMonolithImage(workdir, url, trepo string, repos []string, size int, - client *resty.Client) (map[string]string, []string, error) { + client *resty.Client, +) (map[string]string, []string, error) { var statusCode int // key: repository name. 
value: manifest name @@ -398,7 +400,8 @@ func pushMonolithImage(workdir, url, trepo string, repos []string, size int, func pushMonolithAndCollect(workdir, url, trepo string, count int, repos []string, config testConfig, client *resty.Client, - statsCh chan statsRecord) []string { + statsCh chan statsRecord, +) []string { func() { start := time.Now() @@ -618,7 +621,8 @@ func pushMonolithAndCollect(workdir, url, trepo string, count int, func pushChunkAndCollect(workdir, url, trepo string, count int, repos []string, config testConfig, client *resty.Client, - statsCh chan statsRecord) []string { + statsCh chan statsRecord, +) []string { func() { start := time.Now() diff --git a/errors/errors.go b/errors/errors.go index 9730bfa3..220be1de 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -4,6 +4,7 @@ import "errors" var ( ErrBadConfig = errors.New("config: invalid config") + ErrCliBadConfig = errors.New("cli: bad config") ErrRepoNotFound = errors.New("repository: not found") ErrRepoIsNotDir = errors.New("repository: not a directory") ErrRepoBadVersion = errors.New("repository: unsupported layout version") diff --git a/golangcilint.yaml b/golangcilint.yaml index 45fd227e..d4d68df3 100644 --- a/golangcilint.yaml +++ b/golangcilint.yaml @@ -5,7 +5,7 @@ run: linters: enable-all: true - disable: funlen,gocognit,exhaustivestruct,paralleltest,forbidigo,ireturn,wrapcheck,exhaustive + disable: funlen,gocognit,exhaustivestruct,paralleltest,forbidigo,ireturn,wrapcheck,exhaustive,maintidx linters-settings: dupl: @@ -24,6 +24,19 @@ linters-settings: - err - ok - gc + - wg + ignore-decls: + - n int + - i int + - r *os.File + - w *os.File + - to int64 + - l *ldap.Conn + wsl: + allow-assign-and-anything: true + enforce-err-cuddling: true + nolintlint: + allow-unused: true gomnd: settings: mnd: diff --git a/pkg/api/controller_test.go b/pkg/api/controller_test.go index 1968744e..bdbad423 100644 --- a/pkg/api/controller_test.go +++ b/pkg/api/controller_test.go @@ -10,6 +10,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + goerrors "errors" "fmt" "io" "io/ioutil" @@ -1324,7 +1325,8 @@ func (l *testLDAPServer) Bind(bindDN, bindSimplePw string, conn net.Conn) (vldap } func (l *testLDAPServer) Search(boundDN string, req vldap.SearchRequest, - conn net.Conn) (vldap.ServerSearchResult, error) { + conn net.Conn, +) (vldap.ServerSearchResult, error) { check := fmt.Sprintf("(uid=%s)", username) if check == req.Filter { return vldap.ServerSearchResult{ @@ -1893,11 +1895,9 @@ func TestAuthorizationWithBasicAuth(t *testing.T) { // first let's use global based policies // add test user to global policy with create perm - conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Users = - append(conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Users, "test") + conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Users = append(conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Users, "test") //nolint:lll // gofumpt conflicts with lll - conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions = - append(conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions, "create") + conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions = append(conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions, "create") //nolint:lll // gofumpt conflicts with lll // now it should get 202 resp, err = resty.R().SetBasicAuth(username, passphrase). 
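// Illustrative note, not part of the patch: most hunks in this change are mechanical
// reformatting that appears to come from the golangci-lint bump to v1.45.2: the
// bundled gofumpt now wants a wrapped parameter list to end with a trailing comma
// and the closing parenthesis to sit on its own line. A minimal, self-contained
// sketch of that rule, reusing a signature from cmd/zb/helper.go (the stub body and
// the resty import are assumptions for the sketch, not code from the patch):
package zb

import resty "github.com/go-resty/resty/v2"

func pushMonolithImage(workdir, url, trepo string, repos []string, size int,
	client *resty.Client,
) (map[string]string, []string, error) {
	// the real patch only moves the closing parenthesis; the body is elided here
	return map[string]string{}, nil, nil
}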
@@ -1933,8 +1933,7 @@ func TestAuthorizationWithBasicAuth(t *testing.T) { So(resp.StatusCode(), ShouldEqual, http.StatusForbidden) // get tags with read access should get 200 - conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions = - append(conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions, "read") + conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions = append(conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions, "read") //nolint:lll // gofumpt conflicts with lll resp, err = resty.R().SetBasicAuth(username, passphrase). Get(baseURL + "/v2/" + AuthorizationNamespace + "/tags/list") @@ -1964,8 +1963,7 @@ func TestAuthorizationWithBasicAuth(t *testing.T) { So(resp.StatusCode(), ShouldEqual, http.StatusForbidden) // add delete perm on repo - conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions = - append(conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions, "delete") + conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions = append(conf.AccessControl.Repositories[AuthorizationAllRepos].Policies[0].Actions, "delete") //nolint:lll // gofumpt conflicts with lll // delete blob should get 202 resp, err = resty.R().SetBasicAuth(username, passphrase). @@ -1987,10 +1985,8 @@ func TestAuthorizationWithBasicAuth(t *testing.T) { DefaultPolicy: []string{}, } - conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Users = - append(conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Users, "test") - conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions = - append(conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions, "create") + conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Users = append(conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Users, "test") //nolint:lll // gofumpt conflicts with lll + conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions = append(conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions, "create") //nolint:lll // gofumpt conflicts with lll // now it should get 202 resp, err = resty.R().SetBasicAuth(username, passphrase). @@ -2026,8 +2022,7 @@ func TestAuthorizationWithBasicAuth(t *testing.T) { So(resp.StatusCode(), ShouldEqual, http.StatusForbidden) // get tags with read access should get 200 - conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions = - append(conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions, "read") + conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions = append(conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions, "read") //nolint:lll // gofumpt conflicts with lll resp, err = resty.R().SetBasicAuth(username, passphrase). 
Get(baseURL + "/v2/" + AuthorizationNamespace + "/tags/list") @@ -2063,8 +2058,7 @@ func TestAuthorizationWithBasicAuth(t *testing.T) { So(resp.StatusCode(), ShouldEqual, http.StatusForbidden) // add delete perm on repo - conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions = - append(conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions, "delete") + conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions = append(conf.AccessControl.Repositories[AuthorizationNamespace].Policies[0].Actions, "delete") //nolint:lll // gofumpt conflicts with lll // delete blob should get 202 resp, err = resty.R().SetBasicAuth(username, passphrase). @@ -2111,8 +2105,7 @@ func TestAuthorizationWithBasicAuth(t *testing.T) { So(resp.StatusCode(), ShouldEqual, http.StatusForbidden) // add create perm on repo - conf.AccessControl.Repositories["zot-test"].Policies[0].Actions = - append(conf.AccessControl.Repositories["zot-test"].Policies[0].Actions, "create") + conf.AccessControl.Repositories["zot-test"].Policies[0].Actions = append(conf.AccessControl.Repositories["zot-test"].Policies[0].Actions, "create") //nolint:lll // gofumpt conflicts with lll // should get 201 with create perm resp, err = resty.R().SetBasicAuth(username, passphrase). @@ -2131,8 +2124,7 @@ func TestAuthorizationWithBasicAuth(t *testing.T) { So(resp.StatusCode(), ShouldEqual, http.StatusForbidden) // add update perm on repo - conf.AccessControl.Repositories["zot-test"].Policies[0].Actions = - append(conf.AccessControl.Repositories["zot-test"].Policies[0].Actions, "update") + conf.AccessControl.Repositories["zot-test"].Policies[0].Actions = append(conf.AccessControl.Repositories["zot-test"].Policies[0].Actions, "update") //nolint:lll // gofumpt conflicts with lll // update manifest should get 201 with update perm resp, err = resty.R().SetBasicAuth(username, passphrase). 
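// Illustrative note, not part of the patch: the next two hunks in controller_test.go
// swap the direct comparison `err == io.EOF` for errors.Is, which also matches
// wrapped errors; the stdlib package is imported as goerrors, presumably because the
// plain name collides with zot's own errors package. A self-contained stand-in for
// those read loops (drainAll is a hypothetical helper, not code from the repository):
package api

import (
	goerrors "errors"
	"io"
)

func drainAll(reader io.Reader) (int, error) {
	buf := make([]byte, 512)
	total := 0

	for {
		nbytes, err := reader.Read(buf)
		total += nbytes

		if err != nil {
			// io.EOF only marks the end of the stream, not a failure
			if goerrors.Is(err, io.EOF) {
				return total, nil
			}

			return total, err
		}
	}
}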
@@ -2854,7 +2846,7 @@ func TestParallelRequests(t *testing.T) { for { nbytes, err := reader.Read(buf) if err != nil { - if err == io.EOF { + if goerrors.Is(err, io.EOF) { break } panic(err) @@ -2879,7 +2871,7 @@ func TestParallelRequests(t *testing.T) { for { nbytes, err := reader.Read(buf) if err != nil { - if err == io.EOF { + if goerrors.Is(err, io.EOF) { break } panic(err) @@ -4343,7 +4335,7 @@ func TestInjectTooManyOpenFiles(t *testing.T) { So(digest, ShouldNotBeNil) // Testing router path: @Router /v2/{name}/manifests/{reference} [put] - // nolint: lll + //nolint:lll // gofumpt conflicts with lll Convey("Uploading an image manifest blob (when injected simulates that PutImageManifest failed due to 'too many open files' error)", func() { injected := test.InjectFailure(1) @@ -4526,7 +4518,7 @@ func TestPeriodicGC(t *testing.T) { subPaths := make(map[string]config.StorageConfig) - subPaths["/a"] = config.StorageConfig{RootDirectory: subDir, GC: true, GCDelay: 1 * time.Second, GCInterval: 24 * time.Hour} // nolint:lll + subPaths["/a"] = config.StorageConfig{RootDirectory: subDir, GC: true, GCDelay: 1 * time.Second, GCInterval: 24 * time.Hour} //nolint:lll // gofumpt conflicts with lll ctlr.Config.Storage.SubPaths = subPaths ctlr.Config.Storage.RootDirectory = dir @@ -4542,7 +4534,7 @@ func TestPeriodicGC(t *testing.T) { "\"GCDelay\":3600000000000,\"GCInterval\":0,\"RootDirectory\":\""+dir+"\"") // periodic GC is enabled for sub store So(string(data), ShouldContainSubstring, - fmt.Sprintf("\"SubPaths\":{\"/a\":{\"RootDirectory\":\"%s\",\"GC\":true,\"Dedupe\":false,\"Commit\":false,\"GCDelay\":1000000000,\"GCInterval\":86400000000000", subDir)) // nolint:lll + fmt.Sprintf("\"SubPaths\":{\"/a\":{\"RootDirectory\":\"%s\",\"GC\":true,\"Dedupe\":false,\"Commit\":false,\"GCDelay\":1000000000,\"GCInterval\":86400000000000", subDir)) //nolint:lll // gofumpt conflicts with lll So(string(data), ShouldContainSubstring, fmt.Sprintf("executing GC of orphaned blobs for %s", ctlr.StoreController.SubStore["/a"].RootDir())) }) diff --git a/pkg/api/routes.go b/pkg/api/routes.go index 6d093e08..a94301f8 100644 --- a/pkg/api/routes.go +++ b/pkg/api/routes.go @@ -33,8 +33,7 @@ import ( ext "zotregistry.io/zot/pkg/extensions" "zotregistry.io/zot/pkg/log" "zotregistry.io/zot/pkg/storage" - "zotregistry.io/zot/pkg/test" - + "zotregistry.io/zot/pkg/test" // nolint: goimports // as required by swaggo. _ "zotregistry.io/zot/swagger" ) @@ -184,17 +183,17 @@ func (rh *RouteHandler) ListTags(response http.ResponseWriter, request *http.Req return } - var n1 int64 + var nQuery1 int64 var err error - if n1, err = strconv.ParseInt(nQuery[0], 10, 0); err != nil { + if nQuery1, err = strconv.ParseInt(nQuery[0], 10, 0); err != nil { response.WriteHeader(http.StatusBadRequest) return } - numTags = int(n1) + numTags = int(nQuery1) paginate = true } @@ -1298,7 +1297,8 @@ func WriteData(w http.ResponseWriter, status int, mediaType string, data []byte) } func WriteDataFromReader(response http.ResponseWriter, status int, length int64, mediaType string, - reader io.Reader, logger log.Logger) { + reader io.Reader, logger log.Logger, +) { response.Header().Set("Content-Type", mediaType) response.Header().Set("Content-Length", strconv.FormatInt(length, 10)) response.WriteHeader(status) @@ -1325,7 +1325,8 @@ func (rh *RouteHandler) getImageStore(name string) storage.ImageStore { // will sync on demand if an image is not found, in case sync extensions is enabled. 
func getImageManifest(routeHandler *RouteHandler, imgStore storage.ImageStore, name, - reference string) ([]byte, string, string, error) { + reference string, +) ([]byte, string, string, error) { content, digest, mediaType, err := imgStore.GetImageManifest(name, reference) if err != nil { if errors.Is(err, zerr.ErrRepoNotFound) || errors.Is(err, zerr.ErrManifestNotFound) { @@ -1354,7 +1355,8 @@ func getImageManifest(routeHandler *RouteHandler, imgStore storage.ImageStore, n // will sync referrers on demand if they are not found, in case sync extensions is enabled. func getReferrers(routeHandler *RouteHandler, imgStore storage.ImageStore, name, digest, - artifactType string) ([]artifactspec.Descriptor, error) { + artifactType string, +) ([]artifactspec.Descriptor, error) { refs, err := imgStore.GetReferrers(name, digest, artifactType) if err != nil { if routeHandler.c.Config.Extensions != nil && diff --git a/pkg/cli/client.go b/pkg/cli/client.go index 0da5295d..3af4f19a 100644 --- a/pkg/cli/client.go +++ b/pkg/cli/client.go @@ -38,7 +38,7 @@ const ( ) func createHTTPClient(verifyTLS bool, host string) *http.Client { - htr := http.DefaultTransport.(*http.Transport).Clone() + htr := http.DefaultTransport.(*http.Transport).Clone() //nolint: forcetypeassert if !verifyTLS { htr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint: gosec @@ -65,7 +65,8 @@ func createHTTPClient(verifyTLS bool, host string) *http.Client { } func makeGETRequest(ctx context.Context, url, username, password string, - verifyTLS bool, resultsPtr interface{}) (http.Header, error) { + verifyTLS bool, resultsPtr interface{}, +) (http.Header, error) { req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { return nil, err @@ -77,7 +78,8 @@ func makeGETRequest(ctx context.Context, url, username, password string, } func makeGraphQLRequest(ctx context.Context, url, query, username, - password string, verifyTLS bool, resultsPtr interface{}) error { + password string, verifyTLS bool, resultsPtr interface{}, +) error { req, err := http.NewRequestWithContext(ctx, "GET", url, bytes.NewBufferString(query)) if err != nil { return err @@ -201,7 +203,6 @@ type requestsPool struct { done chan struct{} wtgrp *sync.WaitGroup outputCh chan stringResult - context context.Context } type manifestJob struct { @@ -216,7 +217,7 @@ type manifestJob struct { const rateLimiterBuffer = 5000 -func newSmoothRateLimiter(ctx context.Context, wtgrp *sync.WaitGroup, opch chan stringResult) *requestsPool { +func newSmoothRateLimiter(wtgrp *sync.WaitGroup, opch chan stringResult) *requestsPool { ch := make(chan *manifestJob, rateLimiterBuffer) return &requestsPool{ @@ -224,7 +225,6 @@ func newSmoothRateLimiter(ctx context.Context, wtgrp *sync.WaitGroup, opch chan done: make(chan struct{}), wtgrp: wtgrp, outputCh: opch, - context: ctx, } } diff --git a/pkg/cli/config_cmd.go b/pkg/cli/config_cmd.go index 133a65b2..1ec3d85c 100644 --- a/pkg/cli/config_cmd.go +++ b/pkg/cli/config_cmd.go @@ -150,7 +150,12 @@ func getConfigMapFromFile(filePath string) ([]interface{}, error) { return nil, zerr.ErrEmptyJSON } - return jsonMap["configs"].([]interface{}), nil + configs, ok := jsonMap["configs"].([]interface{}) + if !ok { + return nil, zerr.ErrCliBadConfig + } + + return configs, nil } func saveConfigMapToFile(filePath string, configMap []interface{}) error { diff --git a/pkg/cli/config_cmd_test.go b/pkg/cli/config_cmd_test.go index 74cd00c3..631246ac 100644 --- a/pkg/cli/config_cmd_test.go +++ b/pkg/cli/config_cmd_test.go @@ -79,6 
+79,19 @@ func TestConfigCmdMain(t *testing.T) { So(actualStr, ShouldContainSubstring, "https://test-url.com") }) + Convey("Test add config with invalid format", t, func() { + args := []string{"--list"} + configPath := makeConfigFile(`{"configs":{"_name":"configtest","url":"https://test-url.com","showspinner":false}}`) + defer os.Remove(configPath) + cmd := NewConfigCommand() + buff := bytes.NewBufferString("") + cmd.SetOut(buff) + cmd.SetErr(buff) + cmd.SetArgs(args) + err := cmd.Execute() + So(err, ShouldEqual, zotErrors.ErrCliBadConfig) + }) + Convey("Test add config with invalid URL", t, func() { args := []string{"add", "configtest1", "test..com"} file := makeConfigFile("") diff --git a/pkg/cli/extensions_test.go b/pkg/cli/extensions_test.go index 9fbdbd1f..98ed2d0f 100644 --- a/pkg/cli/extensions_test.go +++ b/pkg/cli/extensions_test.go @@ -102,7 +102,7 @@ func TestServeExtensions(t *testing.T) { WaitTillServerReady(baseURL) data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) - So(string(data), ShouldContainSubstring, "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":null,\"Scrub\":null") // nolint:lll + So(string(data), ShouldContainSubstring, "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":null,\"Scrub\":null") //nolint:lll // gofumpt conflicts with lll }) } @@ -143,7 +143,7 @@ func testWithMetricsEnabled(cfgContentFormat string) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) So(string(data), ShouldContainSubstring, - "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":{\"Enable\":true,\"Prometheus\":{\"Path\":\"/metrics\"}},\"Scrub\":null}") // nolint:lll + "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":{\"Enable\":true,\"Prometheus\":{\"Path\":\"/metrics\"}},\"Scrub\":null}") //nolint:lll // gofumpt conflicts with lll } func TestServeMetricsExtension(t *testing.T) { @@ -267,7 +267,7 @@ func TestServeMetricsExtension(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) So(string(data), ShouldContainSubstring, - "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":{\"Enable\":false,\"Prometheus\":{\"Path\":\"/metrics\"}},\"Scrub\":null}") // nolint:lll + "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":{\"Enable\":false,\"Prometheus\":{\"Path\":\"/metrics\"}},\"Scrub\":null}") //nolint:lll // gofumpt conflicts with lll }) } @@ -508,7 +508,7 @@ func TestServeScrubExtension(t *testing.T) { So(err, ShouldBeNil) // Even if in config we specified scrub interval=1h, the minimum interval is 2h So(string(data), ShouldContainSubstring, - "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":null,\"Scrub\":{\"Interval\":3600000000000}") // nolint:lll + "\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":null,\"Scrub\":{\"Interval\":3600000000000}") //nolint:lll // gofumpt conflicts with lll So(string(data), ShouldContainSubstring, "executing scrub to check manifest/blob integrity") So(string(data), ShouldContainSubstring, "Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") @@ -612,7 +612,7 @@ func TestServeSearchExtension(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) So(string(data), ShouldContainSubstring, - "\"Extensions\":{\"Search\":{\"CVE\":{\"UpdateInterval\":86400000000000},\"Enable\":true},\"Sync\":null,\"Metrics\":null,\"Scrub\":null}") // nolint:lll + 
"\"Extensions\":{\"Search\":{\"CVE\":{\"UpdateInterval\":86400000000000},\"Enable\":true},\"Sync\":null,\"Metrics\":null,\"Scrub\":null}") //nolint:lll // gofumpt conflicts with lll So(string(data), ShouldContainSubstring, "updating the CVE database") }) @@ -663,7 +663,7 @@ func TestServeSearchExtension(t *testing.T) { So(err, ShouldBeNil) // Even if in config we specified updateInterval=1h, the minimum interval is 2h So(string(data), ShouldContainSubstring, - "\"Extensions\":{\"Search\":{\"CVE\":{\"UpdateInterval\":3600000000000},\"Enable\":true},\"Sync\":null,\"Metrics\":null,\"Scrub\":null}") // nolint:lll + "\"Extensions\":{\"Search\":{\"CVE\":{\"UpdateInterval\":3600000000000},\"Enable\":true},\"Sync\":null,\"Metrics\":null,\"Scrub\":null}") //nolint:lll // gofumpt conflicts with lll So(string(data), ShouldContainSubstring, "updating the CVE database") So(string(data), ShouldContainSubstring, "CVE update interval set to too-short interval < 2h, changing update duration to 2 hours and continuing.") @@ -713,7 +713,7 @@ func TestServeSearchExtension(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) So(string(data), ShouldContainSubstring, - "\"Extensions\":{\"Search\":{\"CVE\":{\"UpdateInterval\":86400000000000},\"Enable\":true},\"Sync\":null,\"Metrics\":null,\"Scrub\":null}") // nolint:lll + "\"Extensions\":{\"Search\":{\"CVE\":{\"UpdateInterval\":86400000000000},\"Enable\":true},\"Sync\":null,\"Metrics\":null,\"Scrub\":null}") //nolint:lll // gofumpt conflicts with lll So(string(data), ShouldContainSubstring, "updating the CVE database") }) @@ -764,7 +764,7 @@ func TestServeSearchExtension(t *testing.T) { data, err := os.ReadFile(logFile.Name()) So(err, ShouldBeNil) So(string(data), ShouldContainSubstring, - "\"Extensions\":{\"Search\":{\"CVE\":{\"UpdateInterval\":10800000000000},\"Enable\":false},\"Sync\":null,\"Metrics\":null,\"Scrub\":null}") // nolint:lll + "\"Extensions\":{\"Search\":{\"CVE\":{\"UpdateInterval\":10800000000000},\"Enable\":false},\"Sync\":null,\"Metrics\":null,\"Scrub\":null}") //nolint:lll // gofumpt conflicts with lll So(string(data), ShouldContainSubstring, "CVE config not provided, skipping CVE update") So(string(data), ShouldNotContainSubstring, "CVE update interval set to too-short interval < 2h, changing update duration to 2 hours and continuing.") diff --git a/pkg/cli/image_cmd.go b/pkg/cli/image_cmd.go index 4488579d..22a8a1be 100644 --- a/pkg/cli/image_cmd.go +++ b/pkg/cli/image_cmd.go @@ -116,7 +116,8 @@ func parseBooleanConfig(configPath, configName, configParam string) (bool, error } func setupImageFlags(imageCmd *cobra.Command, searchImageParams map[string]*string, - servURL, user, outputFormat *string, verbose *bool) { + servURL, user, outputFormat *string, verbose *bool, +) { searchImageParams["imageName"] = imageCmd.Flags().StringP("name", "n", "", "List image details by name") searchImageParams["digest"] = imageCmd.Flags().StringP("digest", "d", "", "List images containing a specific manifest, config, or layer digest") diff --git a/pkg/cli/image_cmd_test.go b/pkg/cli/image_cmd_test.go index 2c03e6d7..cf09ca12 100644 --- a/pkg/cli/image_cmd_test.go +++ b/pkg/cli/image_cmd_test.go @@ -312,14 +312,18 @@ func TestServerResponse(t *testing.T) { _ = controller.Server.Shutdown(ctx) }(ctlr) - uploadManifest(url) + err := uploadManifest(url) + t.Logf("%s", ctlr.Config.Storage.RootDirectory) + So(err, ShouldBeNil) Convey("Test all images config url", func() { + t.Logf("%s", ctlr.Config.Storage.RootDirectory) args := 
[]string{"imagetest"} configPath := makeConfigFile(fmt.Sprintf(`{"configs":[{"_name":"imagetest","url":"%s","showspinner":false}]}`, url)) defer os.Remove(configPath) cmd := NewImageCommand(new(searchService)) - buff := bytes.NewBufferString("") + // buff := bytes.NewBufferString("") + buff := &bytes.Buffer{} cmd.SetOut(buff) cmd.SetErr(buff) cmd.SetArgs(args) @@ -454,7 +458,7 @@ func TestServerResponse(t *testing.T) { }) } -func uploadManifest(url string) { +func uploadManifest(url string) error { // create a blob/layer resp, _ := resty.R().Post(url + "/v2/repo7/blobs/uploads/") loc := test.Location(url, resp) @@ -493,7 +497,12 @@ func uploadManifest(url string) { }, } manifest.SchemaVersion = 2 - content, _ = json.Marshal(manifest) + + content, err := json.Marshal(manifest) + if err != nil { + return err + } + _, _ = resty.R().SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). SetBody(content).Put(url + "/v2/repo7/manifests/test:1.0") @@ -515,15 +524,22 @@ func uploadManifest(url string) { }, } manifest.SchemaVersion = 2 - content, _ = json.Marshal(manifest) + + content, err = json.Marshal(manifest) + if err != nil { + return err + } _, _ = resty.R().SetHeader("Content-Type", "application/vnd.oci.image.manifest.v1+json"). SetBody(content).Put(url + "/v2/repo7/manifests/test:2.0") + + return nil } type mockService struct{} func (service mockService) getAllImages(ctx context.Context, config searchConfig, username, password string, - channel chan stringResult, wtgrp *sync.WaitGroup) { + channel chan stringResult, wtgrp *sync.WaitGroup, +) { defer wtgrp.Done() defer close(channel) @@ -548,7 +564,8 @@ func (service mockService) getAllImages(ctx context.Context, config searchConfig } func (service mockService) getImageByName(ctx context.Context, config searchConfig, - username, password, imageName string, channel chan stringResult, wtgrp *sync.WaitGroup) { + username, password, imageName string, channel chan stringResult, wtgrp *sync.WaitGroup, +) { defer wtgrp.Done() defer close(channel) @@ -573,7 +590,8 @@ func (service mockService) getImageByName(ctx context.Context, config searchConf } func (service mockService) getCveByImage(ctx context.Context, config searchConfig, username, password, - imageName string, rch chan stringResult, wtgrp *sync.WaitGroup) { + imageName string, rch chan stringResult, wtgrp *sync.WaitGroup, +) { defer wtgrp.Done() defer close(rch) @@ -610,22 +628,26 @@ func (service mockService) getCveByImage(ctx context.Context, config searchConfi } func (service mockService) getImagesByCveID(ctx context.Context, config searchConfig, username, password, cvid string, - rch chan stringResult, wtgrp *sync.WaitGroup) { + rch chan stringResult, wtgrp *sync.WaitGroup, +) { service.getImageByName(ctx, config, username, password, "anImage", rch, wtgrp) } func (service mockService) getImagesByDigest(ctx context.Context, config searchConfig, username, - password, digest string, rch chan stringResult, wtgrp *sync.WaitGroup) { + password, digest string, rch chan stringResult, wtgrp *sync.WaitGroup, +) { service.getImageByName(ctx, config, username, password, "anImage", rch, wtgrp) } func (service mockService) getImageByNameAndCVEID(ctx context.Context, config searchConfig, username, - password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup) { + password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup, +) { service.getImageByName(ctx, config, username, password, imageName, rch, wtgrp) } func (service mockService) 
getFixedTagsForCVE(ctx context.Context, config searchConfig, - username, password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup) { + username, password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup, +) { service.getImageByName(ctx, config, username, password, imageName, rch, wtgrp) } diff --git a/pkg/cli/searcher.go b/pkg/cli/searcher.go index 2b974e35..32f3a7ff 100644 --- a/pkg/cli/searcher.go +++ b/pkg/cli/searcher.go @@ -299,7 +299,8 @@ func (search fixedTagsSearcher) search(config searchConfig) (bool, error) { } func collectResults(config searchConfig, wg *sync.WaitGroup, imageErr chan stringResult, - cancel context.CancelFunc, printHeader printHeader, errCh chan error) { + cancel context.CancelFunc, printHeader printHeader, errCh chan error, +) { var foundResult bool defer wg.Done() diff --git a/pkg/cli/service.go b/pkg/cli/service.go index 6f295168..400eb07e 100644 --- a/pkg/cli/service.go +++ b/pkg/cli/service.go @@ -44,12 +44,13 @@ func NewSearchService() SearchService { } func (service searchService) getImageByName(ctx context.Context, config searchConfig, - username, password, imageName string, rch chan stringResult, wtgrp *sync.WaitGroup) { + username, password, imageName string, rch chan stringResult, wtgrp *sync.WaitGroup, +) { defer wtgrp.Done() defer close(rch) var localWg sync.WaitGroup - rlim := newSmoothRateLimiter(ctx, &localWg, rch) + rlim := newSmoothRateLimiter(&localWg, rch) localWg.Add(1) @@ -62,7 +63,8 @@ func (service searchService) getImageByName(ctx context.Context, config searchCo } func (service searchService) getAllImages(ctx context.Context, config searchConfig, username, password string, - rch chan stringResult, wtgrp *sync.WaitGroup) { + rch chan stringResult, wtgrp *sync.WaitGroup, +) { defer wtgrp.Done() defer close(rch) @@ -90,7 +92,7 @@ func (service searchService) getAllImages(ctx context.Context, config searchConf var localWg sync.WaitGroup - rlim := newSmoothRateLimiter(ctx, &localWg, rch) + rlim := newSmoothRateLimiter(&localWg, rch) localWg.Add(1) @@ -106,7 +108,8 @@ func (service searchService) getAllImages(ctx context.Context, config searchConf } func getImage(ctx context.Context, config searchConfig, username, password, imageName string, - rch chan stringResult, wtgrp *sync.WaitGroup, pool *requestsPool) { + rch chan stringResult, wtgrp *sync.WaitGroup, pool *requestsPool, +) { defer wtgrp.Done() tagListEndpoint, err := combineServerAndEndpointURL(*config.servURL, fmt.Sprintf("/v2/%s/tags/list", imageName)) @@ -139,7 +142,8 @@ func getImage(ctx context.Context, config searchConfig, username, password, imag } func (service searchService) getImagesByCveID(ctx context.Context, config searchConfig, username, - password, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup) { + password, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup, +) { defer wtgrp.Done() defer close(rch) @@ -176,7 +180,7 @@ func (service searchService) getImagesByCveID(ctx context.Context, config search var localWg sync.WaitGroup - rlim := newSmoothRateLimiter(ctx, &localWg, rch) + rlim := newSmoothRateLimiter(&localWg, rch) localWg.Add(1) go rlim.startRateLimiter(ctx) @@ -193,7 +197,8 @@ func (service searchService) getImagesByCveID(ctx context.Context, config search } func (service searchService) getImagesByDigest(ctx context.Context, config searchConfig, username, - password string, digest string, rch chan stringResult, wtgrp *sync.WaitGroup) { + password string, digest string, rch chan stringResult, wtgrp 
*sync.WaitGroup, +) { defer wtgrp.Done() defer close(rch) @@ -230,7 +235,7 @@ func (service searchService) getImagesByDigest(ctx context.Context, config searc var localWg sync.WaitGroup - rlim := newSmoothRateLimiter(ctx, &localWg, rch) + rlim := newSmoothRateLimiter(&localWg, rch) localWg.Add(1) go rlim.startRateLimiter(ctx) @@ -247,7 +252,8 @@ func (service searchService) getImagesByDigest(ctx context.Context, config searc } func (service searchService) getImageByNameAndCVEID(ctx context.Context, config searchConfig, username, - password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup) { + password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup, +) { defer wtgrp.Done() defer close(rch) @@ -284,7 +290,7 @@ func (service searchService) getImageByNameAndCVEID(ctx context.Context, config var localWg sync.WaitGroup - rlim := newSmoothRateLimiter(ctx, &localWg, rch) + rlim := newSmoothRateLimiter(&localWg, rch) localWg.Add(1) go rlim.startRateLimiter(ctx) @@ -305,7 +311,8 @@ func (service searchService) getImageByNameAndCVEID(ctx context.Context, config } func (service searchService) getCveByImage(ctx context.Context, config searchConfig, username, password, - imageName string, rch chan stringResult, wtgrp *sync.WaitGroup) { + imageName string, rch chan stringResult, wtgrp *sync.WaitGroup, +) { defer wtgrp.Done() defer close(rch) @@ -388,7 +395,8 @@ func isContextDone(ctx context.Context) bool { } func (service searchService) getFixedTagsForCVE(ctx context.Context, config searchConfig, - username, password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup) { + username, password, imageName, cvid string, rch chan stringResult, wtgrp *sync.WaitGroup, +) { defer wtgrp.Done() defer close(rch) @@ -425,7 +433,7 @@ func (service searchService) getFixedTagsForCVE(ctx context.Context, config sear var localWg sync.WaitGroup - rlim := newSmoothRateLimiter(ctx, &localWg, rch) + rlim := newSmoothRateLimiter(&localWg, rch) localWg.Add(1) go rlim.startRateLimiter(ctx) @@ -443,7 +451,8 @@ func (service searchService) getFixedTagsForCVE(ctx context.Context, config sear // errors are returned in the stringResult channel, the unmarshalled payload is in resultPtr. func (service searchService) makeGraphQLQuery(ctx context.Context, config searchConfig, username, password, query string, - resultPtr interface{}) error { + resultPtr interface{}, +) error { endPoint, err := combineServerAndEndpointURL(*config.servURL, "/query") if err != nil { return err @@ -458,7 +467,8 @@ func (service searchService) makeGraphQLQuery(ctx context.Context, config search } func addManifestCallToPool(ctx context.Context, config searchConfig, pool *requestsPool, - username, password, imageName, tagName string, rch chan stringResult, wtgrp *sync.WaitGroup) { + username, password, imageName, tagName string, rch chan stringResult, wtgrp *sync.WaitGroup, +) { defer wtgrp.Done() resultManifest := manifestResponse{} diff --git a/pkg/compliance/v1_0_0/check.go b/pkg/compliance/v1_0_0/check.go index 5f38f313..0bdb6d3f 100644 --- a/pkg/compliance/v1_0_0/check.go +++ b/pkg/compliance/v1_0_0/check.go @@ -13,8 +13,7 @@ import ( "testing" godigest "github.com/opencontainers/go-digest" - ispec "github.com/opencontainers/image-spec/specs-go/v1" - + ispec "github.com/opencontainers/image-spec/specs-go/v1" // nolint: goimports // nolint:golint,stylecheck,revive . 
"github.com/smartystreets/goconvey/convey" "github.com/smartystreets/goconvey/convey/reporting" @@ -1105,14 +1104,14 @@ func outputJSONExit() { } func validateMinifyRawJSON(rawJSON string) string { - var j interface{} + var jsonData interface{} - err := json.Unmarshal([]byte(rawJSON), &j) + err := json.Unmarshal([]byte(rawJSON), &jsonData) if err != nil { panic(err) } - rawJSONBytesMinified, err := json.Marshal(j) + rawJSONBytesMinified, err := json.Marshal(jsonData) if err != nil { panic(err) } diff --git a/pkg/extensions/extensions.go b/pkg/extensions/extensions.go index f75af40e..790f5084 100644 --- a/pkg/extensions/extensions.go +++ b/pkg/extensions/extensions.go @@ -43,7 +43,7 @@ func EnableExtensions(config *config.Config, log log.Logger, rootDir string) { if config.Extensions.Search.CVE.UpdateInterval < defaultUpdateInterval { config.Extensions.Search.CVE.UpdateInterval = defaultUpdateInterval - log.Warn().Msg("CVE update interval set to too-short interval < 2h, changing update duration to 2 hours and continuing.") // nolint: lll + log.Warn().Msg("CVE update interval set to too-short interval < 2h, changing update duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll } go func() { @@ -72,7 +72,8 @@ func EnableExtensions(config *config.Config, log log.Logger, rootDir string) { // EnableSyncExtension enables sync extension. func EnableSyncExtension(ctx context.Context, config *config.Config, wg *goSync.WaitGroup, - storeController storage.StoreController, log log.Logger) { + storeController storage.StoreController, log log.Logger, +) { if config.Extensions.Sync != nil && *config.Extensions.Sync.Enable { if err := sync.Run(ctx, *config.Extensions.Sync, storeController, wg, log); err != nil { log.Error().Err(err).Msg("Error encountered while setting up syncing") @@ -84,7 +85,8 @@ func EnableSyncExtension(ctx context.Context, config *config.Config, wg *goSync. // EnableScrubExtension enables scrub extension. func EnableScrubExtension(config *config.Config, storeController storage.StoreController, - log log.Logger) { + log log.Logger, +) { if config.Extensions.Scrub != nil && config.Extensions.Scrub.Interval != 0 { minScrubInterval, _ := time.ParseDuration("2h") @@ -92,7 +94,7 @@ func EnableScrubExtension(config *config.Config, storeController storage.StoreCo if config.Extensions.Scrub.Interval < minScrubInterval { config.Extensions.Scrub.Interval = minScrubInterval - log.Warn().Msg("Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") // nolint: lll + log.Warn().Msg("Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll } go func() { @@ -108,7 +110,8 @@ func EnableScrubExtension(config *config.Config, storeController storage.StoreCo // SetupRoutes ... func SetupRoutes(config *config.Config, router *mux.Router, storeController storage.StoreController, - l log.Logger) { + l log.Logger, +) { // fork a new zerolog child to avoid data race log := log.Logger{Logger: l.With().Caller().Timestamp().Logger()} log.Info().Msg("setting up extensions routes") @@ -134,7 +137,8 @@ func SetupRoutes(config *config.Config, router *mux.Router, storeController stor // SyncOneImage syncs one image. 
func SyncOneImage(config *config.Config, storeController storage.StoreController, - repoName, reference string, isArtifact bool, log log.Logger) error { + repoName, reference string, isArtifact bool, log log.Logger, +) error { log.Info().Msgf("syncing image %s:%s", repoName, reference) err := sync.OneImage(*config.Extensions.Sync, storeController, repoName, reference, isArtifact, log) diff --git a/pkg/extensions/minimal.go b/pkg/extensions/minimal.go index cf0d5055..2126bc5a 100644 --- a/pkg/extensions/minimal.go +++ b/pkg/extensions/minimal.go @@ -27,14 +27,16 @@ func EnableExtensions(config *config.Config, log log.Logger, rootDir string) { // EnableSyncExtension ... func EnableSyncExtension(ctx context.Context, config *config.Config, wg *goSync.WaitGroup, - storeController storage.StoreController, log log.Logger) { + storeController storage.StoreController, log log.Logger, +) { log.Warn().Msg("skipping enabling sync extension because given zot binary doesn't support any extensions," + "please build zot full binary for this feature") } // EnableScrubExtension ... func EnableScrubExtension(config *config.Config, storeController storage.StoreController, - log log.Logger) { + log log.Logger, +) { log.Warn().Msg("skipping enabling scrub extension because given zot binary doesn't support any extensions," + "please build zot full binary for this feature") } @@ -47,7 +49,8 @@ func SetupRoutes(conf *config.Config, router *mux.Router, storeController storag // SyncOneImage ... func SyncOneImage(config *config.Config, storeController storage.StoreController, - repoName, reference string, isArtifact bool, log log.Logger) error { + repoName, reference string, isArtifact bool, log log.Logger, +) error { log.Warn().Msg("skipping syncing on demand because given zot binary doesn't support any extensions," + "please build zot full binary for this feature") diff --git a/pkg/extensions/monitoring/extension.go b/pkg/extensions/monitoring/extension.go index 4d889b2b..7d930805 100644 --- a/pkg/extensions/monitoring/extension.go +++ b/pkg/extensions/monitoring/extension.go @@ -169,7 +169,7 @@ func IncDownloadCounter(ms MetricServer, repo string) { }) } -func SetStorageUsage(ms MetricServer, rootDir string, repo string) { +func SetStorageUsage(ms MetricServer, rootDir, repo string) { ms.SendMetric(func() { dir := path.Join(rootDir, repo) repoSize, err := getDirSize(dir) diff --git a/pkg/extensions/monitoring/minimal.go b/pkg/extensions/monitoring/minimal.go index ceab0590..5bd6ee2f 100644 --- a/pkg/extensions/monitoring/minimal.go +++ b/pkg/extensions/monitoring/minimal.go @@ -235,7 +235,7 @@ func GetHistograms() map[string][]string { // return true if a metric does not have any labels or if the label // values for searched metric corresponds to the one in the cached slice. 
-func isMetricMatch(lValues []string, metricValues []string) bool { +func isMetricMatch(lValues, metricValues []string) bool { if len(lValues) == len(metricValues) { for i, v := range metricValues { if v != lValues[i] { @@ -400,7 +400,7 @@ func (ms *metricServer) HistogramObserve(hv *HistogramValue) { } // nolint: goerr113 -func sanityChecks(name string, knownLabels []string, found bool, labelNames []string, labelValues []string) error { +func sanityChecks(name string, knownLabels []string, found bool, labelNames, labelValues []string) error { if !found { return fmt.Errorf("metric %s: not found", name) } @@ -479,7 +479,7 @@ func IncUploadCounter(ms MetricServer, repo string) { ms.SendMetric(uCounter) } -func SetStorageUsage(ms MetricServer, rootDir string, repo string) { +func SetStorageUsage(ms MetricServer, rootDir, repo string) { dir := path.Join(rootDir, repo) repoSize, err := getDirSize(dir) diff --git a/pkg/extensions/monitoring/minimal_client.go b/pkg/extensions/monitoring/minimal_client.go index 77467b8a..011a7dd8 100644 --- a/pkg/extensions/monitoring/minimal_client.go +++ b/pkg/extensions/monitoring/minimal_client.go @@ -38,7 +38,7 @@ type MetricsClient struct { } func newHTTPMetricsClient() *http.Client { - defaultTransport := http.DefaultTransport.(*http.Transport).Clone() + defaultTransport := http.DefaultTransport.(*http.Transport).Clone() //nolint: forcetypeassert defaultTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint: gosec return &http.Client{ diff --git a/pkg/extensions/search/common/common.go b/pkg/extensions/search/common/common.go index 19cc7563..5c4ba00c 100644 --- a/pkg/extensions/search/common/common.go +++ b/pkg/extensions/search/common/common.go @@ -67,7 +67,7 @@ func GetRepo(image string) string { return image } -func GetFixedTags(allTags []TagInfo, infectedTags []TagInfo) []TagInfo { +func GetFixedTags(allTags, infectedTags []TagInfo) []TagInfo { sort.Slice(allTags, func(i, j int) bool { return allTags[i].Timestamp.Before(allTags[j].Timestamp) }) diff --git a/pkg/extensions/search/cve/cve.go b/pkg/extensions/search/cve/cve.go index 724d9c5c..5f3ae962 100644 --- a/pkg/extensions/search/cve/cve.go +++ b/pkg/extensions/search/cve/cve.go @@ -139,8 +139,9 @@ func (cveinfo CveInfo) GetTrivyContext(image string) *TrivyCtx { return trivyCtx } -func (cveinfo CveInfo) GetImageListForCVE(repo string, cvid string, imgStore storage.ImageStore, - trivyCtx *TrivyCtx) ([]*string, error) { +func (cveinfo CveInfo) GetImageListForCVE(repo, cvid string, imgStore storage.ImageStore, + trivyCtx *TrivyCtx, +) ([]*string, error) { tags := make([]*string, 0) tagList, err := imgStore.GetImageTags(repo) diff --git a/pkg/extensions/search/cve/cve_test.go b/pkg/extensions/search/cve/cve_test.go index 4d29051b..1cd086cf 100644 --- a/pkg/extensions/search/cve/cve_test.go +++ b/pkg/extensions/search/cve/cve_test.go @@ -315,7 +315,7 @@ func generateTestData() error { // nolint: gocyclo return nil } -func makeTestFile(fileName string, content string) error { +func makeTestFile(fileName, content string) error { if err := ioutil.WriteFile(fileName, []byte(content), 0o600); err != nil { panic(err) } @@ -426,15 +426,15 @@ func TestCVESearch(t *testing.T) { So(err, ShouldBeNil) So(resp, ShouldNotBeNil) So(resp.StatusCode(), ShouldEqual, 401) - var e api.Error - err = json.Unmarshal(resp.Body(), &e) + var apiErr api.Error + err = json.Unmarshal(resp.Body(), &apiErr) So(err, ShouldBeNil) resp, err = resty.R().Get(baseURL + "/query/") So(err, ShouldBeNil) So(resp, 
ShouldNotBeNil) So(resp.StatusCode(), ShouldEqual, 401) - err = json.Unmarshal(resp.Body(), &e) + err = json.Unmarshal(resp.Body(), &apiErr) So(err, ShouldBeNil) // with creds, should get expected status code diff --git a/pkg/extensions/search/digest/digest.go b/pkg/extensions/search/digest/digest.go index 68e5ddbb..9925a324 100644 --- a/pkg/extensions/search/digest/digest.go +++ b/pkg/extensions/search/digest/digest.go @@ -23,7 +23,7 @@ func NewDigestInfo(storeController storage.StoreController, log log.Logger) *Dig } // FilterImagesByDigest returns a list of image tags in a repository matching a specific divest. -func (digestinfo DigestInfo) GetImageTagsByDigest(repo string, digest string) ([]*string, error) { +func (digestinfo DigestInfo) GetImageTagsByDigest(repo, digest string) ([]*string, error) { uniqueTags := []*string{} manifests, err := digestinfo.LayoutUtils.GetImageManifests(repo) diff --git a/pkg/extensions/search/resolver.go b/pkg/extensions/search/resolver.go index 783c0b91..4bfe5892 100644 --- a/pkg/extensions/search/resolver.go +++ b/pkg/extensions/search/resolver.go @@ -9,7 +9,7 @@ import ( "strings" godigest "github.com/opencontainers/go-digest" - "zotregistry.io/zot/pkg/log" + "zotregistry.io/zot/pkg/log" // nolint: gci "zotregistry.io/zot/pkg/extensions/search/common" cveinfo "zotregistry.io/zot/pkg/extensions/search/cve" @@ -247,7 +247,8 @@ func (r *queryResolver) ImageListForCve(ctx context.Context, cvid string) ([]*Im } func (r *queryResolver) getImageListForCVE(repoList []string, cvid string, imgStore storage.ImageStore, - trivyCtx *cveinfo.TrivyCtx) ([]*ImgResultForCve, error) { + trivyCtx *cveinfo.TrivyCtx, +) ([]*ImgResultForCve, error) { cveResult := []*ImgResultForCve{} for _, repo := range repoList { @@ -270,7 +271,7 @@ func (r *queryResolver) getImageListForCVE(repoList []string, cvid string, imgSt return cveResult, nil } -func (r *queryResolver) ImageListWithCVEFixed(ctx context.Context, cvid string, image string) (*ImgResultForFixedCve, error) { // nolint: lll +func (r *queryResolver) ImageListWithCVEFixed(ctx context.Context, cvid, image string) (*ImgResultForFixedCve, error) { // nolint: lll imgResultForFixedCVE := &ImgResultForFixedCve{} r.log.Info().Str("image", image).Msg("extracting list of tags available in image") @@ -396,7 +397,8 @@ func (r *queryResolver) ImageListForDigest(ctx context.Context, digestID string) } func (r *queryResolver) getImageListForDigest(repoList []string, - digest string) ([]*ImgResultForDigest, error) { + digest string, +) ([]*ImgResultForDigest, error) { imgResultForDigest := []*ImgResultForDigest{} var errResult error diff --git a/pkg/extensions/sync/on_demand.go b/pkg/extensions/sync/on_demand.go index 378d4219..4f223cef 100644 --- a/pkg/extensions/sync/on_demand.go +++ b/pkg/extensions/sync/on_demand.go @@ -44,7 +44,7 @@ func (di *demandedImages) loadOrStoreChan(key string, value chan error) (chan er return errChannel, found } -func (di *demandedImages) loadOrStoreStr(key string, value string) (string, bool) { +func (di *demandedImages) loadOrStoreStr(key, value string) (string, bool) { val, found := di.syncedMap.LoadOrStore(key, value) str, _ := val.(string) @@ -56,7 +56,8 @@ func (di *demandedImages) delete(key string) { } func OneImage(cfg Config, storeController storage.StoreController, - repo, tag string, isArtifact bool, log log.Logger) error { + repo, tag string, isArtifact bool, log log.Logger, +) error { // guard against multiple parallel requests demandedImage := fmt.Sprintf("%s:%s", repo, tag) // loadOrStore 
image-based channel @@ -88,7 +89,8 @@ func OneImage(cfg Config, storeController storage.StoreController, } func syncOneImage(imageChannel chan error, cfg Config, storeController storage.StoreController, - localRepo, tag string, isArtifact bool, log log.Logger) { + localRepo, tag string, isArtifact bool, log log.Logger, +) { var credentialsFile CredentialsFile if cfg.CredentialsFile != "" { @@ -267,7 +269,8 @@ func syncOneImage(imageChannel chan error, cfg Config, storeController storage.S } func syncRun(regCfg RegistryConfig, localRepo, remoteRepo, tag string, utils syncContextUtils, - log log.Logger) (bool, error) { + log log.Logger, +) (bool, error) { upstreamImageRef, err := getImageRef(utils.upstreamAddr, remoteRepo, tag) if err != nil { log.Error().Err(err).Msgf("error creating docker reference for repository %s/%s:%s", diff --git a/pkg/extensions/sync/signatures.go b/pkg/extensions/sync/signatures.go index 7faaa81f..d7732a88 100644 --- a/pkg/extensions/sync/signatures.go +++ b/pkg/extensions/sync/signatures.go @@ -20,8 +20,9 @@ import ( ) func getCosignManifest(client *resty.Client, regURL url.URL, repo, digest string, - log log.Logger) (*ispec.Manifest, error) { - var m ispec.Manifest + log log.Logger, +) (*ispec.Manifest, error) { + var cosignManifest ispec.Manifest cosignTag := getCosignTagFromImageDigest(digest) @@ -51,7 +52,7 @@ func getCosignManifest(client *resty.Client, regURL url.URL, repo, digest string return nil, zerr.ErrSyncSignature } - err = json.Unmarshal(resp.Body(), &m) + err = json.Unmarshal(resp.Body(), &cosignManifest) if err != nil { log.Error().Err(err).Str("url", getCosignManifestURL.String()). Msgf("couldn't unmarshal cosign manifest %s", cosignTag) @@ -59,7 +60,7 @@ func getCosignManifest(client *resty.Client, regURL url.URL, repo, digest string return nil, err } - return &m, nil + return &cosignManifest, nil } func getNotaryRefs(client *resty.Client, regURL url.URL, repo, digest string, log log.Logger) (ReferenceList, error) { @@ -195,7 +196,8 @@ func syncCosignSignature(client *resty.Client, imageStore storage.ImageStore, } func syncNotarySignature(client *resty.Client, imageStore storage.ImageStore, - regURL url.URL, localRepo, remoteRepo, digest string, referrers ReferenceList, log log.Logger) error { + regURL url.URL, localRepo, remoteRepo, digest string, referrers ReferenceList, log log.Logger, +) error { if len(referrers.References) == 0 { return nil } @@ -217,16 +219,16 @@ func syncNotarySignature(client *resty.Client, imageStore storage.ImageStore, } // read manifest - var m artifactspec.Manifest + var artifactManifest artifactspec.Manifest - err = json.Unmarshal(resp.Body(), &m) + err = json.Unmarshal(resp.Body(), &artifactManifest) if err != nil { log.Error().Err(err).Msgf("couldn't unmarshal notary manifest: %s", getRefManifestURL.String()) return err } - for _, blob := range m.Blobs { + for _, blob := range artifactManifest.Blobs { getBlobURL := regURL getBlobURL.Path = path.Join(getBlobURL.Path, "v2", remoteRepo, "blobs", blob.Digest.String()) getBlobURL.RawQuery = getBlobURL.Query().Encode() @@ -270,7 +272,8 @@ func syncNotarySignature(client *resty.Client, imageStore storage.ImageStore, } func canSkipNotarySignature(repo, tag, digest string, refs ReferenceList, imageStore storage.ImageStore, - log log.Logger) (bool, error) { + log log.Logger, +) (bool, error) { // check notary signature already synced if len(refs.References) > 0 { localRefs, err := imageStore.GetReferrers(repo, digest, notreg.ArtifactTypeNotation) @@ -297,7 +300,8 @@ func 
canSkipNotarySignature(repo, tag, digest string, refs ReferenceList, imageS } func canSkipCosignSignature(repo, tag, digest string, cosignManifest *ispec.Manifest, imageStore storage.ImageStore, - log log.Logger) (bool, error) { + log log.Logger, +) (bool, error) { // check cosign signature already synced if cosignManifest != nil { var localCosignManifest ispec.Manifest diff --git a/pkg/extensions/sync/sync.go b/pkg/extensions/sync/sync.go index 1c24f0fa..2401f138 100644 --- a/pkg/extensions/sync/sync.go +++ b/pkg/extensions/sync/sync.go @@ -75,7 +75,7 @@ type Tags struct { // getUpstreamCatalog gets all repos from a registry. func getUpstreamCatalog(client *resty.Client, upstreamURL string, log log.Logger) (catalog, error) { - var c catalog + var catalog catalog registryCatalogURL := fmt.Sprintf("%s%s", upstreamURL, "/v2/_catalog") @@ -83,24 +83,24 @@ func getUpstreamCatalog(client *resty.Client, upstreamURL string, log log.Logger if err != nil { log.Err(err).Msgf("couldn't query %s", registryCatalogURL) - return c, err + return catalog, err } if resp.IsError() { log.Error().Msgf("couldn't query %s, status code: %d, body: %s", registryCatalogURL, resp.StatusCode(), resp.Body()) - return c, zerr.ErrSyncMissingCatalog + return catalog, zerr.ErrSyncMissingCatalog } - err = json.Unmarshal(resp.Body(), &c) + err = json.Unmarshal(resp.Body(), &catalog) if err != nil { log.Err(err).Str("body", string(resp.Body())).Msg("couldn't unmarshal registry's catalog") - return c, err + return catalog, err } - return c, nil + return catalog, nil } // getImageTags lists all tags in a repository. @@ -189,7 +189,8 @@ func filterImagesBySemver(upstreamReferences *[]types.ImageReference, content Co // imagesToCopyFromRepos lists all images given a registry name and its repos. func imagesToCopyFromUpstream(ctx context.Context, registryName string, repos []string, - upstreamCtx *types.SystemContext, content Content, log log.Logger) ([]types.ImageReference, error) { + upstreamCtx *types.SystemContext, content Content, log log.Logger, +) ([]types.ImageReference, error) { var upstreamReferences []types.ImageReference for _, repoName := range repos { @@ -286,7 +287,8 @@ func getUpstreamContext(regCfg *RegistryConfig, credentials Credentials) *types. 
func syncRegistry(ctx context.Context, regCfg RegistryConfig, upstreamURL string, storeController storage.StoreController, localCtx *types.SystemContext, policyCtx *signature.PolicyContext, credentials Credentials, - retryOptions *retry.RetryOptions, log log.Logger) error { + retryOptions *retry.RetryOptions, log log.Logger, +) error { log.Info().Msgf("syncing registry: %s", upstreamURL) var err error @@ -532,7 +534,8 @@ func getLocalContexts(log log.Logger) (*types.SystemContext, *signature.PolicyCo } func Run(ctx context.Context, cfg Config, storeController storage.StoreController, - wtgrp *goSync.WaitGroup, logger log.Logger) error { + wtgrp *goSync.WaitGroup, logger log.Logger, +) error { var credentialsFile CredentialsFile var err error diff --git a/pkg/extensions/sync/sync_test.go b/pkg/extensions/sync/sync_test.go index 8c40ad2e..fbd8de8c 100644 --- a/pkg/extensions/sync/sync_test.go +++ b/pkg/extensions/sync/sync_test.go @@ -1324,13 +1324,13 @@ func TestNoImagesByRegex(t *testing.T) { So(resp, ShouldNotBeEmpty) So(resp.StatusCode(), ShouldEqual, 200) - var c catalog - err = json.Unmarshal(resp.Body(), &c) + var catalog catalog + err = json.Unmarshal(resp.Body(), &catalog) if err != nil { panic(err) } - So(c.Repositories, ShouldResemble, []string{}) + So(catalog.Repositories, ShouldResemble, []string{}) }) } @@ -2215,17 +2215,17 @@ func TestPeriodicallySignaturesErr(t *testing.T) { So(err, ShouldBeNil) // read manifest - var nm artifactspec.Manifest + var artifactManifest artifactspec.Manifest for _, ref := range referrers.References { refPath := path.Join(srcDir, repoName, "blobs", string(ref.Digest.Algorithm()), ref.Digest.Hex()) body, err := ioutil.ReadFile(refPath) So(err, ShouldBeNil) - err = json.Unmarshal(body, &nm) + err = json.Unmarshal(body, &artifactManifest) So(err, ShouldBeNil) // triggers perm denied on sig blobs - for _, blob := range nm.Blobs { + for _, blob := range artifactManifest.Blobs { blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex()) err := os.Chmod(blobPath, 0o000) So(err, ShouldBeNil) @@ -2383,17 +2383,17 @@ func TestSignatures(t *testing.T) { err = os.RemoveAll(path.Join(destDir, repoName)) So(err, ShouldBeNil) - var nm artifactspec.Manifest + var artifactManifest artifactspec.Manifest for _, ref := range referrers.References { refPath := path.Join(srcDir, repoName, "blobs", string(ref.Digest.Algorithm()), ref.Digest.Hex()) body, err := ioutil.ReadFile(refPath) So(err, ShouldBeNil) - err = json.Unmarshal(body, &nm) + err = json.Unmarshal(body, &artifactManifest) So(err, ShouldBeNil) // triggers perm denied on notary sig blobs on downstream - for _, blob := range nm.Blobs { + for _, blob := range artifactManifest.Blobs { blobPath := path.Join(destDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex()) err := os.MkdirAll(blobPath, 0o755) So(err, ShouldBeNil) @@ -2426,7 +2426,7 @@ func TestSignatures(t *testing.T) { So(resp.StatusCode(), ShouldEqual, 200) // triggers perm denied on sig blobs - for _, blob := range nm.Blobs { + for _, blob := range artifactManifest.Blobs { blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex()) err := os.Chmod(blobPath, 0o000) So(err, ShouldBeNil) @@ -2449,29 +2449,29 @@ func TestSignatures(t *testing.T) { mResp, err := resty.R().Get(getCosignManifestURL) So(err, ShouldBeNil) - var cm ispec.Manifest + var imageManifest ispec.Manifest - err = json.Unmarshal(mResp.Body(), &cm) + err = json.Unmarshal(mResp.Body(), 
 So(err, ShouldBeNil)
 downstreaamCosignManifest := ispec.Manifest{
- MediaType: cm.MediaType,
+ MediaType: imageManifest.MediaType,
 Config: ispec.Descriptor{
- MediaType: cm.Config.MediaType,
- Size: cm.Config.Size,
- Digest: cm.Config.Digest,
- Annotations: cm.Config.Annotations,
+ MediaType: imageManifest.Config.MediaType,
+ Size: imageManifest.Config.Size,
+ Digest: imageManifest.Config.Digest,
+ Annotations: imageManifest.Config.Annotations,
 },
- Layers: cm.Layers,
- Versioned: cm.Versioned,
- Annotations: cm.Annotations,
+ Layers: imageManifest.Layers,
+ Versioned: imageManifest.Versioned,
+ Annotations: imageManifest.Annotations,
 }
 buf, err := json.Marshal(downstreaamCosignManifest)
 So(err, ShouldBeNil)
 cosignManifestDigest := godigest.FromBytes(buf)
- for _, blob := range cm.Layers {
+ for _, blob := range imageManifest.Layers {
 blobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
 err := os.Chmod(blobPath, 0o000)
 So(err, ShouldBeNil)
@@ -2490,7 +2490,7 @@ func TestSignatures(t *testing.T) {
 err = os.RemoveAll(path.Join(destDir, repoName))
 So(err, ShouldBeNil)
- for _, blob := range cm.Layers {
+ for _, blob := range imageManifest.Layers {
 srcBlobPath := path.Join(srcDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
 err := os.Chmod(srcBlobPath, 0o755)
 So(err, ShouldBeNil)
@@ -2507,7 +2507,7 @@ func TestSignatures(t *testing.T) {
 So(err, ShouldBeNil)
 So(resp.StatusCode(), ShouldEqual, 200)
- for _, blob := range cm.Layers {
+ for _, blob := range imageManifest.Layers {
 destBlobPath := path.Join(destDir, repoName, "blobs", string(blob.Digest.Algorithm()), blob.Digest.Hex())
 err = os.Chmod(destBlobPath, 0o755)
 So(err, ShouldBeNil)
@@ -2516,8 +2516,8 @@ func TestSignatures(t *testing.T) {
 }
 // trigger error on upstream config blob
- srcConfigBlobPath := path.Join(srcDir, repoName, "blobs", string(cm.Config.Digest.Algorithm()),
- cm.Config.Digest.Hex())
+ srcConfigBlobPath := path.Join(srcDir, repoName, "blobs", string(imageManifest.Config.Digest.Algorithm()),
+ imageManifest.Config.Digest.Hex())
 err = os.Chmod(srcConfigBlobPath, 0o000)
 So(err, ShouldBeNil)
@@ -2538,8 +2538,8 @@ func TestSignatures(t *testing.T) {
 err = os.RemoveAll(path.Join(destDir, repoName))
 So(err, ShouldBeNil)
- destConfigBlobPath := path.Join(destDir, repoName, "blobs", string(cm.Config.Digest.Algorithm()),
- cm.Config.Digest.Hex())
+ destConfigBlobPath := path.Join(destDir, repoName, "blobs", string(imageManifest.Config.Digest.Algorithm()),
+ imageManifest.Config.Digest.Hex())
 err = os.MkdirAll(destConfigBlobPath, 0o755)
 So(err, ShouldBeNil)
@@ -3062,15 +3062,15 @@ func TestSignaturesOnDemand(t *testing.T) {
 mResp, err := resty.R().Get(getCosignManifestURL)
 So(err, ShouldBeNil)
- var cm ispec.Manifest
+ var imageManifest ispec.Manifest
- err = json.Unmarshal(mResp.Body(), &cm)
+ err = json.Unmarshal(mResp.Body(), &imageManifest)
 So(err, ShouldBeNil)
 // trigger errors on cosign blobs
 // trigger error on cosign config blob
- srcConfigBlobPath := path.Join(srcDir, repoName, "blobs", string(cm.Config.Digest.Algorithm()),
- cm.Config.Digest.Hex())
+ srcConfigBlobPath := path.Join(srcDir, repoName, "blobs", string(imageManifest.Config.Digest.Algorithm()),
+ imageManifest.Config.Digest.Hex())
 err = os.Chmod(srcConfigBlobPath, 0o000)
 So(err, ShouldBeNil)
@@ -3084,8 +3084,8 @@ func TestSignaturesOnDemand(t *testing.T) {
 So(resp.StatusCode(), ShouldEqual, 200)
 // trigger error on cosign layer blob
- srcSignatureBlobPath := path.Join(srcDir, repoName, "blobs", string(cm.Layers[0].Digest.Algorithm()),
- cm.Layers[0].Digest.Hex())
+ srcSignatureBlobPath := path.Join(srcDir, repoName, "blobs", string(imageManifest.Layers[0].Digest.Algorithm()),
+ imageManifest.Layers[0].Digest.Hex())
 err = os.Chmod(srcConfigBlobPath, 0o755)
 So(err, ShouldBeNil)
diff --git a/pkg/extensions/sync/utils.go b/pkg/extensions/sync/utils.go
index 22c243ca..1b90ca63 100644
--- a/pkg/extensions/sync/utils.go
+++ b/pkg/extensions/sync/utils.go
@@ -206,7 +206,8 @@ func getFileCredentials(filepath string) (CredentialsFile, error) {
 }
 func getHTTPClient(regCfg *RegistryConfig, upstreamURL string, credentials Credentials,
- log log.Logger) (*resty.Client, *url.URL, error) {
+ log log.Logger,
+) (*resty.Client, *url.URL, error) {
 client := resty.New()
 if !common.Contains(regCfg.URLs, upstreamURL) {
@@ -262,7 +263,8 @@ func getHTTPClient(regCfg *RegistryConfig, upstreamURL string, credentials Crede
 }
 func pushSyncedLocalImage(localRepo, tag, localCachePath string,
- imageStore storage.ImageStore, log log.Logger) error {
+ imageStore storage.ImageStore, log log.Logger,
+) error {
 log.Info().Msgf("pushing synced local image %s/%s:%s to local registry", localCachePath, localRepo, tag)
 metrics := monitoring.NewMetricsServer(false, log)
diff --git a/pkg/log/log.go b/pkg/log/log.go
index 52ae9baf..d8d951eb 100644
--- a/pkg/log/log.go
+++ b/pkg/log/log.go
@@ -21,7 +21,7 @@ func (l Logger) Println(v ...interface{}) {
 l.Logger.Error().Msg("panic recovered")
 }
-func NewLogger(level string, output string) Logger {
+func NewLogger(level, output string) Logger {
 zerolog.TimeFieldFormat = time.RFC3339Nano
 lvl, err := zerolog.ParseLevel(level)
@@ -46,7 +46,7 @@ func NewLogger(level string, output string) Logger {
 return Logger{Logger: log.Hook(goroutineHook{}).With().Caller().Timestamp().Logger()}
 }
-func NewAuditLogger(level string, audit string) *Logger {
+func NewAuditLogger(level, audit string) *Logger {
 zerolog.TimeFieldFormat = time.RFC3339Nano
 lvl, err := zerolog.ParseLevel(level)
diff --git a/pkg/storage/cache.go b/pkg/storage/cache.go
index f27b3bf4..aaa98877 100644
--- a/pkg/storage/cache.go
+++ b/pkg/storage/cache.go
@@ -27,7 +27,7 @@ type Blob struct {
 Path string
 }
-func NewCache(rootDir string, name string, log zlog.Logger) *Cache {
+func NewCache(rootDir, name string, log zlog.Logger) *Cache {
 dbPath := path.Join(rootDir, name+".db")
 dbOpts := &bbolt.Options{
 Timeout: dbCacheLockCheckTimeout,
@@ -60,7 +60,7 @@ func NewCache(rootDir string, name string, log zlog.Logger) *Cache {
 return &Cache{rootDir: rootDir, db: cacheDB, log: log}
 }
-func (c *Cache) PutBlob(digest string, path string) error {
+func (c *Cache) PutBlob(digest, path string) error {
 if path == "" {
 c.log.Error().Err(errors.ErrEmptyValue).Str("digest", digest).Msg("empty path provided")
@@ -136,7 +136,7 @@ func (c *Cache) GetBlob(digest string) (string, error) {
 return blobPath.String(), nil
 }
-func (c *Cache) HasBlob(digest string, blob string) bool {
+func (c *Cache) HasBlob(digest, blob string) bool {
 if err := c.db.View(func(tx *bbolt.Tx) error {
 root := tx.Bucket([]byte(BlobsCache))
 if root == nil {
@@ -164,7 +164,7 @@ func (c *Cache) HasBlob(digest string, blob string) bool {
 return true
 }
-func (c *Cache) DeleteBlob(digest string, path string) error {
+func (c *Cache) DeleteBlob(digest, path string) error {
 // use only relative (to rootDir) paths on blobs
 relp, err := filepath.Rel(c.rootDir, path)
 if err != nil {
diff --git a/pkg/storage/s3/s3_test.go b/pkg/storage/s3/s3_test.go
index 507cbccf..3c7e2ff9 100644
--- a/pkg/storage/s3/s3_test.go
+++ b/pkg/storage/s3/s3_test.go
@@ -166,7 +166,7 @@ type StorageDriverMock struct {
 writerFn func(ctx context.Context, path string, isAppend bool) (driver.FileWriter, error)
 statFn func(ctx context.Context, path string) (driver.FileInfo, error)
 listFn func(ctx context.Context, path string) ([]string, error)
- moveFn func(ctx context.Context, sourcePath string, destPath string) error
+ moveFn func(ctx context.Context, sourcePath, destPath string) error
 deleteFn func(ctx context.Context, path string) error
 walkFn func(ctx context.Context, path string, f driver.WalkFn) error
 }
@@ -227,7 +227,7 @@ func (s *StorageDriverMock) List(ctx context.Context, path string) ([]string, er
 return []string{"a"}, nil
 }
-func (s *StorageDriverMock) Move(ctx context.Context, sourcePath string, destPath string) error {
+func (s *StorageDriverMock) Move(ctx context.Context, sourcePath, destPath string) error {
 if s != nil && s.moveFn != nil {
 return s.moveFn(ctx, sourcePath, destPath)
 }
diff --git a/pkg/storage/s3/storage.go b/pkg/storage/s3/storage.go
index 3b23dd37..26ccff9a 100644
--- a/pkg/storage/s3/storage.go
+++ b/pkg/storage/s3/storage.go
@@ -16,7 +16,6 @@ import (
 // Add s3 support.
 "github.com/docker/distribution/registry/storage/driver"
- // Load s3 driver.
 _ "github.com/docker/distribution/registry/storage/driver/s3-aws"
 guuid "github.com/gofrs/uuid"
@@ -65,7 +64,8 @@ func (is *ObjectStorage) DirExists(d string) bool {
 // see https://github.com/docker/docker.github.io/tree/master/registry/storage-drivers
 func NewImageStore(rootDir string, gc bool, gcDelay time.Duration, dedupe, commit bool,
 log zlog.Logger, metrics monitoring.MetricServer,
- store driver.StorageDriver) storage.ImageStore {
+ store driver.StorageDriver,
+) storage.ImageStore {
 imgStore := &ObjectStorage{
 rootDir: rootDir,
 store: store,
@@ -307,7 +307,7 @@ func (is *ObjectStorage) GetImageTags(repo string) ([]string, error) {
 }
 // GetImageManifest returns the image manifest of an image in the specific repository.
-func (is *ObjectStorage) GetImageManifest(repo string, reference string) ([]byte, string, string, error) {
+func (is *ObjectStorage) GetImageManifest(repo, reference string) ([]byte, string, string, error) {
 var lockLatency time.Time
 dir := path.Join(is.rootDir, repo)
@@ -381,8 +381,9 @@ func (is *ObjectStorage) GetImageManifest(repo string, reference string) ([]byte
 }
 // PutImageManifest adds an image manifest to the repository.
-func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaType string,
- body []byte) (string, error) {
+func (is *ObjectStorage) PutImageManifest(repo, reference, mediaType string,
+ body []byte,
+) (string, error) {
 if err := is.InitRepo(repo); err != nil {
 is.log.Debug().Err(err).Msg("init repo")
@@ -402,20 +403,20 @@ func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaTy
 return "", zerr.ErrBadManifest
 }
- var m ispec.Manifest
- if err := json.Unmarshal(body, &m); err != nil {
+ var imageManifest ispec.Manifest
+ if err := json.Unmarshal(body, &imageManifest); err != nil {
 is.log.Error().Err(err).Msg("unable to unmarshal JSON")
 return "", zerr.ErrBadManifest
 }
- if m.SchemaVersion != storage.SchemaVersion {
- is.log.Error().Int("SchemaVersion", m.SchemaVersion).Msg("invalid manifest")
+ if imageManifest.SchemaVersion != storage.SchemaVersion {
+ is.log.Error().Int("SchemaVersion", imageManifest.SchemaVersion).Msg("invalid manifest")
 return "", zerr.ErrBadManifest
 }
- for _, l := range m.Layers {
+ for _, l := range imageManifest.Layers {
 digest := l.Digest
 blobPath := is.BlobPath(repo, digest)
 is.log.Info().Str("blobPath", blobPath).Str("reference", reference).Msg("manifest layers")
@@ -548,7 +549,7 @@ func (is *ObjectStorage) PutImageManifest(repo string, reference string, mediaTy
 }
 // DeleteImageManifest deletes the image manifest from the repository.
-func (is *ObjectStorage) DeleteImageManifest(repo string, reference string) error {
+func (is *ObjectStorage) DeleteImageManifest(repo, reference string) error {
 var lockLatency time.Time
 dir := path.Join(is.rootDir, repo)
@@ -657,7 +658,7 @@ func (is *ObjectStorage) DeleteImageManifest(repo string, reference string) erro
 }
 // BlobUploadPath returns the upload path for a blob in this store.
-func (is *ObjectStorage) BlobUploadPath(repo string, uuid string) string {
+func (is *ObjectStorage) BlobUploadPath(repo, uuid string) string {
 dir := path.Join(is.rootDir, repo)
 blobUploadPath := path.Join(dir, storage.BlobUploadDir, uuid)
@@ -692,7 +693,7 @@ func (is *ObjectStorage) NewBlobUpload(repo string) (string, error) {
 }
 // GetBlobUpload returns the current size of a blob upload.
-func (is *ObjectStorage) GetBlobUpload(repo string, uuid string) (int64, error) {
+func (is *ObjectStorage) GetBlobUpload(repo, uuid string) (int64, error) {
 var fileSize int64
 blobUploadPath := is.BlobUploadPath(repo, uuid)
@@ -727,7 +728,7 @@ func (is *ObjectStorage) GetBlobUpload(repo string, uuid string) (int64, error)
 // PutBlobChunkStreamed appends another chunk of data to the specified blob. It returns
 // the number of actual bytes to the blob.
-func (is *ObjectStorage) PutBlobChunkStreamed(repo string, uuid string, body io.Reader) (int64, error) {
+func (is *ObjectStorage) PutBlobChunkStreamed(repo, uuid string, body io.Reader) (int64, error) {
 if err := is.InitRepo(repo); err != nil {
 return -1, err
 }
@@ -769,8 +770,9 @@ func (is *ObjectStorage) PutBlobChunkStreamed(repo string, uuid string, body io.
 // PutBlobChunk writes another chunk of data to the specified blob. It returns
 // the number of actual bytes to the blob.
-func (is *ObjectStorage) PutBlobChunk(repo string, uuid string, from int64, to int64,
- body io.Reader) (int64, error) {
+func (is *ObjectStorage) PutBlobChunk(repo, uuid string, from, to int64,
+ body io.Reader,
+) (int64, error) {
 if err := is.InitRepo(repo); err != nil {
 return -1, err
 }
@@ -828,7 +830,7 @@ func (is *ObjectStorage) PutBlobChunk(repo string, uuid string, from int64, to i
 }
 // BlobUploadInfo returns the current blob size in bytes.
-func (is *ObjectStorage) BlobUploadInfo(repo string, uuid string) (int64, error) {
+func (is *ObjectStorage) BlobUploadInfo(repo, uuid string) (int64, error) {
 var fileSize int64
 blobUploadPath := is.BlobUploadPath(repo, uuid)
@@ -861,7 +863,7 @@ func (is *ObjectStorage) BlobUploadInfo(repo string, uuid string) (int64, error)
 }
 // FinishBlobUpload finalizes the blob upload and moves blob the repository.
-func (is *ObjectStorage) FinishBlobUpload(repo string, uuid string, body io.Reader, digest string) error {
+func (is *ObjectStorage) FinishBlobUpload(repo, uuid string, body io.Reader, digest string) error {
 dstDigest, err := godigest.Parse(digest)
 if err != nil {
 is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
@@ -1010,7 +1012,7 @@ func (is *ObjectStorage) RunGCPeriodically(gcInterval time.Duration) {
 }
 // DeleteBlobUpload deletes an existing blob upload that is currently in progress.
-func (is *ObjectStorage) DeleteBlobUpload(repo string, uuid string) error {
+func (is *ObjectStorage) DeleteBlobUpload(repo, uuid string) error {
 blobUploadPath := is.BlobUploadPath(repo, uuid)
 if err := is.store.Delete(context.Background(), blobUploadPath); err != nil {
 is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("error deleting blob upload")
@@ -1027,7 +1029,7 @@ func (is *ObjectStorage) BlobPath(repo string, digest godigest.Digest) string {
 }
 // CheckBlob verifies a blob and returns true if the blob is correct.
-func (is *ObjectStorage) CheckBlob(repo string, digest string) (bool, int64, error) {
+func (is *ObjectStorage) CheckBlob(repo, digest string) (bool, int64, error) {
 var lockLatency time.Time
 dgst, err := godigest.Parse(digest)
@@ -1061,7 +1063,7 @@ func (is *ObjectStorage) CheckBlob(repo string, digest string) (bool, int64, err
 // GetBlob returns a stream to read the blob.
 // blob selector instead of directly downloading the blob.
-func (is *ObjectStorage) GetBlob(repo string, digest string, mediaType string) (io.Reader, int64, error) {
+func (is *ObjectStorage) GetBlob(repo, digest, mediaType string) (io.Reader, int64, error) {
 var lockLatency time.Time
 dgst, err := godigest.Parse(digest)
@@ -1093,7 +1095,7 @@ func (is *ObjectStorage) GetBlob(repo string, digest string, mediaType string) (
 return blobReader, binfo.Size(), nil
 }
-func (is *ObjectStorage) GetBlobContent(repo string, digest string) ([]byte, error) {
+func (is *ObjectStorage) GetBlobContent(repo, digest string) ([]byte, error) {
 blob, _, err := is.GetBlob(repo, digest, ispec.MediaTypeImageManifest)
 if err != nil {
 return []byte{}, err
@@ -1111,7 +1113,7 @@ func (is *ObjectStorage) GetBlobContent(repo string, digest string) ([]byte, err
 return buf.Bytes(), nil
 }
-func (is *ObjectStorage) GetReferrers(repo, digest string, mediaType string) ([]artifactspec.Descriptor, error) {
+func (is *ObjectStorage) GetReferrers(repo, digest, mediaType string) ([]artifactspec.Descriptor, error) {
 return nil, zerr.ErrMethodNotSupported
 }
@@ -1134,7 +1136,7 @@ func (is *ObjectStorage) GetIndexContent(repo string) ([]byte, error) {
 }
 // DeleteBlob removes the blob from the repository.
-func (is *ObjectStorage) DeleteBlob(repo string, digest string) error {
+func (is *ObjectStorage) DeleteBlob(repo, digest string) error {
 var lockLatency time.Time
 dgst, err := godigest.Parse(digest)
diff --git a/pkg/storage/scrub.go b/pkg/storage/scrub.go
index be417687..bde9aacb 100644
--- a/pkg/storage/scrub.go
+++ b/pkg/storage/scrub.go
@@ -126,8 +126,7 @@ func checkRepo(imageName string, imgStore ImageStore) ([]ScrubImageResult, error
 return results, nil
 }
-func checkIntegrity(ctx context.Context, imageName, tagName string, oci casext.Engine, manifest ispec.Descriptor,
- dir string) ScrubImageResult {
+func checkIntegrity(ctx context.Context, imageName, tagName string, oci casext.Engine, manifest ispec.Descriptor, dir string) ScrubImageResult { // nolint: lll
 // check manifest and config
 stat, err := umoci.Stat(ctx, oci, manifest)
diff --git a/pkg/storage/scrub_test.go b/pkg/storage/scrub_test.go
index b140f03d..e4cc848e 100644
--- a/pkg/storage/scrub_test.go
+++ b/pkg/storage/scrub_test.go
@@ -117,7 +117,8 @@ func TestCheckAllBlobsIntegrity(t *testing.T) {
 }
 mnfst.SchemaVersion = 2
- mb, _ := json.Marshal(mnfst)
+ mb, err := json.Marshal(mnfst)
+ So(err, ShouldBeNil)
 manifest, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest, mb)
 So(err, ShouldBeNil)
diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go
index 77a42c3a..aadceab5 100644
--- a/pkg/storage/storage.go
+++ b/pkg/storage/storage.go
@@ -24,23 +24,23 @@ type ImageStore interface {
 ValidateRepo(name string) (bool, error)
 GetRepositories() ([]string, error)
 GetImageTags(repo string) ([]string, error)
- GetImageManifest(repo string, reference string) ([]byte, string, string, error)
- PutImageManifest(repo string, reference string, mediaType string, body []byte) (string, error)
- DeleteImageManifest(repo string, reference string) error
- BlobUploadPath(repo string, uuid string) string
+ GetImageManifest(repo, reference string) ([]byte, string, string, error)
+ PutImageManifest(repo, reference, mediaType string, body []byte) (string, error)
+ DeleteImageManifest(repo, reference string) error
+ BlobUploadPath(repo, uuid string) string
 NewBlobUpload(repo string) (string, error)
- GetBlobUpload(repo string, uuid string) (int64, error)
- PutBlobChunkStreamed(repo string, uuid string, body io.Reader) (int64, error)
- PutBlobChunk(repo string, uuid string, from int64, to int64, body io.Reader) (int64, error)
- BlobUploadInfo(repo string, uuid string) (int64, error)
- FinishBlobUpload(repo string, uuid string, body io.Reader, digest string) error
+ GetBlobUpload(repo, uuid string) (int64, error)
+ PutBlobChunkStreamed(repo, uuid string, body io.Reader) (int64, error)
+ PutBlobChunk(repo, uuid string, from, to int64, body io.Reader) (int64, error)
+ BlobUploadInfo(repo, uuid string) (int64, error)
+ FinishBlobUpload(repo, uuid string, body io.Reader, digest string) error
 FullBlobUpload(repo string, body io.Reader, digest string) (string, int64, error)
 DedupeBlob(src string, dstDigest digest.Digest, dst string) error
- DeleteBlobUpload(repo string, uuid string) error
+ DeleteBlobUpload(repo, uuid string) error
 BlobPath(repo string, digest digest.Digest) string
- CheckBlob(repo string, digest string) (bool, int64, error)
- GetBlob(repo string, digest string, mediaType string) (io.Reader, int64, error)
- DeleteBlob(repo string, digest string) error
+ CheckBlob(repo, digest string) (bool, int64, error)
+ GetBlob(repo, digest, mediaType string) (io.Reader, int64, error)
+ DeleteBlob(repo, digest string) error
 GetIndexContent(repo string) ([]byte, error)
 GetBlobContent(repo, digest string) ([]byte, error)
 GetReferrers(repo, digest string, mediaType string) ([]artifactspec.Descriptor, error)
diff --git a/pkg/storage/storage_fs.go b/pkg/storage/storage_fs.go
index 3ce8acb2..1ea4b92b 100644
--- a/pkg/storage/storage_fs.go
+++ b/pkg/storage/storage_fs.go
@@ -106,7 +106,8 @@ func (sc StoreController) GetImageStore(name string) ImageStore {
 // NewImageStore returns a new image store backed by a file storage.
 func NewImageStore(rootDir string, gc bool, gcDelay time.Duration, dedupe, commit bool,
- log zlog.Logger, metrics monitoring.MetricServer) ImageStore {
+ log zlog.Logger, metrics monitoring.MetricServer,
+) ImageStore {
 if _, err := os.Stat(rootDir); os.IsNotExist(err) {
 if err := os.MkdirAll(rootDir, DefaultDirPerms); err != nil {
 log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir")
@@ -387,7 +388,7 @@ func (is *ImageStoreFS) GetImageTags(repo string) ([]string, error) {
 }
 // GetImageManifest returns the image manifest of an image in the specific repository.
-func (is *ImageStoreFS) GetImageManifest(repo string, reference string) ([]byte, string, string, error) {
+func (is *ImageStoreFS) GetImageManifest(repo, reference string) ([]byte, string, string, error) {
 var lockLatency time.Time
 dir := path.Join(is.rootDir, repo)
@@ -471,7 +472,7 @@ func (is *ImageStoreFS) GetImageManifest(repo string, reference string) ([]byte,
 return buf, digest.String(), mediaType, nil
 }
-func (is *ImageStoreFS) validateOCIManifest(repo string, reference string, manifest *ispec.Manifest) (string, error) {
+func (is *ImageStoreFS) validateOCIManifest(repo, reference string, manifest *ispec.Manifest) (string, error) {
 if manifest.SchemaVersion != SchemaVersion {
 is.log.Error().Int("SchemaVersion", manifest.SchemaVersion).Msg("invalid manifest")
@@ -526,8 +527,9 @@ func (is *ImageStoreFS) validateOCIManifest(repo string, reference string, manif
 }
 // PutImageManifest adds an image manifest to the repository.
-func (is *ImageStoreFS) PutImageManifest(repo string, reference string, mediaType string,
- body []byte) (string, error) {
+func (is *ImageStoreFS) PutImageManifest(repo, reference, mediaType string,
+ body []byte,
+) (string, error) {
 if err := is.InitRepo(repo); err != nil {
 is.log.Debug().Err(err).Msg("init repo")
@@ -703,7 +705,7 @@ func (is *ImageStoreFS) PutImageManifest(repo string, reference string, mediaTyp
 }
 // DeleteImageManifest deletes the image manifest from the repository.
-func (is *ImageStoreFS) DeleteImageManifest(repo string, reference string) error {
+func (is *ImageStoreFS) DeleteImageManifest(repo, reference string) error {
 var lockLatency time.Time
 dir := path.Join(is.rootDir, repo)
@@ -815,7 +817,7 @@ func (is *ImageStoreFS) DeleteImageManifest(repo string, reference string) error
 }
 // BlobUploadPath returns the upload path for a blob in this store.
-func (is *ImageStoreFS) BlobUploadPath(repo string, uuid string) string {
+func (is *ImageStoreFS) BlobUploadPath(repo, uuid string) string {
 dir := path.Join(is.rootDir, repo)
 blobUploadPath := path.Join(dir, BlobUploadDir, uuid)
@@ -849,7 +851,7 @@ func (is *ImageStoreFS) NewBlobUpload(repo string) (string, error) {
 }
 // GetBlobUpload returns the current size of a blob upload.
-func (is *ImageStoreFS) GetBlobUpload(repo string, uuid string) (int64, error) {
+func (is *ImageStoreFS) GetBlobUpload(repo, uuid string) (int64, error) {
 blobUploadPath := is.BlobUploadPath(repo, uuid)
 binfo, err := os.Stat(blobUploadPath)
@@ -866,7 +868,7 @@ func (is *ImageStoreFS) GetBlobUpload(repo string, uuid string) (int64, error) {
 // PutBlobChunkStreamed appends another chunk of data to the specified blob. It returns
 // the number of actual bytes to the blob.
-func (is *ImageStoreFS) PutBlobChunkStreamed(repo string, uuid string, body io.Reader) (int64, error) {
+func (is *ImageStoreFS) PutBlobChunkStreamed(repo, uuid string, body io.Reader) (int64, error) {
 if err := is.InitRepo(repo); err != nil {
 return -1, err
 }
@@ -906,8 +908,9 @@ func (is *ImageStoreFS) PutBlobChunkStreamed(repo string, uuid string, body io.R
 // PutBlobChunk writes another chunk of data to the specified blob. It returns
 // the number of actual bytes to the blob.
-func (is *ImageStoreFS) PutBlobChunk(repo string, uuid string, from int64, to int64,
- body io.Reader) (int64, error) {
+func (is *ImageStoreFS) PutBlobChunk(repo, uuid string, from, to int64,
+ body io.Reader,
+) (int64, error) {
 if err := is.InitRepo(repo); err != nil {
 return -1, err
 }
@@ -953,7 +956,7 @@ func (is *ImageStoreFS) PutBlobChunk(repo string, uuid string, from int64, to in
 }
 // BlobUploadInfo returns the current blob size in bytes.
-func (is *ImageStoreFS) BlobUploadInfo(repo string, uuid string) (int64, error) {
+func (is *ImageStoreFS) BlobUploadInfo(repo, uuid string) (int64, error) {
 blobUploadPath := is.BlobUploadPath(repo, uuid)
 binfo, err := os.Stat(blobUploadPath)
@@ -969,7 +972,7 @@ func (is *ImageStoreFS) BlobUploadInfo(repo string, uuid string) (int64, error)
 }
 // FinishBlobUpload finalizes the blob upload and moves blob the repository.
-func (is *ImageStoreFS) FinishBlobUpload(repo string, uuid string, body io.Reader, digest string) error {
+func (is *ImageStoreFS) FinishBlobUpload(repo, uuid string, body io.Reader, digest string) error {
 dstDigest, err := godigest.Parse(digest)
 if err != nil {
 is.log.Error().Err(err).Str("digest", digest).Msg("failed to parse digest")
@@ -1169,7 +1172,7 @@ retry:
 is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to stat")
 // the actual blob on disk may have been removed by GC, so sync the cache
 if err := is.cache.DeleteBlob(dstDigest.String(), dstRecord); err != nil {
- // nolint:lll
+ //nolint:lll // gofumpt conflicts with lll
 is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: unable to delete blob record")
 return err
@@ -1215,7 +1218,7 @@ retry:
 }
 // DeleteBlobUpload deletes an existing blob upload that is currently in progress.
-func (is *ImageStoreFS) DeleteBlobUpload(repo string, uuid string) error {
+func (is *ImageStoreFS) DeleteBlobUpload(repo, uuid string) error {
 blobUploadPath := is.BlobUploadPath(repo, uuid)
 if err := os.Remove(blobUploadPath); err != nil {
 is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("error deleting blob upload")
@@ -1232,7 +1235,7 @@ func (is *ImageStoreFS) BlobPath(repo string, digest godigest.Digest) string {
 }
 // CheckBlob verifies a blob and returns true if the blob is correct.
-func (is *ImageStoreFS) CheckBlob(repo string, digest string) (bool, int64, error) {
+func (is *ImageStoreFS) CheckBlob(repo, digest string) (bool, int64, error) {
 var lockLatency time.Time
 parsedDigest, err := godigest.Parse(digest)
@@ -1301,7 +1304,7 @@ func (is *ImageStoreFS) checkCacheBlob(digest string) (string, error) {
 return dstRecord, nil
 }
-func (is *ImageStoreFS) copyBlob(repo string, blobPath string, dstRecord string) (int64, error) {
+func (is *ImageStoreFS) copyBlob(repo, blobPath, dstRecord string) (int64, error) {
 if err := is.initRepo(repo); err != nil {
 is.log.Error().Err(err).Str("repo", repo).Msg("unable to initialize an empty repo")
@@ -1326,7 +1329,7 @@ func (is *ImageStoreFS) copyBlob(repo string, blobPath string, dstRecord string)
 // GetBlob returns a stream to read the blob.
 // blob selector instead of directly downloading the blob.
-func (is *ImageStoreFS) GetBlob(repo string, digest string, mediaType string) (io.Reader, int64, error) {
+func (is *ImageStoreFS) GetBlob(repo, digest, mediaType string) (io.Reader, int64, error) {
 var lockLatency time.Time
 parsedDigest, err := godigest.Parse(digest)
@@ -1358,7 +1361,7 @@ func (is *ImageStoreFS) GetBlob(repo string, digest string, mediaType string) (i
 return blobReader, binfo.Size(), nil
 }
-func (is *ImageStoreFS) GetBlobContent(repo string, digest string) ([]byte, error) {
+func (is *ImageStoreFS) GetBlobContent(repo, digest string) ([]byte, error) {
 blob, _, err := is.GetBlob(repo, digest, ispec.MediaTypeImageManifest)
 if err != nil {
 return []byte{}, err
@@ -1401,7 +1404,7 @@ func (is *ImageStoreFS) GetIndexContent(repo string) ([]byte, error) {
 }
 // DeleteBlob removes the blob from the repository.
-func (is *ImageStoreFS) DeleteBlob(repo string, digest string) error {
+func (is *ImageStoreFS) DeleteBlob(repo, digest string) error {
 var lockLatency time.Time
 dgst, err := godigest.Parse(digest)
@@ -1440,7 +1443,7 @@ func (is *ImageStoreFS) DeleteBlob(repo string, digest string) error {
 return nil
 }
-func (is *ImageStoreFS) GetReferrers(repo, digest string, mediaType string) ([]artifactspec.Descriptor, error) {
+func (is *ImageStoreFS) GetReferrers(repo, digest, mediaType string) ([]artifactspec.Descriptor, error) {
 var lockLatency time.Time
 dir := path.Join(is.rootDir, repo)
diff --git a/pkg/storage/storage_fs_test.go b/pkg/storage/storage_fs_test.go
index 1635ff0e..f3497322 100644
--- a/pkg/storage/storage_fs_test.go
+++ b/pkg/storage/storage_fs_test.go
@@ -84,7 +84,8 @@ func TestStorageFSAPIs(t *testing.T) {
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ := json.Marshal(manifest)
+ manifestBuf, err := json.Marshal(manifest)
+ So(err, ShouldBeNil)
 digest = godigest.FromBytes(manifestBuf)
 err = os.Chmod(path.Join(imgStore.RootDir(), repoName, "index.json"), 0o000)
@@ -217,7 +218,8 @@ func TestDedupeLinks(t *testing.T) {
 },
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ := json.Marshal(manifest)
+ manifestBuf, err := json.Marshal(manifest)
+ So(err, ShouldBeNil)
 digest = godigest.FromBytes(manifestBuf)
 _, err = imgStore.PutImageManifest("dedupe1", digest.String(), ispec.MediaTypeImageManifest, manifestBuf)
 So(err, ShouldBeNil)
@@ -273,7 +275,8 @@ func TestDedupeLinks(t *testing.T) {
 },
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ = json.Marshal(manifest)
+ manifestBuf, err = json.Marshal(manifest)
+ So(err, ShouldBeNil)
 digest = godigest.FromBytes(manifestBuf)
 _, err = imgStore.PutImageManifest("dedupe2", "1.0", ispec.MediaTypeImageManifest, manifestBuf)
 So(err, ShouldBeNil)
@@ -779,7 +782,8 @@ func TestGarbageCollect(t *testing.T) {
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ := json.Marshal(manifest)
+ manifestBuf, err := json.Marshal(manifest)
+ So(err, ShouldBeNil)
 digest := godigest.FromBytes(manifestBuf)
 _, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest, manifestBuf)
@@ -866,7 +870,8 @@ func TestGarbageCollect(t *testing.T) {
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ := json.Marshal(manifest)
+ manifestBuf, err := json.Marshal(manifest)
+ So(err, ShouldBeNil)
 digest := godigest.FromBytes(manifestBuf)
 _, err = imgStore.PutImageManifest(repoName, tag, ispec.MediaTypeImageManifest, manifestBuf)
@@ -945,7 +950,8 @@ func TestGarbageCollect(t *testing.T) {
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ := json.Marshal(manifest)
+ manifestBuf, err := json.Marshal(manifest)
+ So(err, ShouldBeNil)
 _, err = imgStore.PutImageManifest(repo1Name, tag, ispec.MediaTypeImageManifest, manifestBuf)
 So(err, ShouldBeNil)
@@ -1007,7 +1013,8 @@ func TestGarbageCollect(t *testing.T) {
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ = json.Marshal(manifest)
+ manifestBuf, err = json.Marshal(manifest)
+ So(err, ShouldBeNil)
 _, err = imgStore.PutImageManifest(repo2Name, tag, ispec.MediaTypeImageManifest, manifestBuf)
 So(err, ShouldBeNil)
@@ -1062,7 +1069,8 @@ func TestGarbageCollect(t *testing.T) {
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ = json.Marshal(manifest)
+ manifestBuf, err = json.Marshal(manifest)
+ So(err, ShouldBeNil)
 digest := godigest.FromBytes(manifestBuf)
 _, err = imgStore.PutImageManifest(repo2Name, tag, ispec.MediaTypeImageManifest, manifestBuf)
diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go
index 7d3928b2..d814f380 100644
--- a/pkg/storage/storage_test.go
+++ b/pkg/storage/storage_test.go
@@ -235,7 +235,8 @@ func TestStorageAPIs(t *testing.T) {
 manifest := ispec.Manifest{}
 manifest.SchemaVersion = 2
- manifestBuf, _ := json.Marshal(manifest)
+ manifestBuf, err := json.Marshal(manifest)
+ So(err, ShouldBeNil)
 Convey("Bad image manifest", func() {
 _, err = imgStore.PutImageManifest("test", digest.String(), "application/json",
@@ -289,12 +290,14 @@ func TestStorageAPIs(t *testing.T) {
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ = json.Marshal(manifest)
+ manifestBuf, err = json.Marshal(manifest)
+ So(err, ShouldBeNil)
 digest := godigest.FromBytes(manifestBuf)
 // bad manifest
 manifest.Layers[0].Digest = godigest.FromBytes([]byte("inexistent"))
- badMb, _ := json.Marshal(manifest)
+ badMb, err := json.Marshal(manifest)
+ So(err, ShouldBeNil)
 _, err = imgStore.PutImageManifest("test", "1.0", ispec.MediaTypeImageManifest, badMb)
 So(err, ShouldNotBeNil)
@@ -428,7 +431,8 @@ func TestStorageAPIs(t *testing.T) {
 manifest := ispec.Manifest{}
 manifest.SchemaVersion = 2
- manifestBuf, _ := json.Marshal(manifest)
+ manifestBuf, err := json.Marshal(manifest)
+ So(err, ShouldBeNil)
 Convey("Bad digests", func() {
 _, _, err := imgStore.FullBlobUpload("test", bytes.NewBuffer([]byte{}), "inexistent")
@@ -473,7 +477,8 @@ func TestStorageAPIs(t *testing.T) {
 },
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ = json.Marshal(manifest)
+ manifestBuf, err = json.Marshal(manifest)
+ So(err, ShouldBeNil)
 digest := godigest.FromBytes(manifestBuf)
 _, err = imgStore.PutImageManifest("test", digest.String(), ispec.MediaTypeImageManifest, manifestBuf)
 So(err, ShouldBeNil)
@@ -567,7 +572,9 @@ func TestStorageAPIs(t *testing.T) {
 },
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ := json.Marshal(manifest)
+ manifestBuf, err := json.Marshal(manifest)
+ So(err, ShouldBeNil)
+
 digest = godigest.FromBytes(manifestBuf)
 _, err = imgStore.PutImageManifest("replace", "1.0", ispec.MediaTypeImageManifest, manifestBuf)
 So(err, ShouldBeNil)
@@ -617,7 +624,8 @@ func TestStorageAPIs(t *testing.T) {
 },
 }
 manifest.SchemaVersion = 2
- manifestBuf, _ = json.Marshal(manifest)
+ manifestBuf, err = json.Marshal(manifest)
+ So(err, ShouldBeNil)
 _ = godigest.FromBytes(manifestBuf)
 _, err = imgStore.PutImageManifest("replace", "1.0", ispec.MediaTypeImageManifest, manifestBuf)
 So(err, ShouldBeNil)
diff --git a/pkg/test/common.go b/pkg/test/common.go
index f9c3e4c2..49aeb7b3 100644
--- a/pkg/test/common.go
+++ b/pkg/test/common.go
@@ -85,7 +85,7 @@ func Location(baseURL string, resp *resty.Response) string {
 return baseURL + path
 }
-func CopyFiles(sourceDir string, destDir string) error {
+func CopyFiles(sourceDir, destDir string) error {
 sourceMeta, err := os.Stat(sourceDir)
 if err != nil {
 return fmt.Errorf("CopyFiles os.Stat failed: %w", err)