refactor(test): add lint rule for messages starting with the component (#2045)
Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>
Signed-off-by: Laurentiu Niculae <niculae.laurentiu1@gmail.com>
Signed-off-by: Andrei Aaron <aaaron@luxoft.com>
parent 262a904286
commit 79e14027ee
81 changed files with 855 additions and 681 deletions
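The convention this change enforces is visible throughout the hunks below: the component is carried as a structured "component" field on the log event, and the message itself reads "failed to ..." rather than starting with a component prefix. A minimal sketch of the two styles, assuming the zerolog logger these handlers already use (the logger setup and field values here are illustrative only, not code from this commit):

package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	log := zerolog.New(os.Stdout).With().Timestamp().Logger()
	err := errors.New("connection refused")

	// Discouraged by the new lint rule: the component is baked into the message text.
	log.Error().Err(err).Msg("session: can not save http session")

	// Preferred: the component is a structured field and the message starts with "failed to".
	log.Error().Err(err).Str("component", "session").Msg("failed to save http session")
}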
.github/workflows/golangci-lint.yaml (vendored, 3 changes)

@@ -45,3 +45,6 @@ jobs:
      - name: Run linter from make target
        run: |
          make check
      - name: Run log linter
        run: |
          make check-logs
Makefile (4 changes)

@@ -311,6 +311,10 @@ $(GOLINTER):
	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(TOOLSDIR)/bin $(GOLINTER_VERSION)
	$(GOLINTER) version

.PHONY: check-logs
check-logs:
	@./scripts/check_logs.sh

.PHONY: check
check: $(if $(findstring ui,$(BUILD_LABELS)), ui)
check: ./golangcilint.yaml $(GOLINTER)
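make check-logs delegates to scripts/check_logs.sh, whose contents are not part of this diff. Purely as a sketch of the kind of check such a script could run — not the actual script — the same rule can be expressed as a small Go walker that flags zerolog Msg(...) literals still starting with a component prefix; the file name, regular expressions, and exit behavior below are assumptions:

// logmsglint.go - hypothetical stand-in for the log-message check.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strings"
)

// msgPattern captures the string literal passed to zerolog's Msg(...) calls.
var msgPattern = regexp.MustCompile(`\.Msg\("([^"]*)"\)`)

// componentPrefix matches messages that start with a component name and a colon,
// e.g. "metadb: ..." or "openID: ...".
var componentPrefix = regexp.MustCompile(`^[a-zA-Z][\w-]*:\s`)

func main() {
	failed := false

	_ = filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() || !strings.HasSuffix(path, ".go") {
			return nil
		}

		data, readErr := os.ReadFile(path)
		if readErr != nil {
			return nil
		}

		for i, line := range strings.Split(string(data), "\n") {
			m := msgPattern.FindStringSubmatch(line)
			if m == nil {
				continue
			}

			// Flag log messages that still carry a "component:" prefix.
			if componentPrefix.MatchString(m[1]) {
				fmt.Printf("%s:%d: log message starts with a component prefix: %q\n", path, i+1, m[1])
				failed = true
			}
		}

		return nil
	})

	if failed {
		os.Exit(1)
	}
}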
errors/errors.go (233 changes)

@@ -46,129 +46,126 @@ func GetDetails(err error) map[string]string {
}

var (
ErrBadConfig = errors.New("config: invalid config")
ErrCliBadConfig = errors.New("cli: bad config")
ErrRepoNotFound = errors.New("repository: not found")
ErrRepoBadVersion = errors.New("repository: unsupported layout version")
ErrRepoBadLayout = errors.New("repository: invalid layout")
ErrManifestNotFound = errors.New("manifest: not found")
ErrBadManifest = errors.New("manifest: invalid contents")
ErrUploadNotFound = errors.New("uploads: not found")
ErrBadUploadRange = errors.New("uploads: bad range")
ErrBlobNotFound = errors.New("blob: not found")
ErrBadBlob = errors.New("blob: bad blob")
ErrBadBlobDigest = errors.New("blob: bad blob digest")
ErrBlobReferenced = errors.New("blob: referenced by manifest")
ErrManifestReferenced = errors.New("manifest: referenced by index image")
ErrUnknownCode = errors.New("error: unknown error code")
ErrBadCACert = errors.New("tls: invalid ca cert")
ErrBadUser = errors.New("auth: non-existent user")
ErrEntriesExceeded = errors.New("ldap: too many entries returned")
ErrLDAPEmptyPassphrase = errors.New("ldap: empty passphrase")
ErrLDAPBadConn = errors.New("ldap: bad connection")
ErrLDAPConfig = errors.New("config: invalid LDAP configuration")
ErrCacheRootBucket = errors.New("cache: unable to create/update root bucket")
ErrCacheNoBucket = errors.New("cache: unable to find bucket")
ErrCacheMiss = errors.New("cache: miss")
ErrRequireCred = errors.New("ldap: bind credentials required")
ErrInvalidCred = errors.New("ldap: invalid credentials")
ErrEmptyJSON = errors.New("cli: config json is empty")
ErrInvalidArgs = errors.New("cli: Invalid Arguments")
ErrInvalidFlagsCombination = errors.New("cli: Invalid combination of flags")
ErrInvalidURL = errors.New("cli: invalid URL format")
ErrExtensionNotEnabled = errors.New("cli: functionality is not built/configured in the current server")
ErrUnauthorizedAccess = errors.New("auth: unauthorized access. check credentials")
ErrURLNotFound = errors.New("url not found")
ErrCannotResetConfigKey = errors.New("cli: cannot reset given config key")
ErrConfigNotFound = errors.New("cli: config with the given name does not exist")
ErrNoURLProvided = errors.New("cli: no URL provided by flag or via config")
ErrIllegalConfigKey = errors.New("cli: given config key is not allowed")
ErrScanNotSupported = errors.New("search: scanning of image media type not supported")
ErrCLITimeout = errors.New("cli: Query timed out while waiting for results")
ErrDuplicateConfigName = errors.New("cli: cli config name already added")
ErrInvalidRoute = errors.New("routes: invalid route prefix")
ErrImgStoreNotFound = errors.New("routes: image store not found corresponding to given route")
ErrEmptyValue = errors.New("cache: empty value")
ErrEmptyRepoList = errors.New("search: no repository found")
ErrCVESearchDisabled = errors.New("search: CVE search is disabled")
ErrCVEDBNotFound = errors.New("cve: CVE DB is not present")
ErrInvalidRepositoryName = errors.New("repository: not a valid repository name")
ErrSyncMissingCatalog = errors.New("sync: couldn't fetch upstream registry's catalog")
ErrMethodNotSupported = errors.New("storage: method not supported")
ErrInvalidMetric = errors.New("metrics: invalid metric func")
ErrInjected = errors.New("test: injected failure")
ErrSyncInvalidUpstreamURL = errors.New("sync: upstream url not found in sync config")
ErrRegistryNoContent = errors.New("sync: could not find a Content that matches localRepo")
ErrSyncReferrerNotFound = errors.New("sync: couldn't find upstream referrer")
ErrImageLintAnnotations = errors.New("routes: lint checks failed")
ErrParsingAuthHeader = errors.New("auth: failed parsing authorization header")
ErrBadType = errors.New("core: invalid type")
ErrParsingHTTPHeader = errors.New("routes: invalid HTTP header")
ErrBadRange = errors.New("storage: bad range")
ErrBadLayerCount = errors.New("manifest: layers count doesn't correspond to config history")
ErrManifestConflict = errors.New("manifest: multiple manifests found")
ErrManifestMetaNotFound = errors.New("metadb: image metadata not found for given manifest reference")
ErrImageMetaNotFound = errors.New("metadb: image meta not found")
ErrUnexpectedMediaType = errors.New("metadb: got unexpected media type")
ErrRepoMetaNotFound = errors.New("metadb: repo metadata not found for given repo name")
ErrTagMetaNotFound = errors.New("metadb: tag metadata not found for given repo and tag names")
ErrTypeAssertionFailed = errors.New("storage: failed DatabaseDriver type assertion")
ErrInvalidRequestParams = errors.New("resolver: parameter sent has invalid value")
ErrBadConfig = errors.New("invalid server config")
ErrCliBadConfig = errors.New("invalid cli config")
ErrRepoNotFound = errors.New("repository not found")
ErrRepoBadVersion = errors.New("unsupported repository layout version")
ErrRepoBadLayout = errors.New("invalid repository layout")
ErrManifestNotFound = errors.New("manifest not found")
ErrBadManifest = errors.New("invalid manifest content")
ErrUploadNotFound = errors.New("upload destination not found")
ErrBadUploadRange = errors.New("bad upload content-length")
ErrBlobNotFound = errors.New("blob not found")
ErrBadBlob = errors.New("bad blob")
ErrBadBlobDigest = errors.New("bad blob digest")
ErrBlobReferenced = errors.New("blob referenced by manifest")
ErrManifestReferenced = errors.New("manifest referenced by index image")
ErrUnknownCode = errors.New("unknown error code")
ErrBadCACert = errors.New("invalid tls ca cert")
ErrBadUser = errors.New("non-existent user")
ErrEntriesExceeded = errors.New("too many entries returned")
ErrLDAPEmptyPassphrase = errors.New("empty ldap passphrase")
ErrLDAPBadConn = errors.New("bad ldap connection")
ErrLDAPConfig = errors.New("invalid LDAP configuration")
ErrCacheRootBucket = errors.New("unable to create/update root cache bucket")
ErrCacheNoBucket = errors.New("unable to find cache bucket")
ErrCacheMiss = errors.New("cache miss")
ErrRequireCred = errors.New("bind ldap credentials required")
ErrInvalidCred = errors.New("invalid ldap credentials")
ErrEmptyJSON = errors.New("cli config json is empty")
ErrInvalidArgs = errors.New("invalid cli arguments")
ErrInvalidFlagsCombination = errors.New("invalid cli combination of flags")
ErrInvalidURL = errors.New("invalid URL format")
ErrExtensionNotEnabled = errors.New("functionality is not built/configured in the current server")
ErrUnauthorizedAccess = errors.New("unauthorized access. check credentials")
ErrCannotResetConfigKey = errors.New("cannot reset given config key")
ErrConfigNotFound = errors.New("config with the given name does not exist")
ErrNoURLProvided = errors.New("no URL provided")
ErrIllegalConfigKey = errors.New("given config key is not allowed")
ErrScanNotSupported = errors.New("scanning is not supported for given image media type")
ErrCLITimeout = errors.New("query timed out while waiting for results")
ErrDuplicateConfigName = errors.New("cli config name already added")
ErrInvalidRoute = errors.New("invalid route prefix")
ErrImgStoreNotFound = errors.New("image store not found corresponding to given route")
ErrEmptyValue = errors.New("empty cache value")
ErrEmptyRepoList = errors.New("no repository found")
ErrCVESearchDisabled = errors.New("cve search is disabled")
ErrCVEDBNotFound = errors.New("cve-db is not present")
ErrInvalidRepositoryName = errors.New("not a valid repository name")
ErrSyncMissingCatalog = errors.New("couldn't fetch upstream registry's catalog")
ErrInvalidMetric = errors.New("invalid metric func")
ErrInjected = errors.New("injected failure")
ErrSyncInvalidUpstreamURL = errors.New("upstream url not found in sync config")
ErrRegistryNoContent = errors.New("could not find a Content that matches localRepo")
ErrSyncReferrerNotFound = errors.New("couldn't find upstream referrer")
ErrImageLintAnnotations = errors.New("lint checks failed")
ErrParsingAuthHeader = errors.New("failed parsing authorization header")
ErrBadType = errors.New("invalid type")
ErrParsingHTTPHeader = errors.New("invalid HTTP header")
ErrBadRange = errors.New("bad range for streaming blob")
ErrBadLayerCount = errors.New("manifest layers count doesn't correspond to config history")
ErrManifestConflict = errors.New("multiple manifests found")
ErrImageMetaNotFound = errors.New("image meta not found")
ErrUnexpectedMediaType = errors.New("unexpected media type")
ErrRepoMetaNotFound = errors.New("repo metadata not found for given repo name")
ErrTagMetaNotFound = errors.New("tag metadata not found for given repo and tag names")
ErrTypeAssertionFailed = errors.New("failed DatabaseDriver type assertion")
ErrInvalidRequestParams = errors.New("request parameter has invalid value")
ErrBadCtxFormat = errors.New("type assertion failed")
ErrEmptyRepoName = errors.New("metadb: repo name can't be empty string")
ErrEmptyTag = errors.New("metadb: tag can't be empty string")
ErrEmptyDigest = errors.New("metadb: digest can't be empty string")
ErrInvalidRepoRefFormat = errors.New("invalid image reference format [repo:tag] or [repo@digest]")
ErrLimitIsNegative = errors.New("pagination: limit has negative value")
ErrOffsetIsNegative = errors.New("pagination: offset has negative value")
ErrSortCriteriaNotSupported = errors.New("pagination: the sort criteria is not supported")
ErrMediaTypeNotSupported = errors.New("metadb: media type is not supported")
ErrEmptyRepoName = errors.New("repo name can't be empty string")
ErrEmptyTag = errors.New("tag can't be empty string")
ErrEmptyDigest = errors.New("digest can't be empty string")
ErrInvalidRepoRefFormat = errors.New("invalid image reference format, use [repo:tag] or [repo@digest]")
ErrLimitIsNegative = errors.New("pagination limit has negative value")
ErrOffsetIsNegative = errors.New("pagination offset has negative value")
ErrSortCriteriaNotSupported = errors.New("the pagination sort criteria is not supported")
ErrMediaTypeNotSupported = errors.New("media type is not supported")
ErrTimeout = errors.New("operation timeout")
ErrNotImplemented = errors.New("not implemented")
ErrDedupeRebuild = errors.New("dedupe: couldn't rebuild dedupe index")
ErrMissingAuthHeader = errors.New("auth: required authorization header is missing")
ErrUserAPIKeyNotFound = errors.New("userDB: user info for given API key hash not found")
ErrUserSessionNotFound = errors.New("userDB: user session for given ID not found")
ErrBucketDoesNotExist = errors.New("DB: bucket does not exist")
ErrOpenIDProviderDoesNotExist = errors.New("openID: provider does not exist in given config")
ErrHashKeyNotCreated = errors.New("cookiestore: generated random hash key is nil, not continuing")
ErrDedupeRebuild = errors.New("couldn't rebuild dedupe index")
ErrMissingAuthHeader = errors.New("required authorization header is missing")
ErrUserAPIKeyNotFound = errors.New("user info for given API key hash not found")
ErrUserSessionNotFound = errors.New("user session for given ID not found")
ErrBucketDoesNotExist = errors.New("bucket does not exist")
ErrOpenIDProviderDoesNotExist = errors.New("openid provider does not exist in given config")
ErrHashKeyNotCreated = errors.New("cookiestore generated random hash key is nil, aborting")
ErrFailedTypeAssertion = errors.New("type assertion failed")
ErrInvalidOldUserStarredRepos = errors.New("metadb: invalid old entry for user starred repos")
ErrUnmarshalledRepoListIsNil = errors.New("metadb: list of repos is still nil")
ErrCouldNotMarshalStarredRepos = errors.New("metadb: could not repack entry for user starred repos")
ErrInvalidOldUserBookmarkedRepos = errors.New("metadb: invalid old entry for user bookmarked repos")
ErrCouldNotMarshalBookmarkedRepos = errors.New("metadb: could not repack entry for user bookmarked repos")
ErrUserDataNotFound = errors.New("metadb: user data not found for given user identifier")
ErrUserDataNotAllowed = errors.New("metadb: user data operations are not allowed")
ErrCouldNotPersistData = errors.New("metadb: could not persist to db")
ErrSignConfigDirNotSet = errors.New("signatures: signature config dir not set")
ErrBadManifestDigest = errors.New("signatures: bad manifest digest")
ErrInvalidSignatureType = errors.New("signatures: invalid signature type")
ErrSyncPingRegistry = errors.New("sync: unable to ping any registry URLs")
ErrSyncImageNotSigned = errors.New("sync: image is not signed")
ErrSyncImageFilteredOut = errors.New("sync: image is filtered out by sync config")
ErrCallerInfo = errors.New("runtime: failed to get info regarding the current runtime")
ErrInvalidTruststoreType = errors.New("signatures: invalid truststore type")
ErrInvalidTruststoreName = errors.New("signatures: invalid truststore name")
ErrInvalidCertificateContent = errors.New("signatures: invalid certificate content")
ErrInvalidPublicKeyContent = errors.New("signatures: invalid public key content")
ErrInvalidStateCookie = errors.New("auth: state cookie not present or differs from original state")
ErrSyncNoURLsLeft = errors.New("sync: no valid registry urls left after filtering local ones")
ErrInvalidCLIParameter = errors.New("cli: invalid parameter")
ErrGQLEndpointNotFound = errors.New("cli: the server doesn't have a gql endpoint")
ErrGQLQueryNotSupported = errors.New("cli: query is not supported or has different arguments")
ErrBadHTTPStatusCode = errors.New("cli: the response doesn't contain the expected status code")
ErrFormatNotSupported = errors.New("cli: the given output format is not supported")
ErrAPINotSupported = errors.New("registry at the given address doesn't implement the correct API")
ErrInvalidSearchQuery = errors.New("invalid search query")
ErrFileAlreadyCancelled = errors.New("storageDriver: file already cancelled")
ErrFileAlreadyClosed = errors.New("storageDriver: file already closed")
ErrFileAlreadyCommitted = errors.New("storageDriver: file already committed")
ErrInvalidOutputFormat = errors.New("cli: invalid output format")
ErrInvalidOldUserStarredRepos = errors.New("invalid old entry for user starred repos")
ErrUnmarshalledRepoListIsNil = errors.New("list of repos is still nil")
ErrCouldNotMarshalStarredRepos = errors.New("could not repack entry for user starred repos")
ErrInvalidOldUserBookmarkedRepos = errors.New("invalid old entry for user bookmarked repos")
ErrCouldNotMarshalBookmarkedRepos = errors.New("could not repack entry for user bookmarked repos")
ErrUserDataNotFound = errors.New("user data not found for given user identifier")
ErrUserDataNotAllowed = errors.New("user data operations are not allowed")
ErrCouldNotPersistData = errors.New("could not persist to db")
ErrSignConfigDirNotSet = errors.New("signature config dir not set")
ErrBadSignatureManifestDigest = errors.New("bad signature manifest digest")
ErrInvalidSignatureType = errors.New("invalid signature type")
ErrSyncPingRegistry = errors.New("unable to ping any registry URLs")
ErrSyncImageNotSigned = errors.New("synced image is not signed")
ErrSyncImageFilteredOut = errors.New("image is filtered out by sync config")
ErrInvalidTruststoreType = errors.New("invalid signature truststore type")
ErrInvalidTruststoreName = errors.New("invalid signature truststore name")
ErrInvalidCertificateContent = errors.New("invalid signature certificate content")
ErrInvalidPublicKeyContent = errors.New("invalid signature public key content")
ErrInvalidStateCookie = errors.New("auth state cookie not present or differs from original state")
ErrSyncNoURLsLeft = errors.New("no valid registry urls left after filtering local ones")
ErrInvalidCLIParameter = errors.New("invalid cli parameter")
ErrGQLEndpointNotFound = errors.New("the server doesn't have a gql endpoint")
ErrGQLQueryNotSupported = errors.New("query is not supported or has different arguments")
ErrBadHTTPStatusCode = errors.New("the response doesn't contain the expected status code")
ErrFileAlreadyCancelled = errors.New("storageDriver file already cancelled")
ErrFileAlreadyClosed = errors.New("storageDriver file already closed")
ErrFileAlreadyCommitted = errors.New("storageDriver file already committed")
ErrInvalidOutputFormat = errors.New("invalid cli output format")
ErrServerIsRunning = errors.New("server is running")
ErrDatabaseFileAlreadyInUse = errors.New("boltdb file is already in use")
ErrFlagValueUnsupported = errors.New("supported values ")
ErrUnknownSubcommand = errors.New("cli: unknown subcommand")
ErrMultipleReposSameName = errors.New("test: can't have multiple repos with the same name")
ErrRetentionPolicyNotFound = errors.New("retention: repo or tag policy not found")
ErrUnknownSubcommand = errors.New("unknown cli subcommand")
ErrMultipleReposSameName = errors.New("can't have multiple repos with the same name")
ErrRetentionPolicyNotFound = errors.New("retention repo or tag policy not found")
ErrFormatNotSupported = errors.New("the given output format is not supported")
ErrAPINotSupported = errors.New("registry at the given address doesn't implement the correct API")
ErrURLNotFound = errors.New("url not found")
ErrInvalidSearchQuery = errors.New("invalid search query")
)
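A point worth noting about the rewording above: callers match these sentinels by value with errors.Is (as the ErrManifestNotFound and ErrUserAPIKeyNotFound checks further down do), not by message text, so changing the wording does not affect error handling. A small self-contained illustration, using a local stand-in for the sentinel rather than the real package:

package main

import (
	"errors"
	"fmt"
)

// Stand-in for one of the sentinels defined in errors/errors.go.
var ErrManifestNotFound = errors.New("manifest not found")

func main() {
	// A hypothetical caller wraps the sentinel with extra context.
	err := fmt.Errorf("fetching manifest for repo %q: %w", "alpine", ErrManifestNotFound)

	// Matching is done on the error value, not its text, so rewording the
	// message (e.g. dropping the "manifest:" prefix) breaks no errors.Is checks.
	if errors.Is(err, ErrManifestNotFound) {
		fmt.Println("manifest is missing")
	}
}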
@@ -86,7 +86,7 @@ func (amw *AuthnMiddleware) sessionAuthn(ctlr *Controller, userAc *reqCtx.UserAc
groups, err := ctlr.MetaDB.GetUserGroups(request.Context())
if err != nil {
ctlr.Log.Err(err).Str("identity", identity).Msg("can not get user profile in DB")
ctlr.Log.Err(err).Str("identity", identity).Msg("failed to get user profile in DB")
return false, err
}

@@ -134,7 +134,7 @@ func (amw *AuthnMiddleware) basicAuthn(ctlr *Controller, userAc *reqCtx.UserAcce
// we have already populated the request context with userAc
if err := ctlr.MetaDB.SetUserGroups(request.Context(), groups); err != nil {
ctlr.Log.Error().Err(err).Str("identity", identity).Msg("couldn't update user profile")
ctlr.Log.Error().Err(err).Str("identity", identity).Msg("failed to update user profile")
return false, err
}

@@ -172,7 +172,7 @@ func (amw *AuthnMiddleware) basicAuthn(ctlr *Controller, userAc *reqCtx.UserAcce
// we have already populated the request context with userAc
if err := ctlr.MetaDB.SetUserGroups(request.Context(), groups); err != nil {
ctlr.Log.Error().Err(err).Str("identity", identity).Msg("couldn't update user profile")
ctlr.Log.Error().Err(err).Str("identity", identity).Msg("failed to update user profile")
return false, err
}

@@ -186,7 +186,7 @@ func (amw *AuthnMiddleware) basicAuthn(ctlr *Controller, userAc *reqCtx.UserAcce
apiKey := passphrase
if !strings.HasPrefix(apiKey, constants.APIKeysPrefix) {
ctlr.Log.Error().Msg("api token has invalid format")
ctlr.Log.Error().Msg("invalid api token format")
return false, nil
}

@@ -198,12 +198,12 @@ func (amw *AuthnMiddleware) basicAuthn(ctlr *Controller, userAc *reqCtx.UserAcce
storedIdentity, err := ctlr.MetaDB.GetUserAPIKeyInfo(hashedKey)
if err != nil {
if errors.Is(err, zerr.ErrUserAPIKeyNotFound) {
ctlr.Log.Info().Err(err).Msgf("can not find any user info for hashed key %s in DB", hashedKey)
ctlr.Log.Info().Err(err).Msgf("failed to find any user info for hashed key %s in DB", hashedKey)
return false, nil
}
ctlr.Log.Error().Err(err).Msgf("can not get user info for hashed key %s in DB", hashedKey)
ctlr.Log.Error().Err(err).Msgf("failed to get user info for hashed key %s in DB", hashedKey)
return false, err
}

@@ -215,7 +215,7 @@ func (amw *AuthnMiddleware) basicAuthn(ctlr *Controller, userAc *reqCtx.UserAcce
// check if api key expired
isExpired, err := ctlr.MetaDB.IsAPIKeyExpired(request.Context(), hashedKey)
if err != nil {
ctlr.Log.Err(err).Str("identity", identity).Msg("can not verify if api key expired")
ctlr.Log.Err(err).Str("identity", identity).Msg("failed to verify if api key expired")
return false, err
}

@@ -226,14 +226,14 @@ func (amw *AuthnMiddleware) basicAuthn(ctlr *Controller, userAc *reqCtx.UserAcce
err = ctlr.MetaDB.UpdateUserAPIKeyLastUsed(request.Context(), hashedKey)
if err != nil {
ctlr.Log.Err(err).Str("identity", identity).Msg("can not update user profile in DB")
ctlr.Log.Err(err).Str("identity", identity).Msg("failed to update user profile in DB")
return false, err
}
groups, err := ctlr.MetaDB.GetUserGroups(request.Context())
if err != nil {
ctlr.Log.Err(err).Str("identity", identity).Msg("can not get user's groups in DB")
ctlr.Log.Err(err).Str("identity", identity).Msg("failed to get user's groups in DB")
return false, err
}

@@ -376,7 +376,7 @@ func (amw *AuthnMiddleware) tryAuthnHandlers(ctlr *Controller) mux.MiddlewareFun
authenticated, err := amw.sessionAuthn(ctlr, userAc, response, request)
if err != nil {
if errors.Is(err, zerr.ErrUserDataNotFound) {
ctlr.Log.Err(err).Msg("can not find user profile in DB")
ctlr.Log.Err(err).Msg("failed to find user profile in DB")
authFail(response, request, ctlr.Config.HTTP.Realm, delay)
}

@@ -419,7 +419,7 @@ func bearerAuthHandler(ctlr *Controller) mux.MiddlewareFunc {
EmptyDefaultNamespace: true,
})
if err != nil {
ctlr.Log.Panic().Err(err).Msg("error creating bearer authorizer")
ctlr.Log.Panic().Err(err).Msg("failed to create bearer authorizer")
}
return func(next http.Handler) http.Handler {

@@ -452,7 +452,7 @@ func bearerAuthHandler(ctlr *Controller) mux.MiddlewareFunc {
permissions, err := authorizer.Authorize(header, action, name)
if err != nil {
ctlr.Log.Error().Err(err).Msg("issue parsing Authorization header")
ctlr.Log.Error().Err(err).Msg("failed to parse Authorization header")
response.Header().Set("Content-Type", "application/json")
zcommon.WriteJSON(response, http.StatusInternalServerError, apiErr.NewError(apiErr.UNSUPPORTED))

@@ -523,7 +523,7 @@ func (rh *RouteHandler) AuthURLHandler() http.HandlerFunc {
client, ok := rh.c.RelyingParties[provider]
if !ok {
rh.c.Log.Error().Msg("unrecognized openid provider")
rh.c.Log.Error().Msg("failed to authenticate due to unrecognized openid provider")
w.WriteHeader(http.StatusBadRequest)

@@ -547,7 +547,7 @@ func (rh *RouteHandler) AuthURLHandler() http.HandlerFunc {
// let the session set its own id
err := session.Save(r, w)
if err != nil {
rh.c.Log.Error().Err(err).Msg("unable to save http session")
rh.c.Log.Error().Err(err).Msg("failed to save http session")
w.WriteHeader(http.StatusInternalServerError)

@@ -720,7 +720,7 @@ func GetGithubUserInfo(ctx context.Context, client *github.Client, log log.Logge
userEmails, _, err := client.Users.ListEmails(ctx, nil)
if err != nil {
log.Error().Msg("couldn't set user record for empty email value")
log.Error().Msg("failed to set user record for empty email value")
return "", []string{}, err
}

@@ -737,7 +737,7 @@ func GetGithubUserInfo(ctx context.Context, client *github.Client, log log.Logge
orgs, _, err := client.Organizations.List(ctx, "", nil)
if err != nil {
log.Error().Msg("couldn't set user record for empty email value")
log.Error().Msg("failed to set user record for empty email value")
return "", []string{}, err
}

@@ -764,7 +764,7 @@ func saveUserLoggedSession(cookieStore sessions.Store, response http.ResponseWri
// let the session set its own id
err := session.Save(request, response)
if err != nil {
log.Error().Err(err).Str("identity", identity).Msg("unable to save http session")
log.Error().Err(err).Str("identity", identity).Msg("failed to save http session")
return err
}

@@ -790,13 +790,15 @@ func OAuth2Callback(ctlr *Controller, w http.ResponseWriter, r *http.Request, st
stateOrigin, ok := stateCookie.Values["state"].(string)
if !ok {
ctlr.Log.Error().Err(zerr.ErrInvalidStateCookie).Msg("openID: unable to get 'state' cookie from request")
ctlr.Log.Error().Err(zerr.ErrInvalidStateCookie).Str("component", "openID").
Msg(": failed to get 'state' cookie from request")
return "", zerr.ErrInvalidStateCookie
}
if stateOrigin != state {
ctlr.Log.Error().Err(zerr.ErrInvalidStateCookie).Msg("openID: 'state' cookie differs from the actual one")
ctlr.Log.Error().Err(zerr.ErrInvalidStateCookie).Str("component", "openID").
Msg(": 'state' cookie differs from the actual one")
return "", zerr.ErrInvalidStateCookie
}

@@ -813,7 +815,7 @@ func OAuth2Callback(ctlr *Controller, w http.ResponseWriter, r *http.Request, st
}
if err := ctlr.MetaDB.SetUserGroups(r.Context(), groups); err != nil {
ctlr.Log.Error().Err(err).Str("identity", email).Msg("couldn't update the user profile")
ctlr.Log.Error().Err(err).Str("identity", email).Msg("failed to update the user profile")
return "", err
}

@@ -841,7 +843,7 @@ func GetAuthUserFromRequestSession(cookieStore sessions.Store, request *http.Req
) (string, bool) {
session, err := cookieStore.Get(request, "session")
if err != nil {
log.Error().Err(err).Msg("can not decode existing session")
log.Error().Err(err).Msg("failed to decode existing session")
// expired cookie, no need to return err
return "", false
}

@@ -854,14 +856,14 @@ func GetAuthUserFromRequestSession(cookieStore sessions.Store, request *http.Req
authenticated := session.Values["authStatus"]
if authenticated != true {
log.Error().Msg("can not get `user` session value")
log.Error().Msg("failed to get `user` session value")
return "", false
}
identity, ok := session.Values["user"].(string)
if !ok {
log.Error().Msg("can not get `user` session value")
log.Error().Msg("failed to get `user` session value")
return "", false
}

@@ -873,7 +875,7 @@ func GenerateAPIKey(uuidGenerator guuid.Generator, log log.Logger,
) (string, string, error) {
apiKeyBase, err := uuidGenerator.NewV4()
if err != nil {
log.Error().Err(err).Msg("unable to generate uuid for api key base")
log.Error().Err(err).Msg("failed to generate uuid for api key base")
return "", "", err
}

@@ -883,7 +885,7 @@ func GenerateAPIKey(uuidGenerator guuid.Generator, log log.Logger,
// will be used for identifying a specific api key
apiKeyID, err := uuidGenerator.NewV4()
if err != nil {
log.Error().Err(err).Msg("unable to generate uuid for api key id")
log.Error().Err(err).Msg("failed to generate uuid for api key id")
return "", "", err
}
@@ -129,7 +129,7 @@ func TestAPIKeys(t *testing.T) {
conf.Extensions.UI.Enable = &defaultVal
ctlr := api.NewController(conf)
ctlr.Log.Info().Int64("seedUser", seedUser).Int64("seedPass", seedPass).Msg("random seed for username & password")
ctlr.Log.Info().Int64("seedUser", seedUser).Int64("seedPass", seedPass).Msg("Random seed for username & password")
dir := t.TempDir()
ctlr.Config.Storage.RootDirectory = dir
@@ -438,7 +438,7 @@ func (c *Controller) StartBackgroundTasks(reloadCtx context.Context) {
//nolint: contextcheck
syncOnDemand, err := ext.EnableSyncExtension(c.Config, c.MetaDB, c.StoreController, c.taskScheduler, c.Log)
if err != nil {
c.Log.Error().Err(err).Msg("unable to start sync extension")
c.Log.Error().Err(err).Msg("failed to start sync extension")
}
c.SyncOnDemand = syncOnDemand
@@ -8950,7 +8950,7 @@ func TestPeriodicGC(t *testing.T) {
So(err, ShouldBeNil)
So(string(data), ShouldContainSubstring,
"\"GC\":true,\"Commit\":false,\"GCDelay\":1000000000,\"GCInterval\":3600000000000")
So(string(data), ShouldContainSubstring, "failure walking storage root-dir") //nolint:lll
So(string(data), ShouldContainSubstring, "failed to walk storage root-dir") //nolint:lll
})
}
@@ -49,7 +49,7 @@ func (lc *LDAPClient) Connect() error {
if !lc.UseSSL {
l, err = ldap.Dial("tcp", address)
if err != nil {
lc.Log.Error().Err(err).Str("address", address).Msg("non-TLS connection failed")
lc.Log.Error().Err(err).Str("address", address).Msg("failed to establish a TCP connection")
return err
}

@@ -68,7 +68,7 @@ func (lc *LDAPClient) Connect() error {
err = l.StartTLS(config)
if err != nil {
lc.Log.Error().Err(err).Str("address", address).Msg("TLS connection failed")
lc.Log.Error().Err(err).Str("address", address).Msg("failed to establish a TLS connection")
return err
}

@@ -85,7 +85,7 @@ func (lc *LDAPClient) Connect() error {
}
l, err = ldap.DialTLS("tcp", address, config)
if err != nil {
lc.Log.Error().Err(err).Str("address", address).Msg("TLS connection failed")
lc.Log.Error().Err(err).Str("address", address).Msg("failed to establish a TLS connection")
return err
}

@@ -143,7 +143,7 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
if lc.BindPassword != "" {
err := lc.Conn.Bind(lc.BindDN, lc.BindPassword)
if err != nil {
lc.Log.Error().Err(err).Str("bindDN", lc.BindDN).Msg("bind failed")
lc.Log.Error().Err(err).Str("bindDN", lc.BindDN).Msg("failed to bind")
// clean up the cached conn, so we can retry
lc.Conn.Close()
lc.Conn = nil

@@ -153,7 +153,7 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
} else {
err := lc.Conn.UnauthenticatedBind(lc.BindDN)
if err != nil {
lc.Log.Error().Err(err).Str("bindDN", lc.BindDN).Msg("bind failed")
lc.Log.Error().Err(err).Str("bindDN", lc.BindDN).Msg("failed to bind")
// clean up the cached conn, so we can retry
lc.Conn.Close()
lc.Conn = nil

@@ -167,7 +167,7 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
// exhausted all retries?
if !connected {
lc.Log.Error().Err(errors.ErrLDAPBadConn).Msg("exhausted all retries")
lc.Log.Error().Err(errors.ErrLDAPBadConn).Msg("failed to authenticate, exhausted all retries")
return false, nil, nil, errors.ErrLDAPBadConn
}

@@ -194,7 +194,7 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
if err != nil {
fmt.Printf("%v\n", err)
lc.Log.Error().Err(err).Str("bindDN", lc.BindDN).Str("username", username).
Str("baseDN", lc.Base).Msg("search failed")
Str("baseDN", lc.Base).Msg("failed to perform a search request")
return false, nil, nil, err
}

@@ -202,7 +202,7 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
if len(search.Entries) < 1 {
err := errors.ErrBadUser
lc.Log.Error().Err(err).Str("bindDN", lc.BindDN).Str("username", username).
Str("baseDN", lc.Base).Msg("entries not found")
Str("baseDN", lc.Base).Msg("failed to find entry")
return false, nil, nil, err
}

@@ -210,7 +210,7 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
if len(search.Entries) > 1 {
err := errors.ErrEntriesExceeded
lc.Log.Error().Err(err).Str("bindDN", lc.BindDN).Str("username", username).
Str("baseDN", lc.Base).Msg("too many entries")
Str("baseDN", lc.Base).Msg("failed to retrieve due to an excessive amount of entries")
return false, nil, nil, err
}

@@ -227,7 +227,7 @@ func (lc *LDAPClient) Authenticate(username, password string) (bool, map[string]
// Bind as the user to verify their password
err = lc.Conn.Bind(userDN, password)
if err != nil {
lc.Log.Error().Err(err).Str("bindDN", userDN).Msg("user bind failed")
lc.Log.Error().Err(err).Str("bindDN", userDN).Msg("failed to bind user")
return false, user, userGroups, err
}
@@ -549,7 +549,7 @@ func getReferrers(ctx context.Context, routeHandler *RouteHandler,
if errSync := routeHandler.c.SyncOnDemand.SyncReference(ctx, name, digest.String(),
syncConstants.OCI); errSync != nil {
routeHandler.c.Log.Err(errSync).Str("repository", name).Str("reference", digest.String()).
Msg("error encounter while syncing OCI reference for image")
Msg("failed to sync OCI reference for image")
}
refs, err = imgStore.GetReferrers(name, digest, artifactTypes)

@@ -604,10 +604,12 @@ func (rh *RouteHandler) GetReferrers(response http.ResponseWriter, request *http
referrers, err := getReferrers(request.Context(), rh, imgStore, name, digest, artifactTypes)
if err != nil {
if errors.Is(err, zerr.ErrManifestNotFound) || errors.Is(err, zerr.ErrRepoNotFound) {
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("manifest not found")
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).
Msg("failed to get manifest")
response.WriteHeader(http.StatusNotFound)
} else {
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to get references")
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).
Msg("failed to get references")
response.WriteHeader(http.StatusInternalServerError)
}

@@ -616,7 +618,7 @@ func (rh *RouteHandler) GetReferrers(response http.ResponseWriter, request *http
out, err := json.Marshal(referrers)
if err != nil {
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to marshal json")
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("failed to marshal json")
response.WriteHeader(http.StatusInternalServerError)
return

@@ -706,7 +708,7 @@ func (rh *RouteHandler) UpdateManifest(response http.ResponseWriter, request *ht
zcommon.WriteJSON(response, http.StatusBadRequest, apiErr.NewErrorList(e))
} else {
// could be syscall.EMFILE (Err:0x18 too many opened files), etc
rh.c.Log.Error().Err(err).Msg("unexpected error: performing cleanup")
rh.c.Log.Error().Err(err).Msg("unexpected error, performing cleanup")
if err = imgStore.DeleteImageManifest(name, reference, false); err != nil {
// deletion of image manifest is important, but not critical for image repo consistency

@@ -1223,8 +1225,6 @@ func (rh *RouteHandler) CreateBlobUpload(response http.ResponseWriter, request *
return
}
rh.c.Log.Info().Int64("r.ContentLength", request.ContentLength).Msg("DEBUG")
digestStr := digests[0]
digest := godigest.Digest(digestStr)

@@ -1249,7 +1249,8 @@ func (rh *RouteHandler) CreateBlobUpload(response http.ResponseWriter, request *
sessionID, size, err := imgStore.FullBlobUpload(name, request.Body, digest)
if err != nil {
rh.c.Log.Error().Err(err).Int64("actual", size).Int64("expected", contentLength).Msg("failed full upload")
rh.c.Log.Error().Err(err).Int64("actual", size).Int64("expected", contentLength).
Msg("failed to full blob upload")
response.WriteHeader(http.StatusInternalServerError)
return

@@ -1430,7 +1431,7 @@ func (rh *RouteHandler) PatchBlobUpload(response http.ResponseWriter, request *h
zcommon.WriteJSON(response, http.StatusNotFound, apiErr.NewErrorList(e))
} else {
// could be io.ErrUnexpectedEOF, syscall.EMFILE (Err:0x18 too many opened files), etc
rh.c.Log.Error().Err(err).Msg("unexpected error: removing .uploads/ files")
rh.c.Log.Error().Err(err).Msg("unexpected error, removing .uploads/ files")
if err = imgStore.DeleteBlobUpload(name, sessionID); err != nil {
rh.c.Log.Error().Err(err).Str("blobUpload", sessionID).Str("repository", name).

@@ -1496,8 +1497,6 @@ func (rh *RouteHandler) UpdateBlobUpload(response http.ResponseWriter, request *
return
}
rh.c.Log.Info().Int64("r.ContentLength", request.ContentLength).Msg("DEBUG")
contentPresent := true
contentLen, err := strconv.ParseInt(request.Header.Get("Content-Length"), 10, 64)

@@ -1554,11 +1553,11 @@ func (rh *RouteHandler) UpdateBlobUpload(response http.ResponseWriter, request *
zcommon.WriteJSON(response, http.StatusNotFound, apiErr.NewErrorList(e))
} else {
// could be io.ErrUnexpectedEOF, syscall.EMFILE (Err:0x18 too many opened files), etc
rh.c.Log.Error().Err(err).Msg("unexpected error: removing .uploads/ files")
rh.c.Log.Error().Err(err).Msg("unexpected error, removing .uploads/ files")
if err = imgStore.DeleteBlobUpload(name, sessionID); err != nil {
rh.c.Log.Error().Err(err).Str("blobUpload", sessionID).Str("repository", name).
Msg("couldn't remove blobUpload in repo")
Msg("failed to remove blobUpload in repo")
}
response.WriteHeader(http.StatusInternalServerError)
}

@@ -1589,11 +1588,11 @@ finish:
zcommon.WriteJSON(response, http.StatusNotFound, apiErr.NewErrorList(e))
} else {
// could be io.ErrUnexpectedEOF, syscall.EMFILE (Err:0x18 too many opened files), etc
rh.c.Log.Error().Err(err).Msg("unexpected error: removing .uploads/ files")
rh.c.Log.Error().Err(err).Msg("unexpected error, removing .uploads/ files")
if err = imgStore.DeleteBlobUpload(name, sessionID); err != nil {
rh.c.Log.Error().Err(err).Str("blobUpload", sessionID).Str("repository", name).
Msg("couldn't remove blobUpload in repo")
Msg("failed to remove blobUpload in repo")
}
response.WriteHeader(http.StatusInternalServerError)
}

@@ -1813,7 +1812,7 @@ func (rh *RouteHandler) OpenIDCodeExchangeCallback() rp.CodeExchangeUserinfoCall
) {
email := info.GetEmail()
if email == "" {
rh.c.Log.Error().Msg("couldn't set user record for empty email value")
rh.c.Log.Error().Msg("failed to set user record for empty email value")
w.WriteHeader(http.StatusUnauthorized)
return

@@ -1823,7 +1822,7 @@ func (rh *RouteHandler) OpenIDCodeExchangeCallback() rp.CodeExchangeUserinfoCall
val, ok := info.GetClaim("groups").([]interface{})
if !ok {
rh.c.Log.Info().Msgf("couldn't find any 'groups' claim for user %s", email)
rh.c.Log.Info().Msgf("failed to find any 'groups' claim for user %s", email)
}
for _, group := range val {

@@ -1887,7 +1886,7 @@ func WriteDataFromReader(response http.ResponseWriter, status int, length int64,
break
} else if err != nil {
// other kinds of intermittent errors can occur, e.g, io.ErrShortWrite
logger.Error().Err(err).Msg("copying data into http response")
logger.Error().Err(err).Msg("failed to copy data into http response")
return
}

@@ -1920,7 +1919,7 @@ func getImageManifest(ctx context.Context, routeHandler *RouteHandler, imgStore
if errSync := routeHandler.c.SyncOnDemand.SyncImage(ctx, name, reference); errSync != nil {
routeHandler.c.Log.Err(errSync).Str("repository", name).Str("reference", reference).
Msg("error encounter while syncing image")
Msg("failed to sync image")
}
}

@@ -1941,7 +1940,7 @@ func getOrasReferrers(ctx context.Context, routeHandler *RouteHandler,
if errSync := routeHandler.c.SyncOnDemand.SyncReference(ctx, name, digest.String(),
syncConstants.Oras); errSync != nil {
routeHandler.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).
Msg("unable to get references")
Msg("failed to get references")
}
refs, err = imgStore.GetOrasReferrers(name, digest, artifactType)

@@ -2008,10 +2007,12 @@ func (rh *RouteHandler) GetOrasReferrers(response http.ResponseWriter, request *
refs, err := getOrasReferrers(request.Context(), rh, imgStore, name, digest, artifactType) //nolint:contextcheck
if err != nil {
if errors.Is(err, zerr.ErrManifestNotFound) || errors.Is(err, zerr.ErrRepoNotFound) {
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("manifest not found")
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).
Msg("failed to get manifest")
response.WriteHeader(http.StatusNotFound)
} else {
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).Msg("unable to get references")
rh.c.Log.Error().Err(err).Str("name", name).Str("digest", digest.String()).
Msg("failed to get references")
response.WriteHeader(http.StatusInternalServerError)
}

@@ -2041,7 +2042,7 @@ type APIKeyPayload struct { //nolint:revive
func (rh *RouteHandler) GetAPIKeys(resp http.ResponseWriter, req *http.Request) {
apiKeys, err := rh.c.MetaDB.GetUserAPIKeys(req.Context())
if err != nil {
rh.c.Log.Error().Err(err).Msg("error getting list of API keys for user")
rh.c.Log.Error().Err(err).Msg("failed to get list of api keys for user")
resp.WriteHeader(http.StatusInternalServerError)
return

@@ -2057,7 +2058,7 @@ func (rh *RouteHandler) GetAPIKeys(resp http.ResponseWriter, req *http.Request)
data, err := json.Marshal(apiKeyResponse)
if err != nil {
rh.c.Log.Error().Err(err).Msg("unable to marshal api key response")
rh.c.Log.Error().Err(err).Msg("failed to marshal api key response")
resp.WriteHeader(http.StatusInternalServerError)

@@ -2085,7 +2086,7 @@ func (rh *RouteHandler) CreateAPIKey(resp http.ResponseWriter, req *http.Request
body, err := io.ReadAll(req.Body)
if err != nil {
rh.c.Log.Error().Msg("unable to read request body")
rh.c.Log.Error().Msg("failed to read request body")
resp.WriteHeader(http.StatusInternalServerError)
return

@@ -2141,7 +2142,7 @@ func (rh *RouteHandler) CreateAPIKey(resp http.ResponseWriter, req *http.Request
err = rh.c.MetaDB.AddUserAPIKey(req.Context(), hashedAPIKey, apiKeyDetails)
if err != nil {
rh.c.Log.Error().Err(err).Msg("error storing API key")
rh.c.Log.Error().Err(err).Msg("failed to store api key")
resp.WriteHeader(http.StatusInternalServerError)
return

@@ -2159,7 +2160,7 @@ func (rh *RouteHandler) CreateAPIKey(resp http.ResponseWriter, req *http.Request
data, err := json.Marshal(apiKeyResponse)
if err != nil {
rh.c.Log.Error().Err(err).Msg("unable to marshal api key response")
rh.c.Log.Error().Err(err).Msg("failed to marshal api key response")
resp.WriteHeader(http.StatusInternalServerError)

@@ -2194,7 +2195,7 @@ func (rh *RouteHandler) RevokeAPIKey(resp http.ResponseWriter, req *http.Request
err := rh.c.MetaDB.DeleteUserAPIKey(req.Context(), keyID)
if err != nil {
rh.c.Log.Error().Err(err).Str("keyID", keyID).Msg("error deleting API key")
rh.c.Log.Error().Err(err).Str("keyID", keyID).Msg("failed to delete api key")
resp.WriteHeader(http.StatusInternalServerError)
return
@@ -121,7 +121,8 @@ func SessionLogger(ctlr *Controller) mux.MiddlewareFunc {
monitoring.ObserveHTTPMethodLatency(ctlr.Metrics, method, latency) // histogram
}
log.Str("clientIP", clientIP).
log.Str("component", "session").
Str("clientIP", clientIP).
Str("method", method).
Str("path", path).
Int("statusCode", statusCode).

@@ -172,6 +173,7 @@ func SessionAuditLogger(audit *log.Logger) mux.MiddlewareFunc {
method == http.MethodPatch || method == http.MethodDelete) &&
(statusCode == http.StatusOK || statusCode == http.StatusCreated || statusCode == http.StatusAccepted) {
audit.Info().
Str("component", "session").
Str("clientIP", clientIP).
Str("subject", username).
Str("action", method).
@@ -221,7 +221,7 @@ func TestConfigCmdMain(t *testing.T) {
cmd.SetArgs(args)
err := cmd.Execute()
So(err, ShouldNotBeNil)
So(buff.String(), ShouldContainSubstring, "invalid config")
So(buff.String(), ShouldContainSubstring, "invalid server config")
})
Convey("Test remove config bad permissions", t, func() {
@@ -181,7 +181,8 @@ func TestNegativeServerResponse(t *testing.T) {
test.WaitTillServerReady(url)
_, err = test.ReadLogFileAndSearchString(logPath, "DB update completed, next update scheduled", 90*time.Second)
_, err = test.ReadLogFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval",
90*time.Second)
if err != nil {
panic(err)
}

@@ -263,7 +264,8 @@ func TestServerCVEResponse(t *testing.T) {
panic(err)
}
_, err = test.ReadLogFileAndSearchString(logPath, "DB update completed, next update scheduled", 90*time.Second)
_, err = test.ReadLogFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval",
90*time.Second)
if err != nil {
panic(err)
}

@@ -367,7 +369,7 @@ func TestServerCVEResponse(t *testing.T) {
So(err, ShouldNotBeNil)
})
Convey("Test CVE by image name - GQL - invalid output format", t, func() {
Convey("Test CVE by image name - GQL - invalid cli output format", t, func() {
args := []string{"list", "zot-cve-test:0.0.1", "-f", "random", "--config", "cvetest"}
configPath := makeConfigFile(fmt.Sprintf(`{"configs":[{"_name":"cvetest","url":"%s","showspinner":false}]}`, url))
defer os.Remove(configPath)

@@ -378,7 +380,7 @@ func TestServerCVEResponse(t *testing.T) {
cveCmd.SetArgs(args)
err = cveCmd.Execute()
So(err, ShouldNotBeNil)
So(buff.String(), ShouldContainSubstring, "invalid output format")
So(buff.String(), ShouldContainSubstring, "invalid cli output format")
})
Convey("Test images by CVE ID - GQL - positive", t, func() {

@@ -415,7 +417,7 @@ func TestServerCVEResponse(t *testing.T) {
So(str, ShouldNotContainSubstring, "REPOSITORY TAG OS/ARCH DIGEST SIGNED SIZE")
})
Convey("Test images by CVE ID - GQL - invalid output format", t, func() {
Convey("Test images by CVE ID - GQL - invalid cli output format", t, func() {
args := []string{"affected", "CVE-2019-9923", "-f", "random", "--config", "cvetest"}
configPath := makeConfigFile(fmt.Sprintf(`{"configs":[{"_name":"cvetest","url":"%s","showspinner":false}]}`, url))
defer os.Remove(configPath)

@@ -426,7 +428,7 @@ func TestServerCVEResponse(t *testing.T) {
cveCmd.SetArgs(args)
err = cveCmd.Execute()
So(err, ShouldNotBeNil)
So(buff.String(), ShouldContainSubstring, "invalid output format")
So(buff.String(), ShouldContainSubstring, "invalid cli output format")
})
Convey("Test fixed tags by image name and CVE ID - GQL - positive", t, func() {

@@ -530,7 +532,7 @@ func TestServerCVEResponse(t *testing.T) {
So(strings.TrimSpace(str), ShouldNotContainSubstring, "REPOSITORY TAG OS/ARCH SIGNED SIZE")
})
Convey("Test CVE by name and CVE ID - GQL - invalid output format", t, func() {
Convey("Test CVE by name and CVE ID - GQL - invalid cli output format", t, func() {
args := []string{"affected", "CVE-2019-9923", "--repo", "zot-cve-test", "-f", "random", "--config", "cvetest"}
configPath := makeConfigFile(fmt.Sprintf(`{"configs":[{"_name":"cvetest","url":"%s","showspinner":false}]}`, url))
defer os.Remove(configPath)

@@ -541,7 +543,7 @@ func TestServerCVEResponse(t *testing.T) {
cveCmd.SetArgs(args)
err = cveCmd.Execute()
So(err, ShouldNotBeNil)
So(buff.String(), ShouldContainSubstring, "invalid output format")
So(buff.String(), ShouldContainSubstring, "invalid cli output format")
})
}
@@ -470,7 +470,7 @@ func TestOutputFormat(t *testing.T) {
cmd.SetArgs(args)
err := cmd.Execute()
So(err, ShouldNotBeNil)
So(buff.String(), ShouldContainSubstring, "invalid output format")
So(buff.String(), ShouldContainSubstring, "invalid cli output format")
})
}
@@ -500,7 +500,7 @@ func TestOutputFormatGQL(t *testing.T) {
cmd.SetArgs(args)
err := cmd.Execute()
So(err, ShouldNotBeNil)
So(buff.String(), ShouldContainSubstring, "invalid output format")
So(buff.String(), ShouldContainSubstring, "invalid cli output format")
})
})
}

@@ -554,7 +554,7 @@ func TestServerResponseGQL(t *testing.T) {
cmd.SetArgs(args)
err := cmd.Execute()
So(err, ShouldNotBeNil)
So(buff.String(), ShouldContainSubstring, "invalid output format")
So(buff.String(), ShouldContainSubstring, "invalid cli output format")
})
})

@@ -632,7 +632,7 @@ func TestServerResponseGQL(t *testing.T) {
cmd.SetArgs(args)
err := cmd.Execute()
So(err, ShouldNotBeNil)
So(buff.String(), ShouldContainSubstring, "invalid output format")
So(buff.String(), ShouldContainSubstring, "invalid cli output format")
})
})

@@ -683,7 +683,7 @@ func TestServerResponseGQL(t *testing.T) {
cmd.SetArgs(args)
err := cmd.Execute()
So(err, ShouldNotBeNil)
So(buff.String(), ShouldContainSubstring, "invalid output format")
So(buff.String(), ShouldContainSubstring, "invalid cli output format")
})
})
@@ -95,7 +95,7 @@ func TestSuggestions(t *testing.T) {
suggestion := client.ShowSuggestionsIfUnknownCommand(
client.NewRepoCommand(client.NewSearchService()), []string{"bad-command"})
str := space.ReplaceAllString(suggestion.Error(), " ")
So(str, ShouldContainSubstring, "unknown subcommand")
So(str, ShouldContainSubstring, "unknown cli subcommand")
suggestion = client.ShowSuggestionsIfUnknownCommand(
client.NewRepoCommand(client.NewSearchService()), []string{"listt"})
@@ -88,7 +88,7 @@ func (hr *HotReloader) Start() context.Context {
err := LoadConfiguration(newConfig, hr.filePath)
if err != nil {
log.Error().Err(err).Msg("couldn't reload config, retry writing it.")
log.Error().Err(err).Msg("failed to reload config, retry writing it.")
continue
}

@@ -111,7 +111,7 @@ func (hr *HotReloader) Start() context.Context {
}()
if err := hr.watcher.Add(hr.filePath); err != nil {
log.Panic().Err(err).Str("config", hr.filePath).Msg("error adding config file to FsNotify watcher")
log.Panic().Err(err).Str("config", hr.filePath).Msg("failed to add config file to fsnotity watcher")
}
<-done
@@ -491,7 +491,7 @@ func TestConfigReloader(t *testing.T) {
time.Sleep(5 * time.Second)
found, err := test.ReadLogFileAndSearchString(logFile.Name(),
"Error downloading Trivy DB to destination dir", 30*time.Second)
"failed to download trivy-db to destination dir", 30*time.Second)
So(err, ShouldBeNil)
So(found, ShouldBeTrue)
@@ -805,7 +805,7 @@ func TestServeScrubExtension(t *testing.T) {
So(dataStr, ShouldContainSubstring,
"\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":null,\"Scrub\":{\"Enable\":true,\"Interval\":86400000000000},\"Lint\":null") //nolint:lll // gofumpt conflicts with lll
So(dataStr, ShouldNotContainSubstring,
"Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.")
"scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.")
})
Convey("scrub implicitly enabled, but with scrub interval param set", t, func(c C) {

@@ -837,7 +837,7 @@ func TestServeScrubExtension(t *testing.T) {
dataStr := string(data)
So(dataStr, ShouldContainSubstring, "\"Scrub\":{\"Enable\":true,\"Interval\":3600000000000}")
So(dataStr, ShouldContainSubstring,
"Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.")
"scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.")
})
Convey("scrub explicitly enabled, but without scrub interval param set", t, func(c C) {

@@ -869,7 +869,7 @@ func TestServeScrubExtension(t *testing.T) {
So(dataStr, ShouldContainSubstring,
"\"Extensions\":{\"Search\":null,\"Sync\":null,\"Metrics\":null,\"Scrub\":{\"Enable\":true,\"Interval\":86400000000000},\"Lint\":null") //nolint:lll // gofumpt conflicts with lll
So(dataStr, ShouldNotContainSubstring,
"Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.")
"scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.")
})
Convey("scrub explicitly disabled", t, func(c C) {

@@ -899,9 +899,9 @@ func TestServeScrubExtension(t *testing.T) {
defer os.Remove(logPath) // clean up
dataStr := string(data)
So(dataStr, ShouldContainSubstring, "\"Scrub\":{\"Enable\":false,\"Interval\":86400000000000}")
So(dataStr, ShouldContainSubstring, "Scrub config not provided, skipping scrub")
So(dataStr, ShouldContainSubstring, "scrub config not provided, skipping scrub")
So(dataStr, ShouldNotContainSubstring,
"Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.")
"scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.")
})
}

@@ -1066,7 +1066,7 @@ func TestServeSearchEnabledCVE(t *testing.T) {
So(found, ShouldBeTrue)
So(err, ShouldBeNil)
found, err = ReadLogFileAndSearchString(logPath, "updating the CVE database", readLogFileTimeout)
found, err = ReadLogFileAndSearchString(logPath, "updating cve-db", readLogFileTimeout)
So(found, ShouldBeTrue)
So(err, ShouldBeNil)
})

@@ -1151,8 +1151,8 @@ func TestServeSearchDisabled(t *testing.T) {
defer os.Remove(logPath) // clean up
dataStr := string(data)
So(dataStr, ShouldContainSubstring,
"\"Search\":{\"Enable\":false,\"CVE\":{\"UpdateInterval\":10800000000000,\"Trivy\":null}")
So(dataStr, ShouldContainSubstring, "CVE config not provided, skipping CVE update")
`"Search":{"Enable":false,"CVE":{"UpdateInterval":10800000000000,"Trivy":null}`)
So(dataStr, ShouldContainSubstring, "cve config not provided, skipping cve-db update")
So(dataStr, ShouldNotContainSubstring,
"CVE update interval set to too-short interval < 2h, changing update duration to 2 hours and continuing.")
})
@ -72,7 +72,9 @@ func newServeCmd(conf *config.Config) *cobra.Command {
|
|||
}
|
||||
|
||||
if err := ctlr.Run(reloaderCtx); err != nil {
|
||||
log.Error().Err(err).Msg("unable to start controller, exiting")
|
||||
log.Error().Err(err).Msg("failed to start controller, exiting")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -108,7 +110,7 @@ func newScrubCmd(conf *config.Config) *cobra.Command {
|
|||
fmt.Sprintf("http://%s/v2", net.JoinHostPort(conf.HTTP.Address, conf.HTTP.Port)),
|
||||
nil)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("unable to create a new http request")
|
||||
log.Error().Err(err).Msg("failed to create a new http request")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -116,7 +118,8 @@ func newScrubCmd(conf *config.Config) *cobra.Command {
|
|||
response, err := http.DefaultClient.Do(req)
|
||||
if err == nil {
|
||||
response.Body.Close()
|
||||
log.Warn().Msg("The server is running, in order to perform the scrub command the server should be shut down")
|
||||
log.Warn().Err(zerr.ErrServerIsRunning).
|
||||
Msg("server is running, in order to perform the scrub command the server should be shut down")
|
||||
|
||||
return zerr.ErrServerIsRunning
|
||||
} else {
|
||||
|
@ -153,12 +156,12 @@ func newVerifyCmd(conf *config.Config) *cobra.Command {
|
|||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
if err := LoadConfiguration(conf, args[0]); err != nil {
|
||||
log.Error().Str("config", args[0]).Msg("Config file is invalid")
|
||||
log.Error().Str("config", args[0]).Msg("invalid config file")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info().Str("config", args[0]).Msg("Config file is valid")
|
||||
log.Info().Str("config", args[0]).Msg("config file is valid")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -209,7 +212,8 @@ func validateStorageConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
|
||||
for _, storageConfig := range cfg.Storage.SubPaths {
|
||||
if strings.EqualFold(defaultRootDir, storageConfig.RootDirectory) {
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg("storage subpaths cannot use default storage root directory")
|
||||
log.Error().Err(zerr.ErrBadConfig).
|
||||
Msg("invalid storage config, storage subpaths cannot use default storage root directory")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -218,7 +222,8 @@ func validateStorageConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
if ok {
|
||||
equal := expConfig.ParamsEqual(storageConfig)
|
||||
if !equal {
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg("storage config with same root directory should have same parameters")
|
||||
log.Error().Err(zerr.ErrBadConfig).
|
||||
Msg("invalid storage config, storage config with same root directory should have same parameters")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -236,7 +241,7 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
//nolint: lll
|
||||
if cfg.Storage.Dedupe && cfg.Storage.StorageDriver != nil && cfg.Storage.RemoteCache && cfg.Storage.CacheDriver == nil {
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg(
|
||||
"dedupe set to true with remote storage and caching, but no remote cache configured!")
|
||||
"invalid cache config, dedupe set to true with remote storage and caching, but no remote cache configured")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -244,7 +249,7 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
if cfg.Storage.CacheDriver != nil && cfg.Storage.RemoteCache {
|
||||
// local storage with remote caching
|
||||
if cfg.Storage.StorageDriver == nil {
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg("cannot have local storage driver with remote caching!")
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg("invalid cache config, cannot have local storage driver with remote caching!")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -252,7 +257,7 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
// unsupported cache driver
|
||||
if cfg.Storage.CacheDriver["name"] != storageConstants.DynamoDBDriverName {
|
||||
log.Error().Err(zerr.ErrBadConfig).
|
||||
Interface("cacheDriver", cfg.Storage.CacheDriver["name"]).Msg("unsupported cache driver")
|
||||
Interface("cacheDriver", cfg.Storage.CacheDriver["name"]).Msg("invalid cache config, unsupported cache driver")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -260,7 +265,7 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
|
||||
if !cfg.Storage.RemoteCache && cfg.Storage.CacheDriver != nil {
|
||||
log.Warn().Err(zerr.ErrBadConfig).Str("directory", cfg.Storage.RootDirectory).
|
||||
Msg("remoteCache set to false but cacheDriver config (remote caching) provided for directory" +
|
||||
Msg("invalid cache config, remoteCache set to false but cacheDriver config (remote caching) provided for directory" +
|
||||
"will ignore and use local caching")
|
||||
}
|
||||
|
||||
|
@ -269,7 +274,8 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
// dedupe true, remote storage, remoteCache true, but no cacheDriver (remote)
|
||||
//nolint: lll
|
||||
if subPath.Dedupe && subPath.StorageDriver != nil && subPath.RemoteCache && subPath.CacheDriver == nil {
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg("dedupe set to true with remote storage and caching, but no remote cache configured!")
|
||||
log.Error().Err(zerr.ErrBadConfig).
|
||||
Msg("invalid cache config, dedupe set to true with remote storage and caching, but no remote cache configured!")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -277,7 +283,8 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
if subPath.CacheDriver != nil && subPath.RemoteCache {
|
||||
// local storage with remote caching
|
||||
if subPath.StorageDriver == nil {
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg("cannot have local storage driver with remote caching!")
|
||||
log.Error().Err(zerr.ErrBadConfig).
|
||||
Msg("invalid cache config, cannot have local storage driver with remote caching!")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -285,7 +292,7 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
// unsupported cache driver
|
||||
if subPath.CacheDriver["name"] != storageConstants.DynamoDBDriverName {
|
||||
log.Error().Err(zerr.ErrBadConfig).Interface("cacheDriver", cfg.Storage.CacheDriver["name"]).
|
||||
Msg("unsupported cache driver")
|
||||
Msg("invalid cache config, unsupported cache driver")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -293,8 +300,8 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
|
||||
if !subPath.RemoteCache && subPath.CacheDriver != nil {
|
||||
log.Warn().Err(zerr.ErrBadConfig).Str("directory", cfg.Storage.RootDirectory).
|
||||
Msg("remoteCache set to false but cacheDriver config (remote caching) provided for directory," +
|
||||
"will ignore and use local caching")
|
||||
Msg("invalid cache config, remoteCache set to false but cacheDriver config (remote caching)" +
|
||||
"provided for directory, will ignore and use local caching")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -303,11 +310,11 @@ func validateCacheConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
|
||||
func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error {
|
||||
if cfg.Extensions != nil && cfg.Extensions.Mgmt != nil {
|
||||
log.Warn().Msg("The mgmt extensions configuration option has been made redundant and will be ignored.")
|
||||
log.Warn().Msg("mgmt extensions configuration option has been made redundant and will be ignored.")
|
||||
}
|
||||
|
||||
if cfg.Extensions != nil && cfg.Extensions.APIKey != nil {
|
||||
log.Warn().Msg("The apikey extension configuration will be ignored as API keys " +
|
||||
log.Warn().Msg("apikey extension configuration will be ignored as API keys " +
|
||||
"are now configurable in the HTTP settings.")
|
||||
}
|
||||
|
||||
|
@ -315,7 +322,8 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
// it would make sense to also check for mgmt and user prefs to be enabled,
|
||||
// but those are both enabled by having the search and ui extensions enabled
|
||||
if cfg.Extensions.Search == nil || !*cfg.Extensions.Search.Enable {
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg("UI functionality can't be used without search extension.")
|
||||
log.Error().Err(zerr.ErrBadConfig).
|
||||
Msg("failed to enable ui, search extension must be enabled")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -324,7 +332,8 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
//nolint:lll
|
||||
if cfg.Storage.StorageDriver != nil && cfg.Extensions != nil && cfg.Extensions.Search != nil &&
|
||||
cfg.Extensions.Search.Enable != nil && *cfg.Extensions.Search.Enable && cfg.Extensions.Search.CVE != nil {
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg("CVE functionality can't be used with remote storage. Please disable CVE")
|
||||
log.Error().Err(zerr.ErrBadConfig).
|
||||
Msg("failed to enable cve scanning due to incompatibility with remote storage, please disable cve")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -333,7 +342,8 @@ func validateExtensionsConfig(cfg *config.Config, log zlog.Logger) error {
|
|||
//nolint:lll
|
||||
if subPath.StorageDriver != nil && cfg.Extensions != nil && cfg.Extensions.Search != nil &&
|
||||
cfg.Extensions.Search.Enable != nil && *cfg.Extensions.Search.Enable && cfg.Extensions.Search.CVE != nil {
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg("CVE functionality can't be used with remote storage. Please disable CVE")
|
||||
log.Error().Err(zerr.ErrBadConfig).
|
||||
Msg("failed to enable cve scanning due to incompatibility with remote storage, please disable cve")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -432,7 +442,8 @@ func validateConfiguration(config *config.Config, log zlog.Logger) error {
|
|||
for pattern := range config.HTTP.AccessControl.Repositories {
|
||||
ok := glob.ValidatePattern(pattern)
|
||||
if !ok {
|
||||
log.Error().Err(glob.ErrBadPattern).Str("pattern", pattern).Msg("authorization pattern could not be compiled")
|
||||
log.Error().Err(glob.ErrBadPattern).Str("pattern", pattern).
|
||||
Msg("failed to compile authorization pattern")
|
||||
|
||||
return glob.ErrBadPattern
|
||||
}
|
||||
|
@ -555,7 +566,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
|
|||
if config.Extensions.Search.CVE.UpdateInterval < defaultUpdateInterval {
|
||||
config.Extensions.Search.CVE.UpdateInterval = defaultUpdateInterval
|
||||
|
||||
log.Warn().Msg("CVE update interval set to too-short interval < 2h, " +
|
||||
log.Warn().Msg("cve update interval set to too-short interval < 2h, " +
|
||||
"changing update duration to 2 hours and continuing.")
|
||||
}
|
||||
|
||||
|
@ -565,15 +576,15 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
|
|||
|
||||
if config.Extensions.Search.CVE.Trivy.DBRepository == "" {
|
||||
defaultDBDownloadURL := "ghcr.io/aquasecurity/trivy-db"
|
||||
log.Info().Str("trivyDownloadURL", defaultDBDownloadURL).
|
||||
Msg("Config: using default Trivy DB download URL.")
|
||||
log.Info().Str("url", defaultDBDownloadURL).Str("component", "config").
|
||||
Msg("using default trivy-db download URL.")
|
||||
config.Extensions.Search.CVE.Trivy.DBRepository = defaultDBDownloadURL
|
||||
}
|
||||
|
||||
if config.Extensions.Search.CVE.Trivy.JavaDBRepository == "" {
|
||||
defaultJavaDBDownloadURL := "ghcr.io/aquasecurity/trivy-java-db"
|
||||
log.Info().Str("trivyJavaDownloadURL", defaultJavaDBDownloadURL).
|
||||
Msg("Config: using default Trivy Java DB download URL.")
|
||||
log.Info().Str("url", defaultJavaDBDownloadURL).Str("component", "config").
|
||||
Msg("using default trivy-java-db download URL.")
|
||||
config.Extensions.Search.CVE.Trivy.JavaDBRepository = defaultJavaDBDownloadURL
|
||||
}
|
||||
}
|
||||
|
@ -649,7 +660,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
|
|||
cachePath := path.Join(cacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName)
|
||||
|
||||
if _, err := os.Stat(cachePath); err == nil {
|
||||
log.Info().Msg("Config: dedupe set to false for s3 driver but used to be true.")
|
||||
log.Info().Str("component", "config").Msg("dedupe set to false for s3 driver but used to be true.")
|
||||
log.Info().Str("cache path", cachePath).Msg("found cache database")
|
||||
|
||||
config.Storage.RemoteCache = false
|
||||
|
@ -670,7 +681,7 @@ func applyDefaultValues(config *config.Config, viperInstance *viper.Viper, log z
|
|||
subpathCachePath := path.Join(subpathCacheDir, storageConstants.BoltdbName+storageConstants.DBExtensionName)
|
||||
|
||||
if _, err := os.Stat(subpathCachePath); err == nil {
|
||||
log.Info().Msg("Config: dedupe set to false for s3 driver but used to be true. ")
|
||||
log.Info().Str("component", "config").Msg("dedupe set to false for s3 driver but used to be true. ")
|
||||
log.Info().Str("cache path", subpathCachePath).Msg("found cache database")
|
||||
|
||||
storageConfig.RemoteCache = false
|
||||
|
@ -732,14 +743,14 @@ func LoadConfiguration(config *config.Config, configPath string) error {
viperInstance.SetConfigFile(configPath)
if err := viperInstance.ReadInConfig(); err != nil {
log.Error().Err(err).Msg("error while reading configuration")
log.Error().Err(err).Msg("failed to read configuration")
return err
}

metaData := &mapstructure.Metadata{}
if err := viperInstance.UnmarshalExact(&config, metadataConfig(metaData)); err != nil {
log.Error().Err(err).Msg("error while unmarshaling new config")
log.Error().Err(err).Msg("failed to unmarshal new config")
return err
}
@ -747,13 +758,15 @@ func LoadConfiguration(config *config.Config, configPath string) error {
|
|||
log := zlog.NewLogger(config.Log.Level, config.Log.Output)
|
||||
|
||||
if len(metaData.Keys) == 0 {
|
||||
log.Error().Err(zerr.ErrBadConfig).Msg("config doesn't contain any key:value pair")
|
||||
log.Error().Err(zerr.ErrBadConfig).
|
||||
Msg("failed to load config due to the absence of any key:value pair")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
||||
if len(metaData.Unused) > 0 {
|
||||
log.Error().Err(zerr.ErrBadConfig).Strs("keys", metaData.Unused).Msg("unknown keys")
|
||||
log.Error().Err(zerr.ErrBadConfig).Strs("keys", metaData.Unused).
|
||||
Msg("failed to load config due to unknown keys")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
@ -804,7 +817,7 @@ func readLDAPCredentials(ldapConfigPath string) (config.LDAPCredentials, error)
|
|||
viperInstance.SetConfigFile(ldapConfigPath)
|
||||
|
||||
if err := viperInstance.ReadInConfig(); err != nil {
|
||||
log.Error().Err(err).Msg("error while reading configuration")
|
||||
log.Error().Err(err).Msg("failed to read configuration")
|
||||
|
||||
return config.LDAPCredentials{}, err
|
||||
}
|
||||
|
@ -812,7 +825,7 @@ func readLDAPCredentials(ldapConfigPath string) (config.LDAPCredentials, error)
|
|||
var ldapCredentials config.LDAPCredentials
|
||||
|
||||
if err := viperInstance.Unmarshal(&ldapCredentials); err != nil {
|
||||
log.Error().Err(err).Msg("error while unmarshaling new config")
|
||||
log.Error().Err(err).Msg("failed to unmarshal new config")
|
||||
|
||||
return config.LDAPCredentials{}, err
|
||||
}
|
||||
|
@ -1014,8 +1027,8 @@ func validateSync(config *config.Config, log zlog.Logger) error {
|
|||
|
||||
if content.StripPrefix && !strings.Contains(content.Prefix, "/*") && content.Destination == "/" {
|
||||
log.Error().Err(zerr.ErrBadConfig).
|
||||
Interface("sync content", content).
|
||||
Msg("sync config: can not use stripPrefix true and destination '/' without using glob patterns in prefix")
|
||||
Interface("sync content", content).Str("component", "sync").
|
||||
Msg("can not use stripPrefix true and destination '/' without using glob patterns in prefix")
|
||||
|
||||
return zerr.ErrBadConfig
|
||||
}
|
||||
|
|
|
@ -101,7 +101,7 @@ func (zc Collector) Collect(ch chan<- prometheus.Metric) {
|
|||
|
||||
func panicOnDuplicateMetricName(m map[string]*prometheus.Desc, name string, log log.Logger) {
|
||||
if _, present := m[name]; present {
|
||||
log.Fatal().Msg("Duplicate keys: metric " + name + " already present")
|
||||
log.Fatal().Str("metric", name).Msg("duplicate key found")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -180,16 +180,16 @@ func runExporter(c *Controller) {
|
|||
|
||||
err := prometheus.Register(GetCollector(c))
|
||||
if err != nil {
|
||||
c.Log.Error().Err(err).Msg("Expected error in testing")
|
||||
c.Log.Debug().Err(err).Msg("ignoring error")
|
||||
}
|
||||
|
||||
http.Handle(c.Config.Exporter.Metrics.Path, promhttp.Handler())
|
||||
c.Log.Info().Str("exporter addr", exporterAddr).
|
||||
Str("exporter metrics path", c.Config.Exporter.Metrics.Path).
|
||||
Msg("Exporter is listening on exporter addr & exposes metrics on exporter metrics path")
|
||||
c.Log.Info().Str("addr", exporterAddr).
|
||||
Str("path", c.Config.Exporter.Metrics.Path).
|
||||
Msg("exporter listening")
|
||||
|
||||
serverAddr := fmt.Sprintf("%s://%s:%s", c.Config.Server.Protocol,
|
||||
c.Config.Server.Host, c.Config.Server.Port)
|
||||
c.Log.Info().Str("serverAddr", serverAddr).Msg("Scraping metrics")
|
||||
c.Log.Fatal().Err(server.ListenAndServe()).Msg("Exporter stopped")
|
||||
c.Log.Info().Str("serverAddr", serverAddr).Msg("scraping metrics")
|
||||
c.Log.Fatal().Err(server.ListenAndServe()).Msg("exporter stopped")
|
||||
}
|
||||
|
|
|
@ -60,15 +60,20 @@ func loadConfiguration(config *api.Config, configPath string) {
viper.SetConfigFile(configPath)
if err := viper.ReadInConfig(); err != nil {
log.Panic().Err(err).Msg("Error while reading configuration")
log.Panic().Err(err).Str("config", configPath).Msg("failed to read configuration")
}

metaData := &mapstructure.Metadata{}
if err := viper.Unmarshal(&config, metadataConfig(metaData)); err != nil {
log.Panic().Err(err).Msg("Error while unmarshaling new config")
log.Panic().Err(err).Str("config", configPath).Msg("failed to unmarshal config")
}

if len(metaData.Keys) == 0 || len(metaData.Unused) > 0 {
log.Panic().Err(zerr.ErrBadConfig).Msg("Bad configuration, retry writing it")
if len(metaData.Keys) == 0 {
log.Panic().Err(zerr.ErrBadConfig).Str("config", configPath).Msg("bad configuration")
}

if len(metaData.Unused) > 0 {
log.Panic().Err(zerr.ErrBadConfig).Interface("unknown fields", metaData.Unused).
Str("config", configPath).Msg("bad configuration")
}
}
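Both CLIs above load their configuration with viper and pass a mapstructure.Metadata through the decoder so that empty configs and unknown keys can be rejected. A minimal sketch of that technique is below; it assumes github.com/spf13/viper and github.com/mitchellh/mapstructure, and the Config struct is a made-up placeholder, not zot's real config type.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
)

var errBadConfig = errors.New("config: invalid config")

// Config is a placeholder; the real config struct is much larger.
type Config struct {
	DistSpecVersion string `mapstructure:"distSpecVersion"`
	HTTP            struct {
		Address string `mapstructure:"address"`
		Port    string `mapstructure:"port"`
	} `mapstructure:"http"`
}

func loadConfig(path string) (*Config, error) {
	v := viper.New()
	v.SetConfigFile(path)

	if err := v.ReadInConfig(); err != nil {
		return nil, fmt.Errorf("failed to read configuration: %w", err)
	}

	conf := &Config{}
	meta := &mapstructure.Metadata{}

	// record decoded and leftover keys so bad configs can be rejected
	if err := v.Unmarshal(conf, func(dc *mapstructure.DecoderConfig) { dc.Metadata = meta }); err != nil {
		return nil, fmt.Errorf("failed to unmarshal config: %w", err)
	}

	if len(meta.Keys) == 0 {
		return nil, fmt.Errorf("config has no key:value pairs: %w", errBadConfig)
	}

	if len(meta.Unused) > 0 {
		return nil, fmt.Errorf("unknown keys %v: %w", meta.Unused, errBadConfig)
	}

	return conf, nil
}

func main() {
	if _, err := loadConfig("config.json"); err != nil {
		fmt.Println(err)
	}
}
```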
@ -84,7 +84,7 @@ type ImageTrust struct {
|
|||
func (trust *ImageTrust) HandleCosignPublicKeyUpload(response http.ResponseWriter, request *http.Request) {
|
||||
body, err := io.ReadAll(request.Body)
|
||||
if err != nil {
|
||||
trust.Log.Error().Err(err).Msg("image trust: couldn't read cosign key body")
|
||||
trust.Log.Error().Err(err).Str("component", "image-trust").Msg("failed to read cosign key body")
|
||||
response.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
|
@ -95,7 +95,7 @@ func (trust *ImageTrust) HandleCosignPublicKeyUpload(response http.ResponseWrite
|
|||
if errors.Is(err, zerr.ErrInvalidPublicKeyContent) {
|
||||
response.WriteHeader(http.StatusBadRequest)
|
||||
} else {
|
||||
trust.Log.Error().Err(err).Msg("image trust: failed to save cosign key")
|
||||
trust.Log.Error().Err(err).Str("component", "image-trust").Msg("failed to save cosign key")
|
||||
response.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
|
@ -127,7 +127,7 @@ func (trust *ImageTrust) HandleNotationCertificateUpload(response http.ResponseW
|
|||
|
||||
body, err := io.ReadAll(request.Body)
|
||||
if err != nil {
|
||||
trust.Log.Error().Err(err).Msg("image trust: couldn't read notation certificate body")
|
||||
trust.Log.Error().Err(err).Str("component", "image-trust").Msg("failed to read notation certificate body")
|
||||
response.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
|
@ -139,7 +139,7 @@ func (trust *ImageTrust) HandleNotationCertificateUpload(response http.ResponseW
|
|||
errors.Is(err, zerr.ErrInvalidCertificateContent) {
|
||||
response.WriteHeader(http.StatusBadRequest)
|
||||
} else {
|
||||
trust.Log.Error().Err(err).Msg("image trust: failed to save notation certificate")
|
||||
trust.Log.Error().Err(err).Str("component", "image-trust").Msg("failed to save notation certificate")
|
||||
response.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
|
|
|
@ -18,10 +18,10 @@ func EnableMetricsExtension(config *config.Config, log log.Logger, rootDir strin
|
|||
if config.Extensions.Metrics.Prometheus.Path == "" {
|
||||
config.Extensions.Metrics.Prometheus.Path = "/metrics"
|
||||
|
||||
log.Warn().Msg("Prometheus instrumentation Path not set, changing to '/metrics'.")
|
||||
log.Warn().Msg("prometheus instrumentation path not set, changing to '/metrics'.")
|
||||
}
|
||||
} else {
|
||||
log.Info().Msg("Metrics config not provided, skipping Metrics config update")
|
||||
log.Info().Msg("metrics config not provided, skipping metrics config update")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -125,7 +125,7 @@ func (mgmt *Mgmt) HandleGetConfig(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
buf, err := zcommon.MarshalThroughStruct(sanitizedConfig, &StrippedConfig{})
|
||||
if err != nil {
|
||||
mgmt.Log.Error().Err(err).Msg("mgmt: couldn't marshal config response")
|
||||
mgmt.Log.Error().Err(err).Str("component", "mgmt").Msg("failed to marshal config response")
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ func EnableScrubExtension(config *config.Config, log log.Logger, storeController
if config.Extensions.Scrub.Interval < minScrubInterval {
config.Extensions.Scrub.Interval = minScrubInterval
log.Warn().Msg("Scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll
log.Warn().Msg("scrub interval set to too-short interval < 2h, changing scrub duration to 2 hours and continuing.") //nolint:lll // gofumpt conflicts with lll
}
generator := &taskGenerator{

@ -44,7 +44,7 @@ func EnableScrubExtension(config *config.Config, log log.Logger, storeController
}
}
} else {
log.Info().Msg("Scrub config not provided, skipping scrub")
log.Info().Msg("scrub config not provided, skipping scrub")
}
}
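The scrub and CVE hunks share one small pattern: when a user-supplied interval is shorter than a sane minimum, it is clamped and a warning is logged instead of failing startup. A hedged sketch of that pattern, assuming zerolog and an illustrative 2-hour floor:

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

const minInterval = 2 * time.Hour

// clampInterval enforces a lower bound on a configured interval,
// warning instead of erroring so the server still starts.
func clampInterval(configured time.Duration, log zerolog.Logger) time.Duration {
	if configured < minInterval {
		log.Warn().
			Dur("configured", configured).
			Dur("minimum", minInterval).
			Msg("interval set to too-short value, clamping to minimum and continuing")

		return minInterval
	}

	return configured
}

func main() {
	log := zerolog.New(os.Stdout).With().Timestamp().Logger()
	_ = clampInterval(30*time.Minute, log) // logs a warning and returns 2h
}
```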
|
@ -52,14 +52,14 @@ func EnableSearchExtension(conf *config.Config, storeController storage.StoreCon
|
|||
downloadTrivyDB(updateInterval, taskScheduler, cveScanner, log)
|
||||
startScanner(scanInterval, metaDB, taskScheduler, cveScanner, log)
|
||||
} else {
|
||||
log.Info().Msg("CVE config not provided, skipping CVE update")
|
||||
log.Info().Msg("cve config not provided, skipping cve-db update")
|
||||
}
|
||||
}
|
||||
|
||||
func downloadTrivyDB(interval time.Duration, sch *scheduler.Scheduler, cveScanner CveScanner, log log.Logger) {
|
||||
generator := cveinfo.NewDBUpdateTaskGenerator(interval, cveScanner, log)
|
||||
|
||||
log.Info().Msg("Submitting CVE DB update scheduler")
|
||||
log.Info().Msg("submitting cve-db update generator to scheduler")
|
||||
sch.SubmitGenerator(generator, interval, scheduler.HighPriority)
|
||||
}
|
||||
|
||||
|
@ -68,7 +68,7 @@ func startScanner(interval time.Duration, metaDB mTypes.MetaDB, sch *scheduler.S
|
|||
) {
|
||||
generator := cveinfo.NewScanTaskGenerator(metaDB, cveScanner, log)
|
||||
|
||||
log.Info().Msg("Submitting CVE scan scheduler")
|
||||
log.Info().Msg("submitting cve-scan generator to scheduler")
|
||||
sch.SubmitGenerator(generator, interval, scheduler.MediumPriority)
|
||||
}
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ func EnableSyncExtension(config *config.Config, metaDB mTypes.MetaDB,
|
|||
}
|
||||
|
||||
if len(registryConfig.URLs) == 0 {
|
||||
log.Error().Err(zerr.ErrSyncNoURLsLeft).Msg("unable to start sync extension")
|
||||
log.Error().Err(zerr.ErrSyncNoURLsLeft).Msg("failed to start sync extension")
|
||||
|
||||
return nil, zerr.ErrSyncNoURLsLeft
|
||||
}
|
||||
|
@ -68,7 +68,7 @@ func EnableSyncExtension(config *config.Config, metaDB mTypes.MetaDB,
|
|||
return onDemand, nil
|
||||
}
|
||||
|
||||
log.Info().Msg("Sync registries config not provided or disabled, skipping sync")
|
||||
log.Info().Msg("sync config not provided or disabled, so not enabling sync")
|
||||
|
||||
return nil, nil //nolint: nilnil
|
||||
}
|
||||
|
|
|
@ -30,7 +30,7 @@ func (uih uiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
_, err := w.Write(buf)
|
||||
if err != nil {
|
||||
uih.log.Error().Err(err).Msg("unable to serve index.html")
|
||||
uih.log.Error().Err(err).Msg("failed to serve index.html")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -65,7 +65,7 @@ func TestEnableExtension(t *testing.T) {
|
|||
data, err := os.ReadFile(logFile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(data), ShouldContainSubstring,
|
||||
"Sync registries config not provided or disabled, skipping sync")
|
||||
"sync config not provided or disabled, so not enabling sync")
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -106,7 +106,7 @@ func TestMetricsExtension(t *testing.T) {
|
|||
data, _ := os.ReadFile(logFile.Name())
|
||||
|
||||
So(string(data), ShouldContainSubstring,
|
||||
"Prometheus instrumentation Path not set, changing to '/metrics'.")
|
||||
"prometheus instrumentation path not set, changing to '/metrics'.")
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -168,7 +168,7 @@ func (imgTrustStore *ImageTrustStore) VerifySignature(
|
|||
}
|
||||
|
||||
if manifestDigest.String() == "" {
|
||||
return "", time.Time{}, false, zerr.ErrBadManifestDigest
|
||||
return "", time.Time{}, false, zerr.ErrBadSignatureManifestDigest
|
||||
}
|
||||
|
||||
switch signatureType {
|
||||
|
@ -264,7 +264,7 @@ func (validityT *validityTask) DoWork(ctx context.Context) error {
|
|||
if len(sigs[zcommon.CosignSignature]) != 0 || len(sigs[zcommon.NotationSignature]) != 0 {
|
||||
err := validityT.metaDB.UpdateSignaturesValidity(ctx, validityT.repo.Name, godigest.Digest(signedManifest))
|
||||
if err != nil {
|
||||
validityT.log.Info().Msg("error while verifying signatures")
|
||||
validityT.log.Info().Msg("failed to verify signatures")
|
||||
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -156,7 +156,7 @@ func TestVerifySignatures(t *testing.T) {
|
|||
imgTrustStore := &imagetrust.ImageTrustStore{}
|
||||
_, _, _, err := imgTrustStore.VerifySignature("", []byte(""), "", "", image.AsImageMeta(), "repo")
|
||||
So(err, ShouldNotBeNil)
|
||||
So(err, ShouldEqual, zerr.ErrBadManifestDigest)
|
||||
So(err, ShouldEqual, zerr.ErrBadSignatureManifestDigest)
|
||||
})
|
||||
|
||||
Convey("wrong signature type", t, func() {
|
||||
|
|
|
@ -43,7 +43,7 @@ func (linter *Linter) CheckMandatoryAnnotations(repo string, manifestDigest godi
|
|||
|
||||
content, err := imgStore.GetBlobContent(repo, manifestDigest)
|
||||
if err != nil {
|
||||
linter.log.Error().Err(err).Msg("linter: unable to get image manifest")
|
||||
linter.log.Error().Err(err).Str("component", "linter").Msg("failed to get image manifest")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
@ -51,7 +51,7 @@ func (linter *Linter) CheckMandatoryAnnotations(repo string, manifestDigest godi
|
|||
var manifest ispec.Manifest
|
||||
|
||||
if err := json.Unmarshal(content, &manifest); err != nil {
|
||||
linter.log.Error().Err(err).Msg("linter: couldn't unmarshal manifest JSON")
|
||||
linter.log.Error().Err(err).Str("component", "linter").Msg("failed to unmarshal manifest JSON")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
@ -78,7 +78,7 @@ func (linter *Linter) CheckMandatoryAnnotations(repo string, manifestDigest godi
|
|||
|
||||
content, err = imgStore.GetBlobContent(repo, configDigest)
|
||||
if err != nil {
|
||||
linter.log.Error().Err(err).Msg("linter: couldn't get config JSON " +
|
||||
linter.log.Error().Err(err).Str("component", "linter").Msg("failed to get config JSON " +
|
||||
configDigest.String())
|
||||
|
||||
return false, err
|
||||
|
@ -86,7 +86,7 @@ func (linter *Linter) CheckMandatoryAnnotations(repo string, manifestDigest godi
|
|||
|
||||
var imageConfig ispec.Image
|
||||
if err := json.Unmarshal(content, &imageConfig); err != nil {
|
||||
linter.log.Error().Err(err).Msg("linter: couldn't unmarshal config JSON " + configDigest.String())
|
||||
linter.log.Error().Err(err).Str("component", "linter").Msg("failed to unmarshal config JSON " + configDigest.String())
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
|
|
@ -157,7 +157,8 @@ func (ms *metricServer) SendMetric(mfunc interface{}) {
if ms.enabled {
mfn, ok := mfunc.(func())
if !ok {
ms.log.Error().Err(errors.ErrInvalidMetric).Msg("type conversion")
ms.log.Error().Err(errors.ErrInvalidMetric).
Msgf("failed to cast type, expected '%T' but got '%T'", func() {}, mfunc)
return
}

@ -169,7 +170,8 @@ func (ms *metricServer) ForceSendMetric(mfunc interface{}) {
func (ms *metricServer) ForceSendMetric(mfunc interface{}) {
mfn, ok := mfunc.(func())
if !ok {
ms.log.Error().Err(errors.ErrInvalidMetric).Msg("type conversion")
ms.log.Error().Err(errors.ErrInvalidMetric).
Msgf("failed to cast type, expected '%T' but got '%T'", func() {}, mfunc)
return
}

@ -354,7 +354,7 @@ func (ms *metricServer) CounterInc(cv *CounterValue) {
if err != nil {
// The last thing we want is to panic/stop the server due to instrumentation
// thus log a message (should be detected during development of new metrics)
ms.log.Error().Err(err).Msg("Instrumentation error")
ms.log.Error().Err(err).Msg("failed due to instrumentation error")
return
}

@ -374,7 +374,7 @@ func (ms *metricServer) GaugeSet(gv *GaugeValue) {
err := sanityChecks(gv.Name, labels, ok, gv.LabelNames, gv.LabelValues)
if err != nil {
ms.log.Error().Err(err).Msg("Instrumentation error")
ms.log.Error().Err(err).Msg("failed due to instrumentation error")
return
}

@ -393,7 +393,7 @@ func (ms *metricServer) SummaryObserve(sv *SummaryValue) {
err := sanityChecks(sv.Name, labels, ok, sv.LabelNames, sv.LabelValues)
if err != nil {
ms.log.Error().Err(err).Msg("Instrumentation error")
ms.log.Error().Err(err).Msg("failed due to instrumentation error")
return
}

@ -414,7 +414,7 @@ func (ms *metricServer) HistogramObserve(hv *HistogramValue) {
err := sanityChecks(hv.Name, labels, ok, hv.LabelNames, hv.LabelValues)
if err != nil {
ms.log.Error().Err(err).Msg("Instrumentation error")
ms.log.Error().Err(err).Msg("failed due to instrumentation error")
return
}
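SendMetric and ForceSendMetric above replace the terse "type conversion" log with a Msgf that prints both the expected and the received dynamic type. A small sketch of that comma-ok assertion pattern, assuming zerolog and an illustrative sentinel error:

```go
package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

var errInvalidMetric = errors.New("metrics: invalid metric function")

// sendMetric only runs mfunc if it really is a func(); otherwise it logs
// the expected and received types instead of panicking.
func sendMetric(mfunc interface{}, log zerolog.Logger) {
	mfn, ok := mfunc.(func())
	if !ok {
		log.Error().Err(errInvalidMetric).
			Msgf("failed to cast type, expected '%T' but got '%T'", func() {}, mfunc)

		return
	}

	mfn()
}

func main() {
	log := zerolog.New(os.Stdout)
	sendMetric(42, log)        // logs: expected 'func()' but got 'int'
	sendMetric(func() {}, log) // runs the metric function
}
```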
@ -70,12 +70,12 @@ func (mc *MetricsClient) GetMetrics() (*MetricsInfo, error) {
func (mc *MetricsClient) makeGETRequest(url string, resultsPtr interface{}) (http.Header, error) {
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, nil)
if err != nil {
return nil, fmt.Errorf("metric scraping: %w", err)
return nil, fmt.Errorf("metric scraping failed: %w", err)
}

resp, err := mc.config.HTTPClient.Do(req)
if err != nil {
return nil, fmt.Errorf("metric scraping error: %w", err)
return nil, fmt.Errorf("metric scraping failed: %w", err)
}

defer resp.Body.Close()
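makeGETRequest wraps transport failures with %w so callers can still inspect the underlying cause, and the resolvers later in this diff rely on the same mechanism via errors.Is. A small self-contained sketch of the wrapping and unwrapping (the sentinel name here is made up):

```go
package main

import (
	"errors"
	"fmt"
)

var errScrapeFailed = errors.New("metric scraping failed")

func scrape() error {
	// pretend the HTTP round trip failed
	cause := errors.New("connection refused")

	// %w keeps the cause attached to the returned error chain
	return fmt.Errorf("%w: %v", errScrapeFailed, cause)
}

func main() {
	err := scrape()

	fmt.Println(err)                              // metric scraping failed: connection refused
	fmt.Println(errors.Is(err, errScrapeFailed)) // true
}
```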
@ -477,11 +477,11 @@ func TestPopulateStorageMetrics(t *testing.T) {
|
|||
|
||||
// Wait for storage metrics to update
|
||||
found, err := test.ReadLogFileAndSearchString(logPath,
|
||||
"monitoring: computed storage usage for repo alpine", time.Minute)
|
||||
"computed storage usage for repo alpine", time.Minute)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
found, err = test.ReadLogFileAndSearchString(logPath,
|
||||
"monitoring: computed storage usage for repo busybox", time.Minute)
|
||||
"computed storage usage for repo busybox", time.Minute)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ func RunScrubRepo(ctx context.Context, imgStore storageTypes.ImageStore, repo st
|
|||
|
||||
results, err := storage.CheckRepo(ctx, repo, imgStore)
|
||||
if err != nil {
|
||||
errMessage := fmt.Sprintf("error while running scrub for %s", path.Join(imgStore.RootDir(), repo))
|
||||
errMessage := fmt.Sprintf("failed to run scrub for %s", path.Join(imgStore.RootDir(), repo))
|
||||
log.Error().Err(err).Msg(errMessage)
|
||||
log.Info().Msg(fmt.Sprintf("scrub unsuccessfully completed for %s", path.Join(imgStore.RootDir(), repo)))
|
||||
|
||||
|
@ -33,7 +33,8 @@ func RunScrubRepo(ctx context.Context, imgStore storageTypes.ImageStore, repo st
|
|||
Str("image", result.ImageName).
|
||||
Str("tag", result.Tag).
|
||||
Str("status", result.Status).
|
||||
Msg("scrub: blobs/manifest ok")
|
||||
Str("component", "scrub").
|
||||
Msg("blobs/manifest ok")
|
||||
} else {
|
||||
log.Warn().
|
||||
Str("image", result.ImageName).
|
||||
|
@ -41,7 +42,8 @@ func RunScrubRepo(ctx context.Context, imgStore storageTypes.ImageStore, repo st
|
|||
Str("status", result.Status).
|
||||
Str("affected blob", result.AffectedBlob).
|
||||
Str("error", result.Error).
|
||||
Msg("scrub: blobs/manifest affected")
|
||||
Str("component", "scrub").
|
||||
Msg("blobs/manifest affected")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@ func TestScrubExtension(t *testing.T) {
|
|||
|
||||
data, err := os.ReadFile(logFile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(data), ShouldContainSubstring, "scrub: blobs/manifest ok")
|
||||
So(string(data), ShouldContainSubstring, "blobs/manifest ok")
|
||||
})
|
||||
|
||||
Convey("Blobs integrity affected", t, func(c C) {
|
||||
|
@ -128,7 +128,7 @@ func TestScrubExtension(t *testing.T) {
|
|||
|
||||
data, err := os.ReadFile(logFile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(data), ShouldContainSubstring, "scrub: blobs/manifest affected")
|
||||
So(string(data), ShouldContainSubstring, "blobs/manifest affected")
|
||||
})
|
||||
|
||||
Convey("Generator error - not enough permissions to access root directory", t, func(c C) {
|
||||
|
@ -176,7 +176,7 @@ func TestScrubExtension(t *testing.T) {
|
|||
|
||||
data, err := os.ReadFile(logFile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(data), ShouldContainSubstring, "error while executing generator")
|
||||
So(string(data), ShouldContainSubstring, "failed to execute generator")
|
||||
|
||||
So(os.Chmod(path.Join(dir, repoName), 0o755), ShouldBeNil)
|
||||
})
|
||||
|
@ -215,7 +215,7 @@ func TestRunScrubRepo(t *testing.T) {
|
|||
|
||||
data, err := os.ReadFile(logFile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(data), ShouldContainSubstring, "scrub: blobs/manifest ok")
|
||||
So(string(data), ShouldContainSubstring, "blobs/manifest ok")
|
||||
})
|
||||
|
||||
Convey("Blobs integrity affected", t, func(c C) {
|
||||
|
@ -258,7 +258,7 @@ func TestRunScrubRepo(t *testing.T) {
|
|||
|
||||
data, err := os.ReadFile(logFile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(data), ShouldContainSubstring, "scrub: blobs/manifest affected")
|
||||
So(string(data), ShouldContainSubstring, "blobs/manifest affected")
|
||||
})
|
||||
|
||||
Convey("CheckRepo error - not enough permissions to access root directory", t, func(c C) {
|
||||
|
@ -295,7 +295,7 @@ func TestRunScrubRepo(t *testing.T) {
|
|||
data, err := os.ReadFile(logFile.Name())
|
||||
So(err, ShouldBeNil)
|
||||
So(string(data), ShouldContainSubstring,
|
||||
fmt.Sprintf("error while running scrub for %s", imgStore.RootDir()))
|
||||
fmt.Sprintf("failed to run scrub for %s", imgStore.RootDir()))
|
||||
So(os.Chmod(path.Join(dir, repoName), 0o755), ShouldBeNil)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -141,8 +141,8 @@ func RepoMeta2ExpandedRepoInfo(ctx context.Context, repoMeta mTypes.RepoMeta,
|
|||
|
||||
imageSummary, _, err := FullImageMeta2ImageSummary(ctx, GetFullImageMeta(tag, repoMeta, imageMeta))
|
||||
if err != nil {
|
||||
log.Error().Str("repository", repoName).Str("reference", tag).
|
||||
Msg("metadb: error while converting descriptor for image")
|
||||
log.Error().Str("repository", repoName).Str("reference", tag).Str("component", "metadb").
|
||||
Msg("error while converting descriptor for image")
|
||||
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -62,7 +62,7 @@ func (cveinfo BaseCveInfo) GetImageListForCVE(ctx context.Context, repo, cveID s
|
|||
repoMeta, err := cveinfo.MetaDB.GetRepoMeta(ctx, repo)
|
||||
if err != nil {
|
||||
cveinfo.Log.Error().Err(err).Str("repository", repo).Str("cve-id", cveID).
|
||||
Msg("unable to get list of tags from repo")
|
||||
Msg("failed to get list of tags from repo")
|
||||
|
||||
return imgList, err
|
||||
}
|
||||
|
@ -115,7 +115,7 @@ func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(ctx context.Context, repo, c
|
|||
repoMeta, err := cveinfo.MetaDB.GetRepoMeta(ctx, repo)
|
||||
if err != nil {
|
||||
cveinfo.Log.Error().Err(err).Str("repository", repo).Str("cve-id", cveID).
|
||||
Msg("unable to get list of tags from repo")
|
||||
Msg("failed to get list of tags from repo")
|
||||
|
||||
return []cvemodel.TagInfo{}, err
|
||||
}
|
||||
|
@ -135,7 +135,7 @@ func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(ctx context.Context, repo, c
|
|||
tagInfo, err := getTagInfoForManifest(tag, manifestDigestStr, cveinfo.MetaDB)
|
||||
if err != nil {
|
||||
cveinfo.Log.Error().Err(err).Str("repository", repo).Str("tag", tag).
|
||||
Str("cve-id", cveID).Msg("unable to retrieve manifest and config")
|
||||
Str("cve-id", cveID).Msg("failed to retrieve manifest and config")
|
||||
|
||||
continue
|
||||
}
|
||||
|
@ -160,7 +160,7 @@ func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(ctx context.Context, repo, c
|
|||
tagInfo, err := getTagInfoForManifest(tag, manifest.Digest.String(), cveinfo.MetaDB)
|
||||
if err != nil {
|
||||
cveinfo.Log.Error().Err(err).Str("repository", repo).Str("tag", tag).
|
||||
Str("cve-id", cveID).Msg("unable to retrieve manifest and config")
|
||||
Str("cve-id", cveID).Msg("failed to retrieve manifest and config")
|
||||
|
||||
continue
|
||||
}
|
||||
|
@ -210,10 +210,10 @@ func (cveinfo BaseCveInfo) GetImageListWithCVEFixed(ctx context.Context, repo, c
|
|||
|
||||
if len(vulnerableTags) != 0 {
|
||||
cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID).
|
||||
Interface("vulnerableTags", vulnerableTags).Msg("Vulnerable tags")
|
||||
Interface("tags", vulnerableTags).Msg("vulnerable tags")
|
||||
fixedTags = GetFixedTags(allTags, vulnerableTags)
|
||||
cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID).
|
||||
Interface("fixedTags", fixedTags).Msg("Fixed tags")
|
||||
Interface("tags", fixedTags).Msg("fixed tags")
|
||||
} else {
|
||||
cveinfo.Log.Info().Str("repository", repo).Str("cve-id", cveID).
|
||||
Msg("image does not contain any tag that have given cve")
|
||||
|
|
|
@ -462,23 +462,23 @@ func TestCVESearchDisabled(t *testing.T) {
|
|||
ctrlManager.StartAndWait(port)
|
||||
|
||||
// Wait for trivy db to download
|
||||
found, err := test.ReadLogFileAndSearchString(logPath, "CVE config not provided, skipping CVE update", 90*time.Second)
|
||||
found, err := test.ReadLogFileAndSearchString(logPath, "cve config not provided, skipping cve-db update", 90*time.Second)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
|
||||
defer ctrlManager.StopServer()
|
||||
|
||||
resp, _ := resty.R().SetBasicAuth(username, password).Get(baseURL + constants.FullSearchPrefix + "?query={CVEListForImage(image:\"zot-test\"){Tag%20CVEList{Id%20Description%20Severity%20PackageList{Name%20InstalledVersion%20FixedVersion}}}}")
|
||||
So(string(resp.Body()), ShouldContainSubstring, "search: CVE search is disabled")
|
||||
So(string(resp.Body()), ShouldContainSubstring, "cve search is disabled")
|
||||
So(resp.StatusCode(), ShouldEqual, 200)
|
||||
|
||||
resp, _ = resty.R().SetBasicAuth(username, password).Get(baseURL + constants.FullSearchPrefix + "?query={ImageListForCVE(id:\"CVE-201-20482\"){Results{RepoName%20Tag}}}")
|
||||
So(string(resp.Body()), ShouldContainSubstring, "search: CVE search is disabled")
|
||||
So(string(resp.Body()), ShouldContainSubstring, "cve search is disabled")
|
||||
So(resp.StatusCode(), ShouldEqual, 200)
|
||||
|
||||
resp, _ = resty.R().SetBasicAuth(username, password).Get(baseURL + constants.FullSearchPrefix + "?query={ImageListWithCVEFixed(id:\"" + "randomId" + "\",image:\"zot-test\"){Results{RepoName%20LastUpdated}}}")
|
||||
So(resp, ShouldNotBeNil)
|
||||
So(string(resp.Body()), ShouldContainSubstring, "search: CVE search is disabled")
|
||||
So(string(resp.Body()), ShouldContainSubstring, "cve search is disabled")
|
||||
So(resp.StatusCode(), ShouldEqual, 200)
|
||||
})
|
||||
}
|
||||
|
@ -542,7 +542,7 @@ func TestCVESearch(t *testing.T) {
|
|||
// trivy db download fail
|
||||
err = os.Mkdir(dbDir+"/_trivy", 0o000)
|
||||
So(err, ShouldBeNil)
|
||||
found, err := test.ReadLogFileAndSearchString(logPath, "Error downloading Trivy DB to destination dir", 180*time.Second)
|
||||
found, err := test.ReadLogFileAndSearchString(logPath, "failed to download trivy-db to destination dir", 180*time.Second)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
|
||||
|
@ -550,7 +550,7 @@ func TestCVESearch(t *testing.T) {
|
|||
So(err, ShouldBeNil)
|
||||
|
||||
// Wait for trivy db to download
|
||||
found, err = test.ReadLogFileAndSearchString(logPath, "DB update completed, next update scheduled", 180*time.Second)
|
||||
found, err = test.ReadLogFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval", 180*time.Second)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
|
||||
|
@ -1541,7 +1541,7 @@ func TestFixedTagsWithIndex(t *testing.T) {
|
|||
So(err, ShouldBeNil)
|
||||
|
||||
// Wait for trivy db to download
|
||||
found, err := test.ReadLogFileAndSearchString(logPath, "DB update completed, next update scheduled", 180*time.Second)
|
||||
found, err := test.ReadLogFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval", 180*time.Second)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
|
||||
|
|
|
@ -14,10 +14,12 @@ import (
|
|||
func NewScanTaskGenerator(
|
||||
metaDB mTypes.MetaDB,
|
||||
scanner Scanner,
|
||||
log log.Logger,
|
||||
logC log.Logger,
|
||||
) scheduler.TaskGenerator {
|
||||
sublogger := logC.With().Str("component", "cve").Logger()
|
||||
|
||||
return &scanTaskGenerator{
|
||||
log: log,
|
||||
log: log.Logger{Logger: sublogger},
|
||||
metaDB: metaDB,
|
||||
scanner: scanner,
|
||||
lock: &sync.Mutex{},
|
||||
|
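NewScanTaskGenerator now derives a child logger that stamps every event with a component field instead of prefixing the component into the message text. A minimal sketch of the same zerolog technique, with illustrative field values:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	base := zerolog.New(os.Stdout).With().Timestamp().Logger()

	// every event from this sublogger carries "component":"cve"
	cveLog := base.With().Str("component", "cve").Logger()

	cveLog.Info().Str("image", "repo:tag").Msg("scheduled cve scan completed successfully for image")
	// => {"level":"info","component":"cve","image":"repo:tag",...,"message":"scheduled cve scan completed successfully for image"}
}
```

Attaching the component as a structured field keeps the message itself lowercase and component-free, which is the convention the rest of this change moves the codebase toward.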
@ -126,13 +128,13 @@ func (gen *scanTaskGenerator) Next() (scheduler.Task, error) {
|
|||
if err != nil {
|
||||
// Do not crash the generator for potential metadb inconsistencies
|
||||
// as there may be scannable images not yet scanned
|
||||
gen.log.Warn().Err(err).Msg("Scheduled CVE scan: error while obtaining repo metadata")
|
||||
gen.log.Warn().Err(err).Msg("failed to obtain repo metadata during scheduled cve scan")
|
||||
}
|
||||
|
||||
// no imageMeta are returned, all results are in already in cache
|
||||
// or manifests cannot be scanned
|
||||
if len(imageMeta) == 0 {
|
||||
gen.log.Info().Msg("Scheduled CVE scan: finished for available images")
|
||||
gen.log.Info().Msg("finished scanning available images during scheduled cve scan")
|
||||
|
||||
gen.done = true
|
||||
|
||||
|
@ -185,13 +187,13 @@ func (st *scanTask) DoWork(ctx context.Context) error {
|
|||
// We cache the results internally in the scanner
|
||||
// so we can discard the actual results for now
|
||||
if _, err := st.generator.scanner.ScanImage(ctx, image); err != nil {
|
||||
st.generator.log.Error().Err(err).Str("image", image).Msg("Scheduled CVE scan errored for image")
|
||||
st.generator.log.Error().Err(err).Str("image", image).Msg("failed to perform scheduled cve scan for image")
|
||||
st.generator.addError(st.digest, err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
st.generator.log.Debug().Str("image", image).Msg("Scheduled CVE scan completed successfully for image")
|
||||
st.generator.log.Debug().Str("image", image).Msg("scheduled cve scan completed successfully for image")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -440,7 +440,7 @@ func TestScanGeneratorWithMockedData(t *testing.T) { //nolint: gocyclo
|
|||
|
||||
// Make sure the scanner generator has completed despite errors
|
||||
found, err := test.ReadLogFileAndSearchString(logPath,
|
||||
"Scheduled CVE scan: finished for available images", 40*time.Second)
|
||||
"finished scanning available images during scheduled cve scan", 40*time.Second)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
|
||||
|
@ -462,19 +462,19 @@ func TestScanGeneratorWithMockedData(t *testing.T) { //nolint: gocyclo
|
|||
}
|
||||
|
||||
found, err = test.ReadLogFileAndSearchString(logPath,
|
||||
"Scheduled CVE scan: error while obtaining repo metadata", 20*time.Second)
|
||||
"failed to obtain repo metadata during scheduled cve scan", 20*time.Second)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
|
||||
// Make sure the scanner generator is catching the scanning error for repo7
|
||||
found, err = test.ReadLogFileAndSearchString(logPath,
|
||||
"Scheduled CVE scan errored for image", 20*time.Second)
|
||||
"failed to perform scheduled cve scan for image", 20*time.Second)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
|
||||
// Make sure the scanner generator is triggered at least twice
|
||||
found, err = test.ReadLogFileAndCountStringOccurence(logPath,
|
||||
"Scheduled CVE scan: finished for available images", 30*time.Second, 2)
|
||||
"finished scanning available images during scheduled cve scan", 30*time.Second, 2)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
})
|
||||
|
@ -537,7 +537,7 @@ func TestScanGeneratorWithRealData(t *testing.T) {
|
|||
|
||||
// Make sure the scanner generator has completed
|
||||
found, err := test.ReadLogFileAndSearchString(logPath,
|
||||
"Scheduled CVE scan: finished for available images", 120*time.Second)
|
||||
"finished scanning available images during scheduled cve scan", 120*time.Second)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
|
||||
|
@ -547,7 +547,7 @@ func TestScanGeneratorWithRealData(t *testing.T) {
|
|||
So(found, ShouldBeTrue)
|
||||
|
||||
found, err = test.ReadLogFileAndSearchString(logPath,
|
||||
"Scheduled CVE scan completed successfully for image", 120*time.Second)
|
||||
"scheduled cve scan completed successfully for image", 120*time.Second)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
|
||||
|
|
|
@ -356,7 +356,7 @@ func (scanner Scanner) ScanImage(ctx context.Context, image string) (map[string]
|
|||
}
|
||||
|
||||
if err != nil {
|
||||
scanner.log.Error().Err(err).Str("image", originalImageInput).Msg("unable to scan image")
|
||||
scanner.log.Error().Err(err).Str("image", originalImageInput).Msg("failed to scan image")
|
||||
|
||||
return map[string]cvemodel.CVE{}, err
|
||||
}
|
||||
|
@ -537,16 +537,16 @@ func (scanner Scanner) UpdateDB(ctx context.Context) error {
|
|||
}
|
||||
|
||||
func (scanner Scanner) updateDB(ctx context.Context, dbDir string) error {
|
||||
scanner.log.Debug().Str("dbDir", dbDir).Msg("Download Trivy DB to destination dir")
|
||||
scanner.log.Debug().Str("dbDir", dbDir).Msg("download Trivy DB to destination dir")
|
||||
|
||||
registryOpts := fanalTypes.RegistryOptions{Insecure: false}
|
||||
|
||||
scanner.log.Debug().Str("dbDir", dbDir).Msg("Started downloading Trivy DB to destination dir")
|
||||
scanner.log.Debug().Str("dbDir", dbDir).Msg("started downloading trivy-db to destination dir")
|
||||
|
||||
err := operation.DownloadDB(ctx, "dev", dbDir, scanner.dbRepository, false, false, registryOpts)
|
||||
if err != nil {
|
||||
scanner.log.Error().Err(err).Str("dbDir", dbDir).
|
||||
Str("dbRepository", scanner.dbRepository).Msg("Error downloading Trivy DB to destination dir")
|
||||
Str("dbRepository", scanner.dbRepository).Msg("failed to download trivy-db to destination dir")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -556,13 +556,13 @@ func (scanner Scanner) updateDB(ctx context.Context, dbDir string) error {
|
|||
|
||||
if err := javadb.Update(); err != nil {
|
||||
scanner.log.Error().Err(err).Str("dbDir", dbDir).
|
||||
Str("javaDBRepository", scanner.javaDBRepository).Msg("Error downloading Trivy Java DB to destination dir")
|
||||
Str("javaDBRepository", scanner.javaDBRepository).Msg("failed to download trivy-java-db to destination dir")
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
scanner.log.Debug().Str("dbDir", dbDir).Msg("Finished downloading Trivy DB to destination dir")
|
||||
scanner.log.Debug().Str("dbDir", dbDir).Msg("finished downloading trivy-db to destination dir")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -93,7 +93,7 @@ func newDBUpdadeTask(interval time.Duration, scanner Scanner,
|
|||
}
|
||||
|
||||
func (dbt *dbUpdateTask) DoWork(ctx context.Context) error {
|
||||
dbt.log.Info().Msg("updating the CVE database")
|
||||
dbt.log.Info().Msg("updating cve-db")
|
||||
|
||||
err := dbt.scanner.UpdateDB(ctx)
|
||||
if err != nil {
|
||||
|
@ -115,7 +115,8 @@ func (dbt *dbUpdateTask) DoWork(ctx context.Context) error {
|
|||
dbt.generator.lastTaskTime = time.Now()
|
||||
dbt.generator.status = done
|
||||
dbt.generator.lock.Unlock()
|
||||
dbt.log.Info().Str("DB update completed, next update scheduled after", dbt.interval.String()).Msg("")
|
||||
|
||||
dbt.log.Info().Interface("interval", dbt.interval).Msg("cve-db update completed, next update scheduled after interval")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -71,7 +71,7 @@ func TestCVEDBGenerator(t *testing.T) {
|
|||
|
||||
// Wait for trivy db to download
|
||||
found, err := test.ReadLogFileAndCountStringOccurence(logPath,
|
||||
"DB update completed, next update scheduled", 140*time.Second, 2)
|
||||
"cve-db update completed, next update scheduled after interval", 140*time.Second, 2)
|
||||
So(err, ShouldBeNil)
|
||||
So(found, ShouldBeTrue)
|
||||
})
|
||||
|
|
|
@@ -339,12 +339,12 @@ func getImageListForCVE(
for _, repoMeta := range reposMeta {
repo := repoMeta.Name

- log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags affected by CVE")
+ log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags affected by this cve")

tagsInfo, err := cveInfo.GetImageListForCVE(ctx, repo, cveID)
if err != nil {
log.Error().Str("repository", repo).Str("CVE", cveID).Err(err).
- Msg("error getting image list for CVE from repo")
+ Msg("failed to get image list for this cve from repository")

return &gql_generated.PaginatedImagesResult{}, err
}

@@ -412,12 +412,12 @@ func getImageListWithCVEFixed(
) (*gql_generated.PaginatedImagesResult, error) {
imageList := make([]*gql_generated.ImageSummary, 0)

- log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags where CVE is fixed")
+ log.Info().Str("repository", repo).Str("CVE", cveID).Msg("extracting list of tags where this cve is fixed")

tagsInfo, err := cveInfo.GetImageListWithCVEFixed(ctx, repo, cveID)
if err != nil {
log.Error().Str("repository", repo).Str("CVE", cveID).Err(err).
- Msg("error getting image list with CVE fixed from repo")
+ Msg("failed to get image list with this cve fixed from repository")

return &gql_generated.PaginatedImagesResult{
Page: &gql_generated.PageInfo{},

@@ -790,7 +790,7 @@ func derivedImageList(ctx context.Context, image string, digest *string, metaDB
searchedImage, err := getImageSummary(ctx, imageRepo, imageTag, digest, skipReferenceImage, metaDB, cveInfo, log)
if err != nil {
if errors.Is(err, zerr.ErrRepoMetaNotFound) {
- return &gql_generated.PaginatedImagesResult{}, gqlerror.Errorf("repository: not found")
+ return &gql_generated.PaginatedImagesResult{}, gqlerror.Errorf("repository not found")
}

return &gql_generated.PaginatedImagesResult{}, err

@@ -891,7 +891,7 @@ func baseImageList(ctx context.Context, image string, digest *string, metaDB mTy
searchedImage, err := getImageSummary(ctx, imageRepo, imageTag, digest, skipReferenceImage, metaDB, cveInfo, log)
if err != nil {
if errors.Is(err, zerr.ErrRepoMetaNotFound) {
- return &gql_generated.PaginatedImagesResult{}, gqlerror.Errorf("repository: not found")
+ return &gql_generated.PaginatedImagesResult{}, gqlerror.Errorf("repository not found")
}

return &gql_generated.PaginatedImagesResult{}, err

@@ -963,7 +963,7 @@ func validateGlobalSearchInput(query string, filter *gql_generated.Filter,
requestedPage *gql_generated.PageInput,
) error {
if len(query) > querySizeLimit {
- return fmt.Errorf("global-search: max string size limit exceeded for query parameter. max=%d current=%d %w",
+ return fmt.Errorf("max string size limit exceeded for query parameter. max=%d current=%d %w",
querySizeLimit, len(query), zerr.ErrInvalidRequestParams)
}

@@ -987,14 +987,14 @@ func checkFilter(filter *gql_generated.Filter) error {

for _, arch := range filter.Arch {
if len(*arch) > querySizeLimit {
- return fmt.Errorf("global-search: max string size limit exceeded for arch parameter. max=%d current=%d %w",
+ return fmt.Errorf("max string size limit exceeded for arch parameter. max=%d current=%d %w",
querySizeLimit, len(*arch), zerr.ErrInvalidRequestParams)
}
}

for _, osSys := range filter.Os {
if len(*osSys) > querySizeLimit {
- return fmt.Errorf("global-search: max string size limit exceeded for os parameter. max=%d current=%d %w",
+ return fmt.Errorf("max string size limit exceeded for os parameter. max=%d current=%d %w",
querySizeLimit, len(*osSys), zerr.ErrInvalidRequestParams)
}
}

@@ -1008,12 +1008,12 @@ func checkRequestedPage(requestedPage *gql_generated.PageInput) error {
}

if requestedPage.Limit != nil && *requestedPage.Limit < 0 {
- return fmt.Errorf("global-search: requested page limit parameter can't be negative %w",
+ return fmt.Errorf("requested page limit parameter can't be negative %w",
zerr.ErrInvalidRequestParams)
}

if requestedPage.Offset != nil && *requestedPage.Offset < 0 {
- return fmt.Errorf("global-search: requested page offset parameter can't be negative %w",
+ return fmt.Errorf("requested page offset parameter can't be negative %w",
zerr.ErrInvalidRequestParams)
}

@@ -1081,14 +1081,16 @@ func deleteElementAt(slice []*string, i int) []*string {
func expandedRepoInfo(ctx context.Context, repo string, metaDB mTypes.MetaDB, cveInfo cveinfo.CveInfo, log log.Logger,
) (*gql_generated.RepoInfo, error) {
if ok, err := reqCtx.RepoIsUserAvailable(ctx, repo); !ok || err != nil {
- log.Info().Err(err).Str("repository", repo).Bool("availability", ok).Msg("resolver: repo user availability")
+ log.Info().Err(err).Str("repository", repo).Bool("availability", ok).Str("component", "graphql").
+ Msg("repo user availability")

return &gql_generated.RepoInfo{}, nil //nolint:nilerr // don't give details to a potential attacker
}

repoMeta, err := metaDB.GetRepoMeta(ctx, repo)
if err != nil {
- log.Error().Err(err).Str("repository", repo).Msg("resolver: can't retrieve repoMeta for repo")
+ log.Error().Err(err).Str("repository", repo).Str("component", "graphql").
+ Msg("can't retrieve repoMeta for repository")

return &gql_generated.RepoInfo{}, err
}

@@ -1105,7 +1107,8 @@ func expandedRepoInfo(ctx context.Context, repo string, metaDB mTypes.MetaDB, cv

imageMetaMap, err := metaDB.FilterImageMeta(ctx, tagsDigests)
if err != nil {
- log.Error().Err(err).Str("repository", repo).Msg("resolver: can't retrieve imageMeta for repo")
+ log.Error().Err(err).Str("repository", repo).Str("component", "graphql").
+ Msg("can't retrieve imageMeta for repo")

return &gql_generated.RepoInfo{}, err
}

@@ -1202,9 +1205,10 @@ func getReferrers(metaDB mTypes.MetaDB, repo string, referredDigest string, arti
) ([]*gql_generated.Referrer, error) {
refDigest := godigest.Digest(referredDigest)
if err := refDigest.Validate(); err != nil {
- log.Error().Err(err).Str("digest", referredDigest).Msg("graphql: bad referenced digest string from request")
+ log.Error().Err(err).Str("digest", referredDigest).Str("component", "graphql").
+ Msg("bad referenced digest string from request")

- return []*gql_generated.Referrer{}, fmt.Errorf("graphql: bad digest string from request '%s' %w",
+ return []*gql_generated.Referrer{}, fmt.Errorf("bad digest string from request '%s' %w",
referredDigest, err)
}
@@ -56,11 +56,11 @@ func (r *queryResolver) ImageListForDigest(ctx context.Context, id string, reque

// RepoListWithNewestImage is the resolver for the RepoListWithNewestImage field.
func (r *queryResolver) RepoListWithNewestImage(ctx context.Context, requestedPage *gql_generated.PageInput) (*gql_generated.PaginatedReposResult, error) {
- r.log.Info().Msg("extension api: finding image list")
+ r.log.Info().Str("component", "graphql").Msg("finding image list")

paginatedReposResult, err := repoListWithNewestImage(ctx, r.cveInfo, r.log, requestedPage, r.metaDB)
if err != nil {
- r.log.Error().Err(err).Msg("unable to retrieve repo list")
+ r.log.Error().Err(err).Msg("failed to retrieve repo list")

return paginatedReposResult, err
}

@@ -70,11 +70,11 @@ func (r *queryResolver) RepoListWithNewestImage(ctx context.Context, requestedPa

// ImageList is the resolver for the ImageList field.
func (r *queryResolver) ImageList(ctx context.Context, repo string, requestedPage *gql_generated.PageInput) (*gql_generated.PaginatedImagesResult, error) {
- r.log.Info().Msg("extension api: getting a list of all images")
+ r.log.Info().Str("component", "graphql").Msg("getting a list of all images")

imageList, err := getImageList(ctx, repo, r.metaDB, r.cveInfo, requestedPage, r.log)
if err != nil {
- r.log.Error().Err(err).Str("repository", repo).Msg("unable to retrieve image list for repo")
+ r.log.Error().Err(err).Str("repository", repo).Msg("failed to retrieve image list for repo")

return imageList, err
}

@@ -141,7 +141,7 @@ func (r *queryResolver) Image(ctx context.Context, image string) (*gql_generated
func (r *queryResolver) Referrers(ctx context.Context, repo string, digest string, typeArg []string) ([]*gql_generated.Referrer, error) {
referrers, err := getReferrers(r.metaDB, repo, digest, typeArg, r.log)
if err != nil {
- r.log.Error().Err(err).Msg("unable to get referrers from default store")
+ r.log.Error().Err(err).Msg("failed to get referrers from default store")

return []*gql_generated.Referrer{}, err
}
@@ -703,11 +703,12 @@ func TestRepoListWithNewestImage(t *testing.T) {
So(found, ShouldBeTrue)
So(err, ShouldBeNil)

- found, err = readFileAndSearchString(logPath, "updating the CVE database", 2*time.Minute)
+ found, err = readFileAndSearchString(logPath, "updating cve-db", 2*time.Minute)
So(found, ShouldBeTrue)
So(err, ShouldBeNil)

- found, err = readFileAndSearchString(logPath, "DB update completed, next update scheduled", 4*time.Minute)
+ found, err = readFileAndSearchString(logPath,
+ "cve-db update completed, next update scheduled after interval", 4*time.Minute)
So(found, ShouldBeTrue)
So(err, ShouldBeNil)

@@ -2096,7 +2097,7 @@ func TestDerivedImageList(t *testing.T) {
}`

resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query))
- So(strings.Contains(string(resp.Body()), "repository: not found"), ShouldBeTrue)
+ So(string(resp.Body()), ShouldContainSubstring, "repository not found")
So(err, ShouldBeNil)
})

@@ -2179,7 +2180,7 @@ func TestDerivedImageListNoRepos(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode(), ShouldEqual, 200)

- So(strings.Contains(string(resp.Body()), "repository: not found"), ShouldBeTrue)
+ So(string(resp.Body()), ShouldContainSubstring, "repository not found")
So(err, ShouldBeNil)
})
}

@@ -2742,7 +2743,7 @@ func TestBaseImageList(t *testing.T) {
}`

resp, err := resty.R().Get(baseURL + graphqlQueryPrefix + "?query=" + url.QueryEscape(query))
- So(strings.Contains(string(resp.Body()), "repository: not found"), ShouldBeTrue)
+ So(string(resp.Body()), ShouldContainSubstring, "repository not found")
So(err, ShouldBeNil)
})

@@ -3395,11 +3396,12 @@ func TestGlobalSearch(t *testing.T) {
So(found, ShouldBeTrue)
So(err, ShouldBeNil)

- found, err = readFileAndSearchString(logPath, "updating the CVE database", 2*time.Minute)
+ found, err = readFileAndSearchString(logPath, "updating cve-db", 2*time.Minute)
So(found, ShouldBeTrue)
So(err, ShouldBeNil)

- found, err = readFileAndSearchString(logPath, "DB update completed, next update scheduled", 4*time.Minute)
+ found, err = readFileAndSearchString(logPath, "cve-db update completed, next update scheduled after interval",
+ 4*time.Minute)
So(found, ShouldBeTrue)
So(err, ShouldBeNil)

@@ -6118,7 +6120,7 @@ func TestImageSummary(t *testing.T) {

So(len(imgSummaryResponse.Errors), ShouldEqual, 1)
So(imgSummaryResponse.Errors[0].Message,
- ShouldContainSubstring, "metadb: repo metadata not found for given repo name")
+ ShouldContainSubstring, "repo metadata not found for given repo name")

t.Log("starting Test retrieve image with bad tag")
// gql is parametrized with the repo.
@@ -113,7 +113,7 @@ func (cm ContentManager) getContentByUpstreamRepo(repo string) *syncconf.Content
if err != nil {
cm.log.Error().Str("errorType", common.TypeOf(err)).
Err(err).Str("pattern",
- prefix).Msg("error while parsing glob pattern, skipping it...")
+ prefix).Msg("failed to parse glob pattern, skipping it")

continue
}

@@ -226,7 +226,7 @@ func filterTagsByRegex(tags []string, regex string, log log.Logger) ([]string, e

tagReg, err := regexp.Compile(regex)
if err != nil {
- log.Error().Err(err).Str("regex", regex).Msg("couldn't compile regex")
+ log.Error().Err(err).Str("regex", regex).Msg("failed to compile regex")

return filteredTags, err
}
@@ -173,10 +173,11 @@ func (registry *DestinationRegistry) CommitImage(imageReference types.ImageRefer
err = meta.SetImageMetaFromInput(context.Background(), repo, reference, mediaType,
manifestDigest, manifestBlob, imageStore, registry.metaDB, registry.log)
if err != nil {
- return fmt.Errorf("metaDB: failed to set metadata for image '%s %s': %w", repo, reference, err)
+ return fmt.Errorf("failed to set metadata for image '%s %s': %w", repo, reference, err)
}

- registry.log.Debug().Str("repo", repo).Str("reference", reference).Msg("metaDB: successfully set metadata for image")
+ registry.log.Debug().Str("repo", repo).Str("reference", reference).Str("component", "metadb").
+ Msg("successfully set metadata for image")
}
}
@@ -87,8 +87,8 @@ func (httpClient *Client) Ping() bool {

resp, err := httpClient.client.Do(req)
if err != nil {
- httpClient.log.Error().Err(err).Str("url", pingURL.String()).
- Msg("sync: failed to ping registry")
+ httpClient.log.Error().Err(err).Str("url", pingURL.String()).Str("component", "sync").
+ Msg("failed to ping registry")

return false
}

@@ -108,7 +108,7 @@ func (httpClient *Client) Ping() bool {
}

httpClient.log.Error().Str("url", pingURL.String()).Str("body", string(body)).Int("statusCode", resp.StatusCode).
- Msg("sync: failed to ping registry")
+ Str("component", "sync").Msg("failed to ping registry")

return false
}
@@ -148,7 +148,7 @@ func (onDemand *BaseOnDemand) syncImage(ctx context.Context, repo, reference str
}()

onDemand.log.Info().Str("repo", repo).Str(reference, "reference").Str("err", err.Error()).
- Msg("sync routine: starting routine to copy image, because of error")
+ Str("component", "sync").Msg("starting routine to copy image, because of error")

time.Sleep(retryOptions.Delay)

@@ -159,7 +159,7 @@ func (onDemand *BaseOnDemand) syncImage(ctx context.Context, repo, reference str
return err
}, retryOptions); err != nil {
onDemand.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).Str("reference", reference).
- Err(err).Msg("sync routine: error while copying image")
+ Err(err).Str("component", "sync").Msg("failed to copy image")
}
}(service)
}
@@ -150,8 +150,8 @@ func (ref CosignReference) SyncReferences(ctx context.Context, localRepo, remote
Msg("successfully synced cosign reference for image")

if ref.metaDB != nil {
- ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
- Msg("metaDB: trying to sync cosign reference for image")
+ ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).Str("component", "metadb").
+ Msg("trying to sync cosign reference for image")

err = meta.SetImageMetaFromInput(ctx, localRepo, cosignTag, ispec.MediaTypeImageManifest,
referenceDigest, manifestBuf, ref.storeController.GetImageStore(localRepo),

@@ -162,8 +162,8 @@ func (ref CosignReference) SyncReferences(ctx context.Context, localRepo, remote
localRepo, subjectDigestStr, err)
}

- ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
- Msg("metaDB: successfully added cosign reference for image")
+ ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).Str("component", "metadb").
+ Msg("successfully added cosign reference for image")
}
}
|
|||
refsDigests = append(refsDigests, referenceDigest)
|
||||
|
||||
if ref.metaDB != nil {
|
||||
ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
|
||||
Msg("metaDB: trying to add oci references for image")
|
||||
ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).Str("component", "metadb").
|
||||
Msg("trying to add oci references for image")
|
||||
|
||||
err = meta.SetImageMetaFromInput(ctx, localRepo, referenceDigest.String(), referrer.MediaType,
|
||||
referenceDigest, referenceBuf, ref.storeController.GetImageStore(localRepo),
|
||||
|
@ -145,8 +145,8 @@ func (ref OciReferences) SyncReferences(ctx context.Context, localRepo, remoteRe
|
|||
localRepo, subjectDigestStr, err)
|
||||
}
|
||||
|
||||
ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
|
||||
Msg("metaDB: successfully added oci references to MetaDB for image")
|
||||
ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).Str("component", "metadb").
|
||||
Msg("successfully added oci references to MetaDB for image")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -151,20 +151,20 @@ func (ref ORASReferences) SyncReferences(ctx context.Context, localRepo, remoteR
|
|||
refsDigests = append(refsDigests, referenceDigest)
|
||||
|
||||
if ref.metaDB != nil {
|
||||
ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
|
||||
Msg("metaDB: trying to sync oras artifact for image")
|
||||
ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).Str("component", "metadb").
|
||||
Msg("trying to sync oras artifact for image")
|
||||
|
||||
err := meta.SetImageMetaFromInput(context.Background(), localRepo, //nolint:contextcheck
|
||||
referenceDigest.String(), referrer.MediaType,
|
||||
referenceDigest, orasBuf, ref.storeController.GetImageStore(localRepo),
|
||||
ref.metaDB, ref.log)
|
||||
if err != nil {
|
||||
return refsDigests, fmt.Errorf("metaDB: failed to set metadata for oras artifact '%s@%s': %w",
|
||||
return refsDigests, fmt.Errorf("failed to set metadata in db for oras artifact '%s@%s': %w",
|
||||
localRepo, subjectDigestStr, err)
|
||||
}
|
||||
|
||||
ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
|
||||
Msg("metaDB: successfully added oras artifacts to MetaDB for image")
|
||||
ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).Str("component", "metadb").
|
||||
Msg("successfully added oras artifacts to MetaDB for image")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -110,8 +110,8 @@ func (ref TagReferences) SyncReferences(ctx context.Context, localRepo, remoteRe
refsDigests = append(refsDigests, referenceDigest)

if ref.metaDB != nil {
- ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).
- Msg("metaDB: trying to add oci references for image")
+ ref.log.Debug().Str("repository", localRepo).Str("subject", subjectDigestStr).Str("component", "metadb").
+ Msg("trying to add oci references for image")

err = meta.SetImageMetaFromInput(ctx, localRepo, referenceDigest.String(), referrer.MediaType,
referenceDigest, referenceBuf, ref.storeController.GetImageStore(localRepo),

@@ -121,8 +121,8 @@ func (ref TagReferences) SyncReferences(ctx context.Context, localRepo, remoteRe
localRepo, subjectDigestStr, err)
}

- ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).
- Msg("metaDB: successfully added oci references to MetaDB for image")
+ ref.log.Info().Str("repository", localRepo).Str("subject", subjectDigestStr).Str("component", "metadb").
+ Msg("successfully added oci references to MetaDB for image")
}
}
@@ -144,7 +144,7 @@ func (service *BaseService) SetNextAvailableClient() error {
}

if err != nil {
- service.log.Error().Err(err).Str("url", url).Msg("sync: failed to initialize http client")
+ service.log.Error().Err(err).Str("url", url).Msg("failed to initialize http client")

return err
}

@@ -203,7 +203,7 @@ func (service *BaseService) GetNextRepo(lastRepo string) (string, error) {
return err
}, service.retryOptions); err != nil {
service.log.Error().Str("errorType", common.TypeOf(err)).Str("remote registry", service.client.GetConfig().URL).
- Err(err).Msg("error while getting repositories from remote registry")
+ Err(err).Msg("failed to get repository list from remote registry")

return "", err
}

@@ -234,15 +234,15 @@ func (service *BaseService) SyncReference(ctx context.Context, repo string,
if len(service.config.Content) > 0 {
remoteRepo = service.contentManager.GetRepoSource(repo)
if remoteRepo == "" {
- service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("subject", subjectDigestStr).
+ service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("subject", subjectDigestStr).
Str("reference type", referenceType).Msg("will not sync reference for image, filtered out by content")

return zerr.ErrSyncImageFilteredOut
}
}

- service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("subject", subjectDigestStr).
- Str("reference type", referenceType).Msg("sync: syncing reference for image")
+ service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("subject", subjectDigestStr).
+ Str("reference type", referenceType).Msg("syncing reference for image")

return service.references.SyncReference(ctx, repo, remoteRepo, subjectDigestStr, referenceType)
}

@@ -256,15 +256,15 @@ func (service *BaseService) SyncImage(ctx context.Context, repo, reference strin
if len(service.config.Content) > 0 {
remoteRepo = service.contentManager.GetRepoSource(repo)
if remoteRepo == "" {
- service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("reference", reference).
+ service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("reference", reference).
Msg("will not sync image, filtered out by content")

return zerr.ErrSyncImageFilteredOut
}
}

- service.log.Info().Str("remote", remoteURL).Str("repo", repo).Str("reference", reference).
- Msg("sync: syncing image")
+ service.log.Info().Str("remote", remoteURL).Str("repository", repo).Str("reference", reference).
+ Msg("syncing image")

manifestDigest, err := service.syncTag(ctx, repo, remoteRepo, reference)
if err != nil {

@@ -281,8 +281,8 @@ func (service *BaseService) SyncImage(ctx context.Context, repo, reference strin

// sync repo periodically.
func (service *BaseService) SyncRepo(ctx context.Context, repo string) error {
- service.log.Info().Str("repo", repo).Str("registry", service.client.GetConfig().URL).
- Msg("sync: syncing repo")
+ service.log.Info().Str("repository", repo).Str("registry", service.client.GetConfig().URL).
+ Msg("syncing repo")

var err error

@@ -293,8 +293,8 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error {

return err
}, service.retryOptions); err != nil {
- service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).
- Err(err).Msg("error while getting tags for repo")
+ service.log.Error().Str("errorType", common.TypeOf(err)).Str("repository", repo).
+ Err(err).Msg("failed to get tags for repository")

return err
}

@@ -305,7 +305,7 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error {
return err
}

- service.log.Info().Str("repo", repo).Msgf("sync: syncing tags %v", tags)
+ service.log.Info().Str("repository", repo).Msgf("syncing tags %v", tags)

// apply content.destination rule
destinationRepo := service.contentManager.GetRepoDestination(repo)

@@ -331,8 +331,8 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error {
continue
}

- service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).
- Err(err).Msg("error while syncing tags for repo")
+ service.log.Error().Str("errorType", common.TypeOf(err)).Str("repository", repo).
+ Err(err).Msg("failed to sync tags for repository")

return err
}

@@ -346,15 +346,15 @@ func (service *BaseService) SyncRepo(ctx context.Context, repo string) error {

return err
}, service.retryOptions); err != nil {
- service.log.Error().Str("errorType", common.TypeOf(err)).Str("repo", repo).
- Err(err).Msg("error while syncing tags for repo")
+ service.log.Error().Str("errorType", common.TypeOf(err)).Str("repository", repo).
+ Err(err).Msg("failed to sync tags for repository")

return err
}
}
}

- service.log.Info().Str("repo", repo).Msg("sync: finished syncing repo")
+ service.log.Info().Str("component", "sync").Str("repository", repo).Msg("finished syncing repository")

return nil
}

@@ -375,14 +375,14 @@ func (service *BaseService) syncTag(ctx context.Context, destinationRepo, remote
remoteImageRef, err := service.remote.GetImageReference(remoteRepo, tag)
if err != nil {
service.log.Error().Err(err).Str("errortype", common.TypeOf(err)).
- Str("repo", remoteRepo).Str("reference", tag).Msg("couldn't get a remote image reference")
+ Str("repository", remoteRepo).Str("reference", tag).Msg("couldn't get a remote image reference")

return "", err
}

_, mediaType, manifestDigest, err := service.remote.GetManifestContent(remoteImageRef)
if err != nil {
- service.log.Error().Err(err).Str("repo", remoteRepo).Str("reference", tag).
+ service.log.Error().Err(err).Str("repository", remoteRepo).Str("reference", tag).
Msg("couldn't get upstream image manifest details")

return "", err

@@ -407,7 +407,7 @@ func (service *BaseService) syncTag(ctx context.Context, destinationRepo, remote
skipImage, err := service.destination.CanSkipImage(destinationRepo, tag, manifestDigest)
if err != nil {
service.log.Error().Err(err).Str("errortype", common.TypeOf(err)).
- Str("repo", destinationRepo).Str("reference", tag).
+ Str("repository", destinationRepo).Str("reference", tag).
Msg("couldn't check if the local image can be skipped")
}

@@ -415,7 +415,7 @@ func (service *BaseService) syncTag(ctx context.Context, destinationRepo, remote
localImageRef, err := service.destination.GetImageReference(destinationRepo, tag)
if err != nil {
service.log.Error().Err(err).Str("errortype", common.TypeOf(err)).
- Str("repo", destinationRepo).Str("reference", tag).Msg("couldn't get a local image reference")
+ Str("repository", destinationRepo).Str("reference", tag).Msg("couldn't get a local image reference")

return "", err
}

@@ -435,7 +435,7 @@ func (service *BaseService) syncTag(ctx context.Context, destinationRepo, remote
err = service.destination.CommitImage(localImageRef, destinationRepo, tag)
if err != nil {
service.log.Error().Err(err).Str("errortype", common.TypeOf(err)).
- Str("repo", destinationRepo).Str("reference", tag).Msg("couldn't commit image to local image store")
+ Str("repository", destinationRepo).Str("reference", tag).Msg("couldn't commit image to local image store")

return "", err
}

@@ -444,7 +444,8 @@ func (service *BaseService) syncTag(ctx context.Context, destinationRepo, remote
Msg("skipping image because it's already synced")
}

- service.log.Info().Str("image", remoteImageRef.DockerReference().String()).Msg("sync: finished syncing image")
+ service.log.Info().Str("component", "sync").
+ Str("image", remoteImageRef.DockerReference().String()).Msg("finished syncing image")

return manifestDigest, nil
}
@@ -101,7 +101,7 @@ func (gen *TaskGenerator) Next() (scheduler.Task, error) {
}

if repo == "" {
- gen.log.Info().Msg("sync: finished syncing all repos")
+ gen.log.Info().Str("component", "sync").Msg("finished syncing all repos")
gen.done = true

return nil, nil
@@ -2091,7 +2091,7 @@ func TestConfigReloader(t *testing.T) {
data, err := os.ReadFile(logFile.Name())
t.Logf("downstream log: %s", string(data))
So(err, ShouldBeNil)
- So(string(data), ShouldContainSubstring, "unable to start sync extension")
+ So(string(data), ShouldContainSubstring, "failed to start sync extension")
So(string(data), ShouldContainSubstring, "\"TLSVerify\":false")
So(string(data), ShouldContainSubstring, "\"OnDemand\":false")
})

@@ -2852,7 +2852,7 @@ func TestInvalidRegex(t *testing.T) {
defer dcm.StopServer()

found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
- "couldn't compile regex", 15*time.Second)
+ "failed to compile regex", 15*time.Second)
if err != nil {
panic(err)
}

@@ -4974,7 +4974,7 @@ func TestOnDemandRetryGoroutineErr(t *testing.T) {
So(resp.StatusCode(), ShouldEqual, http.StatusNotFound)

found, err := test.ReadLogFileAndSearchString(dctlr.Config.Log.Output,
- "sync routine: error while copying image", 15*time.Second)
+ "failed to copy image", 15*time.Second)
if err != nil {
panic(err)
}
@@ -61,3 +61,5 @@ For example, lookup a cache (fast path) and it throws a not-found error, and we
expect to handle it and perform a slow path lookup. Instead of logging the
lookup failure at ERROR level, it may be more appropriate to log at DEBUG level
and then handle the error.
+
+ Also, instead of `Msg("error at something")` standardize on `Msg("failed at something")`.
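A minimal sketch of the guidance above, assuming a hypothetical fast-path cache in front of a slow-path store (the `catalog`, `getManifest`, and `errCacheMiss` names are illustrative, not taken from the zot codebase; only the zerolog-style logging mirrors what this commit standardizes on): the expected miss is logged at DEBUG and handled, the message uses the "failed to ..." form, and the component is attached as a structured field rather than a message prefix.

```go
package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

var errCacheMiss = errors.New("cache: miss")

// catalog is a hypothetical fast-path cache sitting in front of a slow-path store.
type catalog struct {
	log   zerolog.Logger
	cache map[string][]byte
	store map[string][]byte
}

func (c *catalog) getManifest(key string) ([]byte, error) {
	if blob, ok := c.cache[key]; ok {
		return blob, nil // fast path hit
	}

	// An expected miss is logged at DEBUG (not ERROR) and then handled by falling
	// back to the slow path; the message starts with "failed to ..." and the
	// component name goes into a field instead of prefixing the message.
	c.log.Debug().Err(errCacheMiss).Str("component", "cache").Str("key", key).
		Msg("failed to find manifest in cache, falling back to storage")

	blob, ok := c.store[key]
	if !ok {
		return nil, errors.New("manifest not found")
	}

	return blob, nil
}

func main() {
	logger := zerolog.New(os.Stdout).With().Timestamp().Logger()
	ctl := &catalog{
		log:   logger,
		cache: map[string][]byte{},
		store: map[string][]byte{"repo@sha256:abc": []byte("{}")},
	}

	if _, err := ctl.getManifest("repo@sha256:abc"); err != nil {
		logger.Error().Err(err).Msg("failed to get manifest")
	}
}
```

Under these conventions the only difference between a routine miss and a genuine failure is the log level and the handling, not the wording of the message.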
@@ -22,7 +22,7 @@ type Logger struct {
}

func (l Logger) Println(v ...interface{}) {
- l.Logger.Error().Msg("panic recovered")
+ l.Logger.Error().Msg("panic recovered") //nolint: check-logs
}

func NewLogger(level, output string) Logger {
@@ -154,12 +154,12 @@ func (bdw *BoltDB) SetImageMeta(digest godigest.Digest, imageMeta mTypes.ImageMe

pImageMetaBlob, err := proto.Marshal(protoImageMeta)
if err != nil {
- return fmt.Errorf("metadb: error while calculating blob for manifest with digest %s %w", digest, err)
+ return fmt.Errorf("failed to calculate blob for manifest with digest %s %w", digest, err)
}

err = buck.Put([]byte(digest), pImageMetaBlob)
if err != nil {
- return fmt.Errorf("metadb: error while setting manifest data with for digest %s %w", digest, err)
+ return fmt.Errorf("failed to set manifest data with for digest %s %w", digest, err)
}

return nil

@@ -494,7 +494,7 @@ func (bdw *BoltDB) SearchTags(ctx context.Context, searchText string,
searchedRepo, searchedTag, err := common.GetRepoTag(searchText)
if err != nil {
return []mTypes.FullImageMeta{},
- fmt.Errorf("metadb: error while parsing search text, invalid format %w", err)
+ fmt.Errorf("failed to parse search text, invalid format %w", err)
}

err = bdw.DB.View(func(transaction *bbolt.Tx) error {

@@ -538,7 +538,7 @@ func (bdw *BoltDB) SearchTags(ctx context.Context, searchText string,

imageManifestData, err := getProtoImageMeta(imageBuck, manifestDigest)
if err != nil {
- return fmt.Errorf("metadb: error fetching manifest meta for manifest with digest %s %w",
+ return fmt.Errorf("failed to fetch manifest meta for manifest with digest %s %w",
manifestDigest, err)
}

@@ -548,7 +548,7 @@ func (bdw *BoltDB) SearchTags(ctx context.Context, searchText string,

imageIndexData, err := getProtoImageMeta(imageBuck, indexDigest)
if err != nil {
- return fmt.Errorf("metadb: error fetching manifest meta for manifest with digest %s %w",
+ return fmt.Errorf("failed to fetch manifest meta for manifest with digest %s %w",
indexDigest, err)
}

@@ -567,7 +567,7 @@ func (bdw *BoltDB) SearchTags(ctx context.Context, searchText string,

protoImageMeta = imageIndexData
default:
- bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type")
+ bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type")

continue
}

@@ -673,7 +673,7 @@ func (bdw *BoltDB) FilterTags(ctx context.Context, filterRepoTag mTypes.FilterRe
images = append(images, mConvert.GetFullImageMetaFromProto(tag, protoRepoMeta, protoImageIndexMeta))
}
default:
- bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type")
+ bdw.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type")

continue
}

@@ -997,7 +997,7 @@ func (bdw *BoltDB) DeleteSignature(repo string, signedManifestDigest godigest.Di

repoMetaBlob := repoMetaBuck.Get([]byte(repo))
if len(repoMetaBlob) == 0 {
- return zerr.ErrManifestMetaNotFound
+ return zerr.ErrImageMetaNotFound
}

protoRepoMeta, err := unmarshalProtoRepoMeta(repo, repoMetaBlob)

@@ -1007,7 +1007,7 @@ func (bdw *BoltDB) DeleteSignature(repo string, signedManifestDigest godigest.Di

manifestSignatures, found := protoRepoMeta.Signatures[signedManifestDigest.String()]
if !found {
- return zerr.ErrManifestMetaNotFound
+ return zerr.ErrImageMetaNotFound
}

signatureSlice := manifestSignatures.Map[sigMeta.SignatureType]

@@ -1209,7 +1209,7 @@ func (bdw *BoltDB) UpdateStatsOnDownload(repo string, reference string) error {
descriptor, found := protoRepoMeta.Tags[reference]

if !found {
- return zerr.ErrManifestMetaNotFound
+ return zerr.ErrImageMetaNotFound
}

manifestDigest = descriptor.Digest

@@ -1217,7 +1217,7 @@ func (bdw *BoltDB) UpdateStatsOnDownload(repo string, reference string) error {

manifestStatistics, ok := protoRepoMeta.Statistics[manifestDigest]
if !ok {
- return zerr.ErrManifestMetaNotFound
+ return zerr.ErrImageMetaNotFound
}

manifestStatistics.DownloadCount++

@@ -1817,7 +1817,7 @@ func (bdw *BoltDB) AddUserAPIKey(ctx context.Context, hashedKey string, apiKeyDe

err := apiKeysbuck.Put([]byte(hashedKey), []byte(userid))
if err != nil {
- return fmt.Errorf("metaDB: error while setting userData for identity %s %w", userid, err)
+ return fmt.Errorf("failed to set userData for identity %s %w", userid, err)
}

err = bdw.getUserData(userid, transaction, &userData)

@@ -1870,7 +1870,7 @@ func (bdw *BoltDB) DeleteUserAPIKey(ctx context.Context, keyID string) error {

err := apiKeysbuck.Delete([]byte(hash))
if err != nil {
- return fmt.Errorf("userDB: error while deleting userAPIKey entry for hash %s %w", hash, err)
+ return fmt.Errorf("failed to delete userAPIKey entry for hash %s %w", hash, err)
}
}
}

@@ -1975,7 +1975,7 @@ func (bdw *BoltDB) setUserData(userid string, transaction *bbolt.Tx, userData mT

err = buck.Put([]byte(userid), upBlob)
if err != nil {
- return fmt.Errorf("metaDB: error while setting userData for identity %s %w", userid, err)
+ return fmt.Errorf("failed to set userData for identity %s %w", userid, err)
}

return nil

@@ -2001,7 +2001,7 @@ func (bdw *BoltDB) DeleteUserData(ctx context.Context) error {

err := buck.Delete([]byte(userid))
if err != nil {
- return fmt.Errorf("metaDB: error while deleting userData for identity %s %w", userid, err)
+ return fmt.Errorf("failed to delete userData for identity %s %w", userid, err)
}

return nil
@@ -592,7 +592,7 @@ func (dwr *DynamoDB) SearchTags(ctx context.Context, searchText string) ([]mType
searchedRepo, searchedTag, err := common.GetRepoTag(searchText)
if err != nil {
return []mTypes.FullImageMeta{},
- fmt.Errorf("metadb: error while parsing search text, invalid format %w", err)
+ fmt.Errorf("failed to parse search text, invalid format %w", err)
}

if ok, err := reqCtx.RepoIsUserAvailable(ctx, searchedRepo); !ok || err != nil {

@@ -627,7 +627,7 @@ func (dwr *DynamoDB) SearchTags(ctx context.Context, searchText string) ([]mType
imageManifestData, err := dwr.GetProtoImageMeta(ctx, godigest.Digest(manifestDigest))
if err != nil {
return []mTypes.FullImageMeta{},
- fmt.Errorf("metadb: error fetching manifest meta for manifest with digest %s %w", manifestDigest, err)
+ fmt.Errorf("error fetching manifest meta for manifest with digest %s %w", manifestDigest, err)
}

protoImageMeta = imageManifestData

@@ -637,7 +637,7 @@ func (dwr *DynamoDB) SearchTags(ctx context.Context, searchText string) ([]mType
imageIndexData, err := dwr.GetProtoImageMeta(ctx, indexDigest)
if err != nil {
return []mTypes.FullImageMeta{},
- fmt.Errorf("metadb: error fetching manifest meta for manifest with digest %s %w", indexDigest, err)
+ fmt.Errorf("error fetching manifest meta for manifest with digest %s %w", indexDigest, err)
}

manifestDataList := make([]*proto_go.ManifestMeta, 0, len(imageIndexData.Index.Index.Manifests))

@@ -657,7 +657,7 @@ func (dwr *DynamoDB) SearchTags(ctx context.Context, searchText string) ([]mType

protoImageMeta = imageIndexData
default:
- dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type")
+ dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type")

continue
}

@@ -763,7 +763,7 @@ func (dwr *DynamoDB) FilterTags(ctx context.Context, filterRepoTag mTypes.Filter
images = append(images, mConvert.GetFullImageMetaFromProto(tag, protoRepoMeta, protoImageIndexMeta))
}
default:
- dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("Unsupported media type")
+ dwr.Log.Error().Str("mediaType", descriptor.MediaType).Msg("unsupported media type")

continue
}

@@ -1112,7 +1112,7 @@ func (dwr *DynamoDB) UpdateStatsOnDownload(repo string, reference string) error
descriptor, found := repoMeta.Tags[reference]

if !found {
- return zerr.ErrManifestMetaNotFound
+ return zerr.ErrImageMetaNotFound
}

descriptorDigest = descriptor.Digest

@@ -1120,7 +1120,7 @@ func (dwr *DynamoDB) UpdateStatsOnDownload(repo string, reference string) error

manifestStatistics, ok := repoMeta.Statistics[descriptorDigest]
if !ok {
- return zerr.ErrManifestMetaNotFound
+ return zerr.ErrImageMetaNotFound
}

manifestStatistics.DownloadCount++

@@ -1294,7 +1294,7 @@ func (dwr *DynamoDB) DeleteSignature(repo string, signedManifestDigest godigest.
)

if manifestSignatures, found = protoRepoMeta.Signatures[signedManifestDigest.String()]; !found {
- return zerr.ErrManifestMetaNotFound
+ return zerr.ErrImageMetaNotFound
}

signatureSlice := manifestSignatures.Map[sigType]

@@ -1728,7 +1728,7 @@ func (dwr DynamoDB) GetUserAPIKeys(ctx context.Context) ([]mTypes.APIKeyDetails,

userData, err := dwr.GetUserData(ctx)
if err != nil && !errors.Is(err, zerr.ErrUserDataNotFound) {
- return nil, fmt.Errorf("metaDB: error while getting userData for identity %s %w", userid, err)
+ return nil, fmt.Errorf("failed to get userData for identity %s %w", userid, err)
}

for hashedKey, apiKeyDetails := range userData.APIKeys {

@@ -1764,7 +1764,7 @@ func (dwr DynamoDB) AddUserAPIKey(ctx context.Context, hashedKey string, apiKeyD

userData, err := dwr.GetUserData(ctx)
if err != nil && !errors.Is(err, zerr.ErrUserDataNotFound) {
- return fmt.Errorf("metaDB: error while getting userData for identity %s %w", userid, err)
+ return fmt.Errorf("failed to get userData for identity %s %w", userid, err)
}

if userData.APIKeys == nil {

@@ -1825,7 +1825,7 @@ func (dwr DynamoDB) AddUserAPIKey(ctx context.Context, hashedKey string, apiKeyD
func (dwr DynamoDB) DeleteUserAPIKey(ctx context.Context, keyID string) error {
userData, err := dwr.GetUserData(ctx)
if err != nil {
- return fmt.Errorf("metaDB: error while getting userData %w", err)
+ return fmt.Errorf("failed to get userData %w", err)
}

for hash, apiKeyDetails := range userData.APIKeys {

@@ -1839,7 +1839,7 @@ func (dwr DynamoDB) DeleteUserAPIKey(ctx context.Context, keyID string) error {
},
})
if err != nil {
- return fmt.Errorf("metaDB: error while deleting userAPIKey entry for hash %s %w", hash, err)
+ return fmt.Errorf("failed to delete userAPIKey entry for hash %s %w", hash, err)
}

err := dwr.SetUserData(ctx, userData)
@@ -31,7 +31,7 @@ func OnUpdateManifest(ctx context.Context, repo, reference, mediaType string, di

if err := imgStore.DeleteImageManifest(repo, reference, false); err != nil {
log.Error().Err(err).Str("reference", reference).Str("repository", repo).
- Msg("couldn't remove image manifest in repo")
+ Msg("failed to remove image manifest in repo")

return err
}

@@ -57,7 +57,7 @@ func OnDeleteManifest(repo, reference, mediaType string, digest godigest.Digest,
isSignature, signatureType, signedManifestDigest, err := storage.CheckIsImageSignature(repo, manifestBlob,
reference)
if err != nil {
- log.Error().Err(err).Msg("can't check if image is a signature or not")
+ log.Error().Err(err).Msg("failed to check if image is a signature or not")

return err
}

@@ -70,18 +70,20 @@ func OnDeleteManifest(repo, reference, mediaType string, digest godigest.Digest,
SignatureType: signatureType,
})
if err != nil {
- log.Error().Err(err).Msg("metadb: can't check if image is a signature or not")
+ log.Error().Err(err).Str("component", "metadb").
+ Msg("failed to check if image is a signature or not")
manageRepoMetaSuccessfully = false
}
} else {
err = metaDB.RemoveRepoReference(repo, reference, digest)
if err != nil {
- log.Info().Msg("metadb: restoring image store")
+ log.Info().Str("component", "metadb").Msg("restoring image store")

// restore image store
_, _, err := imgStore.PutImageManifest(repo, reference, mediaType, manifestBlob)
if err != nil {
- log.Error().Err(err).Msg("metadb: error while restoring image store, database is not consistent")
+ log.Error().Err(err).Str("component", "metadb").
+ Msg("failed to restore manifest to image store, database is not consistent")
}

manageRepoMetaSuccessfully = false

@@ -89,8 +91,8 @@ func OnDeleteManifest(repo, reference, mediaType string, digest godigest.Digest,
}

if !manageRepoMetaSuccessfully {
- log.Info().Str("tag", reference).Str("repository", repo).
- Msg("metadb: deleting image meta was unsuccessful for tag in repo")
+ log.Info().Str("tag", reference).Str("repository", repo).Str("component", "metadb").
+ Msg("failed to delete image meta was unsuccessful for tag in repo")

return err
}

@@ -105,7 +107,7 @@ func OnGetManifest(name, reference, mediaType string, body []byte,
// check if image is a signature
isSignature, _, _, err := storage.CheckIsImageSignature(name, body, reference)
if err != nil {
- log.Error().Err(err).Msg("can't check if manifest is a signature or not")
+ log.Error().Err(err).Msg("failed to check if manifest is a signature or not")

return err
}

@@ -121,7 +123,7 @@ func OnGetManifest(name, reference, mediaType string, body []byte,
err = metaDB.UpdateStatsOnDownload(name, reference)
if err != nil {
log.Error().Err(err).Str("repository", name).Str("reference", reference).
- Msg("unexpected error for image")
+ Msg("failed to update stats on download image")

return err
}
@@ -43,8 +43,8 @@ func Create(dbtype string, dbDriver, parameters interface{}, log log.Logger, //n
{
properDriver, ok := dbDriver.(*bbolt.DB)
if !ok {
- log.Error().Err(errors.ErrTypeAssertionFailed).Msgf("expected type '%T' but got '%T'",
- &bbolt.DB{}, dbDriver)
+ log.Error().Err(errors.ErrTypeAssertionFailed).
+ Msgf("failed to cast type, expected type '%T' but got '%T'", &bbolt.DB{}, dbDriver)

return nil, errors.ErrTypeAssertionFailed
}

@@ -55,16 +55,17 @@ func Create(dbtype string, dbDriver, parameters interface{}, log log.Logger, //n
{
properDriver, ok := dbDriver.(*dynamodb.Client)
if !ok {
- log.Error().Err(errors.ErrTypeAssertionFailed).Msgf("expected type '%T' but got '%T'",
- &dynamodb.Client{}, dbDriver)
+ log.Error().Err(errors.ErrTypeAssertionFailed).
+ Msgf("failed to cast type, expected type '%T' but got '%T'", &dynamodb.Client{}, dbDriver)

return nil, errors.ErrTypeAssertionFailed
}

properParameters, ok := parameters.(mdynamodb.DBDriverParameters)
if !ok {
- log.Error().Err(errors.ErrTypeAssertionFailed).Msgf("expected type '%T' but got '%T'",
- mdynamodb.DBDriverParameters{}, parameters)
+ log.Error().Err(errors.ErrTypeAssertionFailed).
+ Msgf("failed to cast type, expected type '%T' but got '%T'", mdynamodb.DBDriverParameters{},
+ parameters)

return nil, errors.ErrTypeAssertionFailed
}

@@ -125,7 +126,7 @@ func toStringIfOk(cacheDriverConfig map[string]interface{}, param string, log lo
val, ok := cacheDriverConfig[param]

if !ok {
- log.Error().Str("field", param).Msg("parsing CacheDriver config failed, field is not present")
+ log.Error().Str("field", param).Msg("failed to parse CacheDriver config, field is not present")

return "", false
}

@@ -133,13 +134,13 @@ func toStringIfOk(cacheDriverConfig map[string]interface{}, param string, log lo
str, ok := val.(string)

if !ok {
- log.Error().Str("parameter", param).Msg("parsing CacheDriver config failed, parameter isn't a string")
+ log.Error().Str("parameter", param).Msg("failed to parse CacheDriver config, parameter isn't a string")

return "", false
}

if str == "" {
- log.Error().Str("field", param).Msg("parsing CacheDriver config failed, field is empty")
+ log.Error().Str("field", param).Msg("failed to parse CacheDriver config, field is empty")

return "", false
}
@@ -25,7 +25,7 @@ const (
// ParseStorage will sync all repos found in the rootdirectory of the oci layout that zot was deployed on with the
// ParseStorage database.
func ParseStorage(metaDB mTypes.MetaDB, storeController stypes.StoreController, log log.Logger) error {
- log.Info().Msg("Started parsing storage and updating MetaDB")
+ log.Info().Str("component", "metadb").Msg("parsing storage and initializing")

allStorageRepos, err := getAllRepos(storeController, log)
if err != nil {

@@ -34,8 +34,9 @@ func ParseStorage(metaDB mTypes.MetaDB, storeController stypes.StoreController,

allMetaDBRepos, err := metaDB.GetAllRepoNames()
if err != nil {
- log.Error().Err(err).
- Msg("load-metadb-layout: failed to get all repo names present in metadb")
+ rootDir := storeController.GetDefaultImageStore().RootDir()
+ log.Error().Err(err).Str("component", "metadb").Str("rootDir", rootDir).
+ Msg("failed to get all repo names present under rootDir")

return err
}

@@ -43,8 +44,8 @@ func ParseStorage(metaDB mTypes.MetaDB, storeController stypes.StoreController,
for _, repo := range getReposToBeDeleted(allStorageRepos, allMetaDBRepos) {
err := metaDB.DeleteRepoMeta(repo)
if err != nil {
- log.Error().Err(err).Str("rootDir", storeController.GetImageStore(repo).RootDir()).
- Str("repo", repo).Msg("load-metadb-layout: failed to get all repo names present in metadb")
+ log.Error().Err(err).Str("rootDir", storeController.GetImageStore(repo).RootDir()).Str("component", "metadb").
+ Str("repo", repo).Msg("failed to delete repo meta")

return err
}

@@ -58,7 +59,8 @@ func ParseStorage(metaDB mTypes.MetaDB, storeController stypes.StoreController,

_, _, storageLastUpdated, err := imgStore.StatIndex(repo)
if err != nil {
- log.Error().Err(err).Str("repository", repo).Msg("failed to sync repo")
+ log.Error().Err(err).Str("rootDir", imgStore.RootDir()).
+ Str("repo", repo).Msg("failed to sync repo")

continue
}

@@ -71,13 +73,13 @@ func ParseStorage(metaDB mTypes.MetaDB, storeController stypes.StoreController,

err = ParseRepo(repo, metaDB, storeController, log)
if err != nil {
- log.Error().Err(err).Str("repository", repo).Msg("failed to sync repo")
+ log.Error().Err(err).Str("repo", repo).Str("rootDir", imgStore.RootDir()).Msg("failed to sync repo")

continue
}
}

- log.Info().Msg("Done parsing storage and updating MetaDB")
+ log.Info().Str("component", "metadb").Msg("successfully initialized")

return nil
}

@@ -112,7 +114,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController stypes.StoreCo

indexBlob, err := imageStore.GetIndexContent(repo)
if err != nil {
- log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to read index.json for repo")
+ log.Error().Err(err).Str("repository", repo).Msg("failed to read index.json for repo")

return err
}

@@ -121,14 +123,14 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController stypes.StoreCo

err = json.Unmarshal(indexBlob, &indexContent)
if err != nil {
- log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to unmarshal index.json for repo")
+ log.Error().Err(err).Str("repository", repo).Msg("failed to unmarshal index.json for repo")

return err
}

err = metaDB.ResetRepoReferences(repo)
if err != nil && !errors.Is(err, zerr.ErrRepoMetaNotFound) {
- log.Error().Err(err).Str("repository", repo).Msg("load-repo: failed to reset tag field in RepoMetadata for repo")
+ log.Error().Err(err).Str("repository", repo).Msg("failed to reset tag field in RepoMetadata for repo")

return err
}

@@ -143,7 +145,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController stypes.StoreCo
manifestBlob, _, _, err := imageStore.GetImageManifest(repo, manifest.Digest.String())
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("digest", manifest.Digest.String()).
- Msg("load-repo: failed to get blob for image")
+ Msg("failed to get blob for image")

return err
}

@@ -158,7 +160,7 @@ func ParseRepo(repo string, metaDB mTypes.MetaDB, storeController stypes.StoreCo
imageStore, metaDB, log)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("tag", tag).
- Msg("load-repo: failed to set metadata for image")
+ Msg("failed to set metadata for image")

return err
}

@@ -171,7 +173,7 @@ func getAllRepos(storeController stypes.StoreController, log log.Logger) ([]stri
allRepos, err := storeController.GetDefaultImageStore().GetRepositories()
if err != nil {
log.Error().Err(err).Str("rootDir", storeController.GetDefaultImageStore().RootDir()).
- Msg("load-local-layout: failed to get all repo names present under rootDir")
+ Msg("failed to get all repo names present under rootDir")

return nil, err
}

@@ -181,7 +183,7 @@ func getAllRepos(storeController stypes.StoreController, log log.Logger) ([]stri
substoreRepos, err := store.GetRepositories()
if err != nil {
log.Error().Err(err).Str("rootDir", store.RootDir()).
- Msg("load-local-layout: failed to get all repo names present under rootDir")
+ Msg("failed to get all repo names present under rootDir")

return nil, err
}

@@ -214,7 +216,7 @@ func getCosignSignatureLayersInfo(
var manifestContent ispec.Manifest
if err := json.Unmarshal(manifestBlob, &manifestContent); err != nil {
log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("digest", manifestDigest).Msg(
- "load-repo: unable to marshal blob index")
+ "failed to marshal blob index")

return layers, err
}

@@ -228,7 +230,7 @@ func getCosignSignatureLayersInfo(
layerContent, err := imageStore.GetBlobContent(repo, layer.Digest)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("layerDigest", layer.Digest.String()).Msg(
- "load-repo: unable to get cosign signature layer content")
+ "failed to get cosign signature layer content")

return layers, err
}

@@ -236,7 +238,7 @@ func getCosignSignatureLayersInfo(
layerSigKey, ok := layer.Annotations[zcommon.CosignSigKey]
if !ok {
log.Error().Err(err).Str("repository", repo).Str("reference", tag).Str("layerDigest", layer.Digest.String()).Msg(
- "load-repo: unable to get specific annotation of cosign signature")
+ "failed to get specific annotation of cosign signature")
}

layers = append(layers, mTypes.LayerInfo{

@@ -257,7 +259,7 @@ func getNotationSignatureLayersInfo(
var manifestContent ispec.Manifest
if err := json.Unmarshal(manifestBlob, &manifestContent); err != nil {
log.Error().Err(err).Str("repository", repo).Str("reference", manifestDigest).Msg(
- "load-repo: unable to marshal blob index")
+ "failed to marshal blob index")

return layers, err
}

@@ -269,7 +271,7 @@ func getNotationSignatureLayersInfo(

if len(manifestContent.Layers) != 1 {
log.Error().Err(zerr.ErrBadManifest).Str("repository", repo).Str("reference", manifestDigest).
- Msg("load-repo: notation signature manifest requires exactly one layer but it does not")
+ Msg("notation signature manifest requires exactly one layer but it does not")

return layers, zerr.ErrBadManifest
}

@@ -284,7 +286,7 @@ func getNotationSignatureLayersInfo(
layerContent, err := imageStore.GetBlobContent(repo, layer)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("reference", manifestDigest).Str("layerDigest", layer.String()).Msg(
- "load-repo: unable to get notation signature blob content")
+ "failed to get notation signature blob content")

return layers, err
}

@@ -314,7 +316,7 @@ func SetImageMetaFromInput(ctx context.Context, repo, reference, mediaType strin

err := json.Unmarshal(blob, &manifestContent)
if err != nil {
- log.Error().Err(err).Msg("metadb: error while getting image data")
+ log.Error().Err(err).Str("component", "metadb").Msg("failed to unmarshal image manifest")

return err
}

@@ -348,7 +350,7 @@ func SetImageMetaFromInput(ctx context.Context, repo, reference, mediaType strin
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("tag", reference).
Str("manifestDigest", signedManifestDigest.String()).
- Msg("load-repo: failed set signature meta for signed image")
+ Msg("failed set signature meta for signed image")

return err
}

@@ -356,7 +358,7 @@ func SetImageMetaFromInput(ctx context.Context, repo, reference, mediaType strin
err = metaDB.UpdateSignaturesValidity(ctx, repo, signedManifestDigest)
if err != nil {
log.Error().Err(err).Str("repository", repo).Str("reference", reference).Str("digest",
- signedManifestDigest.String()).Msg("load-repo: failed verify signatures validity for signed image")
+ signedManifestDigest.String()).Msg("failed to verify signature validity for signed image")

return err
}

@@ -380,7 +382,7 @@ func SetImageMetaFromInput(ctx context.Context, repo, reference, mediaType strin

err := metaDB.SetRepoReference(ctx, repo, reference, imageMeta)
if err != nil {
- log.Error().Err(err).Msg("metadb: error while setting repo meta")
+ log.Error().Err(err).Str("component", "metadb").Msg("failed to set repo meta")

return err
}
@@ -128,7 +128,7 @@ func (scheduler *Scheduler) poolWorker(ctx context.Context) {
for task := range scheduler.workerChan {
// leave below line here (for zot minimal metrics can be enabled on first scraping)
metricsEnabled := scheduler.metricServer.IsEnabled()
- scheduler.log.Debug().Int("worker", workerID).Str("task", task.String()).Msg("scheduler: starting task")
+ scheduler.log.Debug().Int("worker", workerID).Str("task", task.String()).Msg("starting task")

if metricsEnabled {
scheduler.tasksLock.Lock()

@@ -139,7 +139,7 @@ func (scheduler *Scheduler) poolWorker(ctx context.Context) {

if err := task.DoWork(ctx); err != nil {
scheduler.log.Error().Int("worker", workerID).Str("task", task.String()).Err(err).
- Msg("scheduler: error while executing task")
+ Msg("failed to execute task")
}

if metricsEnabled {

@@ -150,7 +150,7 @@ func (scheduler *Scheduler) poolWorker(ctx context.Context) {
monitoring.ObserveWorkersTasksDuration(scheduler.metricServer, task.Name(), workDuration)
}

- scheduler.log.Debug().Int("worker", workerID).Str("task", task.String()).Msg("scheduler: finished task")
+ scheduler.log.Debug().Int("worker", workerID).Str("task", task.String()).Msg("finished task")
}
}(i + 1)
}

@@ -250,7 +250,7 @@ func (scheduler *Scheduler) RunScheduler(ctx context.Context) {
scheduler.shutdown()
}

- scheduler.log.Debug().Msg("scheduler: received stop signal, gracefully shutting down...")
+ scheduler.log.Debug().Msg("received stop signal, gracefully shutting down...")

return
default:

@@ -261,7 +261,7 @@ func (scheduler *Scheduler) RunScheduler(ctx context.Context) {
if task != nil {
// push tasks into worker pool
if !scheduler.inShutdown() {
- scheduler.log.Debug().Str("task", task.String()).Msg("scheduler: pushing task into worker pool")
+ scheduler.log.Debug().Str("task", task.String()).Msg("pushing task into worker pool")
scheduler.workerChan <- task
}
}

@@ -286,7 +286,7 @@ func (scheduler *Scheduler) pushReadyGenerators() {
scheduler.waitingGenerators = append(scheduler.waitingGenerators[:i], scheduler.waitingGenerators[i+1:]...)
modified = true

- scheduler.log.Debug().Msg("scheduler: waiting generator is ready, pushing to ready generators")
+ scheduler.log.Debug().Msg("waiting generator is ready, pushing to ready generators")

break
}

@@ -383,7 +383,7 @@ func (scheduler *Scheduler) SubmitTask(task Task, priority Priority) {

select {
case tasksQ <- task:
- scheduler.log.Info().Msg("scheduler: adding a new task")
+ scheduler.log.Info().Msg("adding a new task")
default:
if scheduler.inShutdown() {
return

@@ -434,7 +434,7 @@ func (gen *generator) generate(sch *Scheduler) {
if gen.remainingTask == nil {
nextTask, err := gen.taskGenerator.Next()
if err != nil {
- sch.log.Error().Err(err).Msg("scheduler: error while executing generator")
+ sch.log.Error().Err(err).Msg("failed to execute generator")

return
}
@@ -161,7 +161,7 @@ func TestScheduler(t *testing.T) {
 		So(string(data), ShouldContainSubstring, "executing high priority task; index: 1")
 		So(string(data), ShouldContainSubstring, "executing high priority task; index: 2")
 		So(string(data), ShouldNotContainSubstring, "executing medium priority task; index: 1")
-		So(string(data), ShouldNotContainSubstring, "error while executing task")
+		So(string(data), ShouldNotContainSubstring, "failed to execute task")
 	})

 	Convey("Test task returning an error", t, func() {

@@ -185,8 +185,8 @@ func TestScheduler(t *testing.T) {

 		data, err := os.ReadFile(logFile.Name())
 		So(err, ShouldBeNil)
-		So(string(data), ShouldContainSubstring, "scheduler: adding a new task")
-		So(string(data), ShouldContainSubstring, "error while executing task")
+		So(string(data), ShouldContainSubstring, "adding a new task")
+		So(string(data), ShouldContainSubstring, "failed to execute task")
 	})

 	Convey("Test resubmit generator", t, func() {

@@ -229,7 +229,7 @@ func TestScheduler(t *testing.T) {

 		data, err := os.ReadFile(logFile.Name())
 		So(err, ShouldBeNil)
-		So(string(data), ShouldNotContainSubstring, "scheduler: adding a new task")
+		So(string(data), ShouldNotContainSubstring, "adding a new task")
 	})

 	Convey("Test adding a new task when context is done", t, func() {

@@ -253,7 +253,7 @@ func TestScheduler(t *testing.T) {

 		data, err := os.ReadFile(logFile.Name())
 		So(err, ShouldBeNil)
-		So(string(data), ShouldNotContainSubstring, "scheduler: adding a new task")
+		So(string(data), ShouldNotContainSubstring, "adding a new task")
 	})

 	Convey("Test scheduler Priority.String() method", t, func() {

45  pkg/storage/cache/boltdb.go  vendored
@@ -31,7 +31,7 @@ type BoltDBDriverParameters struct {
 func NewBoltDBCache(parameters interface{}, log zlog.Logger) (*BoltDBDriver, error) {
 	properParameters, ok := parameters.(BoltDBDriverParameters)
 	if !ok {
-		log.Error().Err(zerr.ErrTypeAssertionFailed).Msgf("expected type '%T' but got '%T'",
+		log.Error().Err(zerr.ErrTypeAssertionFailed).Msgf("failed to cast type, expected type '%T' but got '%T'",
 			BoltDBDriverParameters{}, parameters)

 		return nil, zerr.ErrTypeAssertionFailed

@@ -39,7 +39,7 @@ func NewBoltDBCache(parameters interface{}, log zlog.Logger) (*BoltDBDriver, err

 	err := os.MkdirAll(properParameters.RootDir, constants.DefaultDirPerms)
 	if err != nil {
-		log.Error().Err(err).Str("directory", properParameters.RootDir).Msg("unable to create directory for cache db")
+		log.Error().Err(err).Str("directory", properParameters.RootDir).Msg("failed to create directory for cache db")

 		return nil, err
 	}

@@ -55,12 +55,12 @@ func NewBoltDBCache(parameters interface{}, log zlog.Logger) (*BoltDBDriver, err
 		if strings.Contains(err.Error(), "timeout") {
 			err := fmt.Errorf("%w: %w, path '%s'", zerr.ErrTimeout, zerr.ErrDatabaseFileAlreadyInUse, dbPath)

-			log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create cache db")
+			log.Error().Err(err).Str("dbPath", dbPath).Msg("failed to create cache db")

 			return nil, err
 		}

-		log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create cache db")
+		log.Error().Err(err).Str("dbPath", dbPath).Msg("failed to create cache db")

 		return nil, err
 	}

@@ -68,7 +68,7 @@ func NewBoltDBCache(parameters interface{}, log zlog.Logger) (*BoltDBDriver, err
 	if err := cacheDB.Update(func(tx *bbolt.Tx) error {
 		if _, err := tx.CreateBucketIfNotExists([]byte(constants.BlobsCache)); err != nil {
 			// this is a serious failure
-			log.Error().Err(err).Str("dbPath", dbPath).Msg("unable to create a root bucket")
+			log.Error().Err(err).Str("dbPath", dbPath).Msg("failed to create a root bucket")

 			return err
 		}

@@ -76,7 +76,7 @@ func NewBoltDBCache(parameters interface{}, log zlog.Logger) (*BoltDBDriver, err
 		return nil
 	}); err != nil {
 		// something went wrong
-		log.Error().Err(err).Msg("unable to create a cache")
+		log.Error().Err(err).Msg("failed to create a cache")

 		return nil, err
 	}

@@ -99,7 +99,8 @@ func (d *BoltDBDriver) Name() string {

 func (d *BoltDBDriver) PutBlob(digest godigest.Digest, path string) error {
 	if path == "" {
-		d.log.Error().Err(zerr.ErrEmptyValue).Str("digest", digest.String()).Msg("empty path provided")
+		d.log.Error().Err(zerr.ErrEmptyValue).Str("digest", digest.String()).
+			Msg("failed to put blob due to empty path being provided")

 		return zerr.ErrEmptyValue
 	}

@@ -109,7 +110,7 @@ func (d *BoltDBDriver) PutBlob(digest godigest.Digest, path string) error {
 	if d.useRelPaths {
 		path, err = filepath.Rel(d.rootDir, path)
 		if err != nil {
-			d.log.Error().Err(err).Str("path", path).Msg("unable to get relative path")
+			d.log.Error().Err(err).Str("path", path).Msg("failed to get relative path")
 		}
 	}

@@ -118,7 +119,7 @@ func (d *BoltDBDriver) PutBlob(digest godigest.Digest, path string) error {
 		if root == nil {
 			// this is a serious failure
 			err := zerr.ErrCacheRootBucket
-			d.log.Error().Err(err).Msg("unable to access root bucket")
+			d.log.Error().Err(err).Msg("failed to access root bucket")

 			return err
 		}

@@ -126,7 +127,7 @@ func (d *BoltDBDriver) PutBlob(digest godigest.Digest, path string) error {
 		bucket, err := root.CreateBucketIfNotExists([]byte(digest.String()))
 		if err != nil {
 			// this is a serious failure
-			d.log.Error().Err(err).Str("bucket", digest.String()).Msg("unable to create a bucket")
+			d.log.Error().Err(err).Str("bucket", digest.String()).Msg("failed to create a bucket")

 			return err
 		}

@@ -135,13 +136,13 @@ func (d *BoltDBDriver) PutBlob(digest godigest.Digest, path string) error {
 		deduped, err := bucket.CreateBucketIfNotExists([]byte(constants.DuplicatesBucket))
 		if err != nil {
 			// this is a serious failure
-			d.log.Error().Err(err).Str("bucket", constants.DuplicatesBucket).Msg("unable to create a bucket")
+			d.log.Error().Err(err).Str("bucket", constants.DuplicatesBucket).Msg("failed to create a bucket")

 			return err
 		}

 		if err := deduped.Put([]byte(path), nil); err != nil {
-			d.log.Error().Err(err).Str("bucket", constants.DuplicatesBucket).Str("value", path).Msg("unable to put record")
+			d.log.Error().Err(err).Str("bucket", constants.DuplicatesBucket).Str("value", path).Msg("failed to put record")

 			return err
 		}

@@ -153,13 +154,13 @@ func (d *BoltDBDriver) PutBlob(digest godigest.Digest, path string) error {
 		origin, err := bucket.CreateBucket([]byte(constants.OriginalBucket))
 		if err != nil {
 			// this is a serious failure
-			d.log.Error().Err(err).Str("bucket", constants.OriginalBucket).Msg("unable to create a bucket")
+			d.log.Error().Err(err).Str("bucket", constants.OriginalBucket).Msg("failed to create a bucket")

 			return err
 		}

 		if err := origin.Put([]byte(path), nil); err != nil {
-			d.log.Error().Err(err).Str("bucket", constants.OriginalBucket).Str("value", path).Msg("unable to put record")
+			d.log.Error().Err(err).Str("bucket", constants.OriginalBucket).Str("value", path).Msg("failed to put record")

 			return err
 		}

@@ -181,7 +182,7 @@ func (d *BoltDBDriver) GetBlob(digest godigest.Digest) (string, error) {
 		if root == nil {
 			// this is a serious failure
 			err := zerr.ErrCacheRootBucket
-			d.log.Error().Err(err).Msg("unable to access root bucket")
+			d.log.Error().Err(err).Msg("failed to access root bucket")

 			return err
 		}

@@ -208,7 +209,7 @@ func (d *BoltDBDriver) HasBlob(digest godigest.Digest, blob string) bool {
 		if root == nil {
 			// this is a serious failure
 			err := zerr.ErrCacheRootBucket
-			d.log.Error().Err(err).Msg("unable to access root bucket")
+			d.log.Error().Err(err).Msg("failed to access root bucket")

 			return err
 		}

@@ -259,7 +260,7 @@ func (d *BoltDBDriver) DeleteBlob(digest godigest.Digest, path string) error {
 	if d.useRelPaths {
 		path, err = filepath.Rel(d.rootDir, path)
 		if err != nil {
-			d.log.Error().Err(err).Str("path", path).Msg("unable to get relative path")
+			d.log.Error().Err(err).Str("path", path).Msg("failed to get relative path")
 		}
 	}

@@ -268,7 +269,7 @@ func (d *BoltDBDriver) DeleteBlob(digest godigest.Digest, path string) error {
 		if root == nil {
 			// this is a serious failure
 			err := zerr.ErrCacheRootBucket
-			d.log.Error().Err(err).Msg("unable to access root bucket")
+			d.log.Error().Err(err).Msg("failed to access root bucket")

 			return err
 		}

@@ -285,7 +286,7 @@ func (d *BoltDBDriver) DeleteBlob(digest godigest.Digest, path string) error {

 		if err := deduped.Delete([]byte(path)); err != nil {
 			d.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", constants.DuplicatesBucket).
-				Str("path", path).Msg("unable to delete")
+				Str("path", path).Msg("failed to delete")

 			return err
 		}

@@ -296,7 +297,7 @@ func (d *BoltDBDriver) DeleteBlob(digest godigest.Digest, path string) error {
 		if originBlob != nil {
 			if err := origin.Delete([]byte(path)); err != nil {
 				d.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", constants.OriginalBucket).
-					Str("path", path).Msg("unable to delete")
+					Str("path", path).Msg("failed to delete")

 				return err
 			}

@@ -306,7 +307,7 @@ func (d *BoltDBDriver) DeleteBlob(digest godigest.Digest, path string) error {
 			if dedupedBlob != nil {
 				if err := origin.Put(dedupedBlob, nil); err != nil {
 					d.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", constants.OriginalBucket).Str("path", path).
-						Msg("unable to put")
+						Msg("failed to put")

 					return err
 				}

@@ -320,7 +321,7 @@ func (d *BoltDBDriver) DeleteBlob(digest godigest.Digest, path string) error {
 		d.log.Debug().Str("digest", digest.String()).Str("path", path).Msg("deleting empty bucket")
 		if err := root.DeleteBucket([]byte(digest)); err != nil {
 			d.log.Error().Err(err).Str("digest", digest.String()).Str("bucket", digest.String()).Str("path", path).
-				Msg("unable to delete")
+				Msg("failed to delete")

 			return err
 		}

19  pkg/storage/cache/dynamodb.go  vendored
@@ -64,7 +64,7 @@ func (d *DynamoDBDriver) NewTable(tableName string) error {
 func NewDynamoDBCache(parameters interface{}, log zlog.Logger) (*DynamoDBDriver, error) {
 	properParameters, ok := parameters.(DynamoDBDriverParameters)
 	if !ok {
-		log.Error().Err(zerr.ErrTypeAssertionFailed).Msgf("expected type '%T' but got '%T'",
+		log.Error().Err(zerr.ErrTypeAssertionFailed).Msgf("failed to cast type, expected type '%T' but got '%T'",
 			BoltDBDriverParameters{}, parameters)

 		return nil, zerr.ErrTypeAssertionFailed

@@ -86,7 +86,7 @@ func NewDynamoDBCache(parameters interface{}, log zlog.Logger) (*DynamoDBDriver,
 	cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(properParameters.Region),
 		config.WithEndpointResolverWithOptions(customResolver))
 	if err != nil {
-		log.Error().Err(err).Msg("unable to load AWS SDK config for dynamodb")
+		log.Error().Err(err).Msg("failed to load AWS SDK config for dynamodb")

 		return nil, err
 	}

@@ -95,7 +95,7 @@ func NewDynamoDBCache(parameters interface{}, log zlog.Logger) (*DynamoDBDriver,

 	err = driver.NewTable(driver.tableName)
 	if err != nil {
-		log.Error().Err(err).Str("tableName", driver.tableName).Msg("unable to create table for cache")
+		log.Error().Err(err).Str("tableName", driver.tableName).Msg("failed to create table for cache")

 		return nil, err
 	}

@@ -143,7 +143,8 @@ func (d *DynamoDBDriver) GetBlob(digest godigest.Digest) (string, error) {

 func (d *DynamoDBDriver) PutBlob(digest godigest.Digest, path string) error {
 	if path == "" {
-		d.log.Error().Err(zerr.ErrEmptyValue).Str("digest", digest.String()).Msg("empty path provided")
+		d.log.Error().Err(zerr.ErrEmptyValue).Str("digest", digest.String()).
+			Msg("failed to put blob because the path provided is empty")

 		return zerr.ErrEmptyValue
 	}

@@ -159,7 +160,7 @@ func (d *DynamoDBDriver) PutBlob(digest godigest.Digest, path string) error {
 	attrPath := types.AttributeValueMemberSS{Value: []string{path}}

 	if err := d.updateItem(digest, expression, map[string]types.AttributeValue{":i": &attrPath}); err != nil {
-		d.log.Error().Err(err).Str("digest", digest.String()).Str("path", path).Msg("unable to put blob")
+		d.log.Error().Err(err).Str("digest", digest.String()).Str("path", path).Msg("failed to put blob")

 		return err
 	}

@@ -183,7 +184,7 @@ func (d *DynamoDBDriver) HasBlob(digest godigest.Digest, path string) bool {
 	out := Blob{}

 	if resp.Item == nil {
-		d.log.Debug().Err(zerr.ErrCacheMiss).Str("digest", string(digest)).Msg("unable to find blob in cache")
+		d.log.Debug().Err(zerr.ErrCacheMiss).Str("digest", string(digest)).Msg("failed to find blob in cache")

 		return false
 	}

@@ -200,7 +201,7 @@ func (d *DynamoDBDriver) HasBlob(digest godigest.Digest, path string) bool {
 		}
 	}

-	d.log.Debug().Err(zerr.ErrCacheMiss).Str("digest", string(digest)).Msg("unable to find blob in cache")
+	d.log.Debug().Err(zerr.ErrCacheMiss).Str("digest", string(digest)).Msg("failed to find blob in cache")

 	return false
 }

@@ -212,7 +213,7 @@ func (d *DynamoDBDriver) DeleteBlob(digest godigest.Digest, path string) error {
 	attrPath := types.AttributeValueMemberSS{Value: []string{path}}

 	if err := d.updateItem(digest, expression, map[string]types.AttributeValue{":i": &attrPath}); err != nil {
-		d.log.Error().Err(err).Str("digest", digest.String()).Str("path", path).Msg("unable to delete")
+		d.log.Error().Err(err).Str("digest", digest.String()).Str("path", path).Msg("failed to delete")

 		return err
 	}

@@ -274,7 +275,7 @@ func (d *DynamoDBDriver) putOriginBlob(digest godigest.Digest, path string) erro
 	attrPath := types.AttributeValueMemberS{Value: path}

 	if err := d.updateItem(digest, expression, map[string]types.AttributeValue{":s": &attrPath}); err != nil {
-		d.log.Error().Err(err).Str("digest", digest.String()).Str("path", path).Msg("unable to put original blob")
+		d.log.Error().Err(err).Str("digest", digest.String()).Str("path", path).Msg("failed to put original blob")

 		return err
 	}

@ -85,13 +85,13 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
|
|||
|
||||
// validate manifest
|
||||
if err := ValidateManifestSchema(body); err != nil {
|
||||
log.Error().Err(err).Msg("OCIv1 image manifest schema validation failed")
|
||||
log.Error().Err(err).Msg("failed to validate OCIv1 image manifest schema")
|
||||
|
||||
return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error())
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &manifest); err != nil {
|
||||
log.Error().Err(err).Msg("unable to unmarshal JSON")
|
||||
log.Error().Err(err).Msg("failed to unmarshal JSON")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
@ -102,7 +102,8 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
|
|||
// validate config blob - a lightweight check if the blob is present
|
||||
ok, _, _, err := imgStore.StatBlob(repo, manifest.Config.Digest)
|
||||
if !ok || err != nil {
|
||||
log.Error().Err(err).Str("digest", manifest.Config.Digest.String()).Msg("missing config blob")
|
||||
log.Error().Err(err).Str("digest", manifest.Config.Digest.String()).
|
||||
Msg("failed to stat blob due to missing config blob")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
@ -118,7 +119,8 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
|
|||
|
||||
ok, _, _, err := imgStore.StatBlob(repo, layer.Digest)
|
||||
if !ok || err != nil {
|
||||
log.Error().Err(err).Str("digest", layer.Digest.String()).Msg("missing layer blob")
|
||||
log.Error().Err(err).Str("digest", layer.Digest.String()).
|
||||
Msg("failed to validate manifest due to missing layer blob")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
@ -127,28 +129,29 @@ func ValidateManifest(imgStore storageTypes.ImageStore, repo, reference, mediaTy
|
|||
case oras.MediaTypeArtifactManifest:
|
||||
var m oras.Descriptor
|
||||
if err := json.Unmarshal(body, &m); err != nil {
|
||||
log.Error().Err(err).Msg("unable to unmarshal JSON")
|
||||
log.Error().Err(err).Msg("failed to unmarshal JSON")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
case ispec.MediaTypeImageIndex:
|
||||
// validate manifest
|
||||
if err := ValidateImageIndexSchema(body); err != nil {
|
||||
log.Error().Err(err).Msg("OCIv1 image index manifest schema validation failed")
|
||||
log.Error().Err(err).Msg("failed to validate OCIv1 image index manifest schema")
|
||||
|
||||
return "", zerr.NewError(zerr.ErrBadManifest).AddDetail("jsonSchemaValidation", err.Error())
|
||||
}
|
||||
|
||||
var indexManifest ispec.Index
|
||||
if err := json.Unmarshal(body, &indexManifest); err != nil {
|
||||
log.Error().Err(err).Msg("unable to unmarshal JSON")
|
||||
log.Error().Err(err).Msg("failed to unmarshal JSON")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
||||
for _, manifest := range indexManifest.Manifests {
|
||||
if ok, _, _, err := imgStore.StatBlob(repo, manifest.Digest); !ok || err != nil {
|
||||
log.Error().Err(err).Str("digest", manifest.Digest.String()).Msg("missing manifest blob")
|
||||
log.Error().Err(err).Str("digest", manifest.Digest.String()).
|
||||
Msg("failed to stat manifest due to missing manifest blob")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
@ -165,7 +168,7 @@ func GetAndValidateRequestDigest(body []byte, digestStr string, log zlog.Logger)
|
|||
if err == nil {
|
||||
if d.String() != bodyDigest.String() {
|
||||
log.Error().Str("actual", bodyDigest.String()).Str("expected", d.String()).
|
||||
Msg("manifest digest is not valid")
|
||||
Msg("failed to validate manifest digest")
|
||||
|
||||
return "", zerr.ErrBadManifest
|
||||
}
|
||||
|
@ -483,8 +486,8 @@ func isBlobReferencedInImageManifest(imgStore storageTypes.ImageStore, repo stri
|
|||
|
||||
manifestContent, err := GetImageManifest(imgStore, repo, mdigest, log)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("repo", repo).Str("digest", mdigest.String()).
|
||||
Msg("gc: failed to read manifest image")
|
||||
log.Error().Err(err).Str("repo", repo).Str("digest", mdigest.String()).Str("component", "gc").
|
||||
Msg("failed to read manifest image")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
@ -511,8 +514,8 @@ func isBlobReferencedInORASManifest(imgStore storageTypes.ImageStore, repo strin
|
|||
|
||||
manifestContent, err := GetOrasManifestByDigest(imgStore, repo, mdigest, log)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("repo", repo).Str("digest", mdigest.String()).
|
||||
Msg("gc: failed to read manifest image")
|
||||
log.Error().Err(err).Str("repo", repo).Str("digest", mdigest.String()).Str("component", "gc").
|
||||
Msg("failed to read manifest image")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
@ -978,14 +981,14 @@ func (gen *DedupeTaskGenerator) Next() (scheduler.Task, error) {
|
|||
gen.repos, err = gen.ImgStore.GetRepositories()
|
||||
if err != nil {
|
||||
//nolint: dupword
|
||||
gen.Log.Error().Err(err).Msg("dedupe rebuild: unable to to get list of repositories")
|
||||
gen.Log.Error().Err(err).Str("component", "dedupe").Msg("failed to get list of repositories")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if still no repos
|
||||
if len(gen.repos) == 0 {
|
||||
gen.Log.Info().Msg("dedupe rebuild: no repositories found in storage, finished.")
|
||||
gen.Log.Info().Str("component", "dedupe").Msg("no repositories found in storage, finished.")
|
||||
|
||||
// no repositories in storage, no need to continue
|
||||
gen.done = true
|
||||
|
@ -997,14 +1000,14 @@ func (gen *DedupeTaskGenerator) Next() (scheduler.Task, error) {
|
|||
// get all blobs from storage.imageStore and group them by digest
|
||||
gen.digest, gen.duplicateBlobs, err = gen.ImgStore.GetNextDigestWithBlobPaths(gen.repos, gen.lastDigests)
|
||||
if err != nil {
|
||||
gen.Log.Error().Err(err).Msg("dedupe rebuild: failed to get next digest")
|
||||
gen.Log.Error().Err(err).Str("component", "dedupe").Msg("failed to get next digest")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if no digests left, then mark the task generator as done
|
||||
if gen.digest == "" {
|
||||
gen.Log.Info().Msg("dedupe rebuild: finished")
|
||||
gen.Log.Info().Str("component", "dedupe").Msg("finished")
|
||||
|
||||
gen.done = true
|
||||
|
||||
|
@ -1055,7 +1058,8 @@ func (dt *dedupeTask) DoWork(ctx context.Context) error {
|
|||
err := dt.imgStore.RunDedupeForDigest(ctx, dt.digest, dt.dedupe, dt.duplicateBlobs) //nolint: contextcheck
|
||||
if err != nil {
|
||||
// log it
|
||||
dt.log.Error().Err(err).Str("digest", dt.digest.String()).Msg("rebuild dedupe: failed to rebuild digest")
|
||||
dt.log.Error().Err(err).Str("digest", dt.digest.String()).Str("component", "dedupe").
|
||||
Msg("failed to rebuild digest")
|
||||
}
|
||||
|
||||
return err
|
||||
|
@ -1095,7 +1099,7 @@ func (gen *StorageMetricsInitGenerator) Next() (scheduler.Task, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
gen.Log.Debug().Str("repo", repo).Int("randomDelay", delay).Msg("StorageMetricsInitGenerator")
|
||||
gen.Log.Debug().Str("repo", repo).Int("randomDelay", delay).Msg("generate task for storage metrics")
|
||||
|
||||
if repo == "" {
|
||||
gen.done = true
|
||||
|
@ -1137,7 +1141,7 @@ func NewStorageMetricsTask(imgStore storageTypes.ImageStore, metrics monitoring.
|
|||
func (smt *smTask) DoWork(ctx context.Context) error {
|
||||
// run task
|
||||
monitoring.SetStorageUsage(smt.metrics, smt.imgStore.RootDir(), smt.repo)
|
||||
smt.log.Debug().Msg("monitoring: computed storage usage for repo " + smt.repo)
|
||||
smt.log.Debug().Str("component", "monitoring").Msg("computed storage usage for repo " + smt.repo)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -85,7 +85,7 @@ func (gc GarbageCollect) CleanRepo(ctx context.Context, repo string) error {
|
|||
Msg(fmt.Sprintf("executing GC of orphaned blobs for %s", path.Join(gc.imgStore.RootDir(), repo)))
|
||||
|
||||
if err := gc.cleanRepo(ctx, repo); err != nil {
|
||||
errMessage := fmt.Sprintf("error while running GC for %s", path.Join(gc.imgStore.RootDir(), repo))
|
||||
errMessage := fmt.Sprintf("failed to run GC for %s", path.Join(gc.imgStore.RootDir(), repo))
|
||||
gc.log.Error().Err(err).Str("module", "gc").Msg(errMessage)
|
||||
gc.log.Info().Str("module", "gc").
|
||||
Msg(fmt.Sprintf("GC unsuccessfully completed for %s", path.Join(gc.imgStore.RootDir(), repo)))
|
||||
|
@ -121,7 +121,7 @@ func (gc GarbageCollect) cleanRepo(ctx context.Context, repo string) error {
|
|||
*/
|
||||
index, err := common.GetIndex(gc.imgStore, repo, gc.log)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("unable to read index.json in repo")
|
||||
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("failed to read index.json in repo")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -360,7 +360,8 @@ func (gc GarbageCollect) removeTagsPerRetentionPolicy(ctx context.Context, repo
|
|||
|
||||
repoMeta, err := gc.metaDB.GetRepoMeta(ctx, repo)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("can't retrieve repoMeta for repo")
|
||||
gc.log.Error().Err(err).Str("module", "gc").Str("repository", repo).
|
||||
Msg("failed to get repoMeta")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -435,14 +436,16 @@ func (gc GarbageCollect) removeManifest(repo string, index *ispec.Index,
|
|||
SignatureType: signatureType,
|
||||
})
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("module", "gc").Msg("metadb: unable to remove signature in metaDB")
|
||||
gc.log.Error().Err(err).Str("module", "gc").Str("component", "metadb").
|
||||
Msg("failed to remove signature in metaDB")
|
||||
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
err := gc.metaDB.RemoveRepoReference(repo, reference, desc.Digest)
|
||||
if err != nil {
|
||||
gc.log.Error().Err(err).Str("module", "gc").Msg("metadb: unable to remove repo reference in metaDB")
|
||||
gc.log.Error().Err(err).Str("module", "gc").Str("component", "metadb").
|
||||
Msg("failed to remove repo reference in metaDB")
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
@ -553,14 +556,14 @@ func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duratio
|
|||
|
||||
index, err := common.GetIndex(gc.imgStore, repo, gc.log)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("unable to read index.json in repo")
|
||||
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("failed to read index.json in repo")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
err = gc.addIndexBlobsToReferences(repo, index, refBlobs)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("unable to get referenced blobs in repo")
|
||||
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("failed to get referenced blobs in repo")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -572,7 +575,7 @@ func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duratio
|
|||
return nil
|
||||
}
|
||||
|
||||
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("unable to get all blobs")
|
||||
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Msg("failed to get all blobs")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -583,7 +586,7 @@ func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duratio
|
|||
digest := godigest.NewDigestFromEncoded(godigest.SHA256, blob)
|
||||
if err = digest.Validate(); err != nil {
|
||||
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", blob).
|
||||
Msg("unable to parse digest")
|
||||
Msg("failed to parse digest")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -592,7 +595,7 @@ func (gc GarbageCollect) removeUnreferencedBlobs(repo string, delay time.Duratio
|
|||
canGC, err := isBlobOlderThan(gc.imgStore, repo, digest, delay, log)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("module", "gc").Str("repository", repo).Str("digest", blob).
|
||||
Msg("unable to determine GC delay")
|
||||
Msg("failed to determine GC delay")
|
||||
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -72,7 +72,7 @@ func NewImageStore(rootDir string, cacheDir string, dedupe, commit bool, log zlo
|
|||
metrics monitoring.MetricServer, linter common.Lint, storeDriver storageTypes.Driver, cacheDriver cache.Cache,
|
||||
) storageTypes.ImageStore {
|
||||
if err := storeDriver.EnsureDir(rootDir); err != nil {
|
||||
log.Error().Err(err).Str("rootDir", rootDir).Msg("unable to create root dir")
|
||||
log.Error().Err(err).Str("rootDir", rootDir).Msg("failed to create root dir")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -130,7 +130,7 @@ func (is *ImageStore) initRepo(name string) error {
|
|||
repoDir := path.Join(is.rootDir, name)
|
||||
|
||||
if !utf8.ValidString(name) {
|
||||
is.log.Error().Msg("input is not valid UTF-8")
|
||||
is.log.Error().Msg("invalid UTF-8 input")
|
||||
|
||||
return zerr.ErrInvalidRepositoryName
|
||||
}
|
||||
|
@ -144,14 +144,14 @@ func (is *ImageStore) initRepo(name string) error {
|
|||
// create "blobs" subdir
|
||||
err := is.storeDriver.EnsureDir(path.Join(repoDir, "blobs"))
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Msg("error creating blobs subdir")
|
||||
is.log.Error().Err(err).Str("repository", name).Str("dir", repoDir).Msg("failed to create blobs subdir")
|
||||
|
||||
return err
|
||||
}
|
||||
// create BlobUploadDir subdir
|
||||
err = is.storeDriver.EnsureDir(path.Join(repoDir, storageConstants.BlobUploadDir))
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Msg("error creating blob upload subdir")
|
||||
is.log.Error().Err(err).Msg("failed to create blob upload subdir")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -163,13 +163,13 @@ func (is *ImageStore) initRepo(name string) error {
|
|||
|
||||
buf, err := json.Marshal(il)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Msg("unable to marshal JSON")
|
||||
is.log.Error().Err(err).Msg("failed to marshal JSON")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := is.storeDriver.WriteFile(ilPath, buf); err != nil {
|
||||
is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file")
|
||||
is.log.Error().Err(err).Str("file", ilPath).Msg("failed to write file")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -183,13 +183,13 @@ func (is *ImageStore) initRepo(name string) error {
|
|||
|
||||
buf, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Msg("unable to marshal JSON")
|
||||
is.log.Error().Err(err).Msg("failed to marshal JSON")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := is.storeDriver.WriteFile(indexPath, buf); err != nil {
|
||||
is.log.Error().Err(err).Str("file", ilPath).Msg("unable to write file")
|
||||
is.log.Error().Err(err).Str("file", ilPath).Msg("failed to write file")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -225,7 +225,7 @@ func (is *ImageStore) ValidateRepo(name string) (bool, error) {
|
|||
|
||||
files, err := is.storeDriver.List(dir)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("unable to read directory")
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("failed to read directory")
|
||||
|
||||
return false, zerr.ErrRepoNotFound
|
||||
}
|
||||
|
@ -350,7 +350,7 @@ func (is *ImageStore) GetNextRepository(repo string) (string, error) {
|
|||
return "", nil
|
||||
}
|
||||
|
||||
is.log.Error().Err(err).Msg("failure walking storage root-dir")
|
||||
is.log.Error().Err(err).Str("root-dir", dir).Msg("failed to walk storage root-dir")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
@ -581,7 +581,7 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli
|
|||
manifestPath := path.Join(dir, mDigest.Encoded())
|
||||
|
||||
if _, err = is.storeDriver.WriteFile(manifestPath, body); err != nil {
|
||||
is.log.Error().Err(err).Str("file", manifestPath).Msg("unable to write")
|
||||
is.log.Error().Err(err).Str("file", manifestPath).Msg("failed to write")
|
||||
|
||||
return "", "", err
|
||||
}
|
||||
|
@ -600,7 +600,8 @@ func (is *ImageStore) PutImageManifest(repo, reference, mediaType string, //noli
|
|||
// apply linter only on images, not signatures
|
||||
pass, err := common.ApplyLinter(is, is.linter, repo, desc)
|
||||
if !pass {
|
||||
is.log.Error().Err(err).Str("repository", repo).Str("reference", reference).Msg("linter didn't pass")
|
||||
is.log.Error().Err(err).Str("repository", repo).Str("reference", reference).
|
||||
Msg("linter didn't pass")
|
||||
|
||||
return "", "", err
|
||||
}
|
||||
|
@ -678,7 +679,7 @@ func (is *ImageStore) deleteImageManifest(repo, reference string, detectCollisio
|
|||
}
|
||||
|
||||
if _, err := is.storeDriver.WriteFile(file, buf); err != nil {
|
||||
is.log.Debug().Str("reference", reference).Str("repo", repo).Msg("error while updating index.json")
|
||||
is.log.Debug().Str("reference", reference).Str("repository", repo).Msg("failed to update index.json")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -718,7 +719,7 @@ func (is *ImageStore) BlobUploadPath(repo, uuid string) string {
|
|||
// NewBlobUpload returns the unique ID for an upload in progress.
|
||||
func (is *ImageStore) NewBlobUpload(repo string) (string, error) {
|
||||
if err := is.InitRepo(repo); err != nil {
|
||||
is.log.Error().Err(err).Msg("error initializing repo")
|
||||
is.log.Error().Err(err).Msg("failed to initialize repo")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
@ -750,7 +751,7 @@ func (is *ImageStore) GetBlobUpload(repo, uuid string) (int64, error) {
|
|||
blobUploadPath := is.BlobUploadPath(repo, uuid)
|
||||
|
||||
if !utf8.ValidString(blobUploadPath) {
|
||||
is.log.Error().Msg("input is not valid UTF-8")
|
||||
is.log.Error().Msg("invalid UTF-8 input")
|
||||
|
||||
return -1, zerr.ErrInvalidRepositoryName
|
||||
}
|
||||
|
@ -900,7 +901,7 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig
|
|||
|
||||
err = is.storeDriver.EnsureDir(dir)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Msg("error creating blobs/sha256 dir")
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("failed to create dir")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -916,14 +917,14 @@ func (is *ImageStore) FinishBlobUpload(repo, uuid string, body io.Reader, dstDig
|
|||
err = is.DedupeBlob(src, dstDigest, repo, dst)
|
||||
if err := inject.Error(err); err != nil {
|
||||
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
|
||||
Str("dst", dst).Msg("unable to dedupe blob")
|
||||
Str("dst", dst).Msg("failed to dedupe blob")
|
||||
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := is.storeDriver.Move(src, dst); err != nil {
|
||||
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
|
||||
Str("dst", dst).Msg("unable to finish blob")
|
||||
Str("dst", dst).Msg("failed to finish blob")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -968,7 +969,7 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi
|
|||
|
||||
_, err = digester.Write(buf.Bytes())
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Msg("digester failed to write")
|
||||
is.log.Error().Err(err).Str("component", "digester").Msg("failed to write")
|
||||
|
||||
return "", -1, err
|
||||
}
|
||||
|
@ -994,14 +995,14 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi
|
|||
if is.dedupe && fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
|
||||
if err := is.DedupeBlob(src, dstDigest, repo, dst); err != nil {
|
||||
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
|
||||
Str("dst", dst).Msg("unable to dedupe blob")
|
||||
Str("dst", dst).Msg("failed to dedupe blob")
|
||||
|
||||
return "", -1, err
|
||||
}
|
||||
} else {
|
||||
if err := is.storeDriver.Move(src, dst); err != nil {
|
||||
is.log.Error().Err(err).Str("src", src).Str("dstDigest", dstDigest.String()).
|
||||
Str("dst", dst).Msg("unable to finish blob")
|
||||
Str("dst", dst).Msg("failed to finish blob")
|
||||
|
||||
return "", -1, err
|
||||
}
|
||||
|
@ -1012,11 +1013,11 @@ func (is *ImageStore) FullBlobUpload(repo string, body io.Reader, dstDigest godi
|
|||
|
||||
func (is *ImageStore) DedupeBlob(src string, dstDigest godigest.Digest, dstRepo string, dst string) error {
|
||||
retry:
|
||||
is.log.Debug().Str("src", src).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: enter")
|
||||
is.log.Debug().Str("src", src).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe begin")
|
||||
|
||||
dstRecord, err := is.cache.GetBlob(dstDigest)
|
||||
if err := inject.Error(err); err != nil && !errors.Is(err, zerr.ErrCacheMiss) {
|
||||
is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to lookup blob record")
|
||||
is.log.Error().Err(err).Str("blobPath", dst).Str("component", "dedupe").Msg("failed to lookup blob record")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -1024,19 +1025,21 @@ retry:
|
|||
if dstRecord == "" {
|
||||
// cache record doesn't exist, so first disk and cache entry for this digest
|
||||
if err := is.cache.PutBlob(dstDigest, dst); err != nil {
|
||||
is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record")
|
||||
is.log.Error().Err(err).Str("blobPath", dst).Str("component", "dedupe").
|
||||
Msg("failed to insert blob record")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// move the blob from uploads to final dest
|
||||
if err := is.storeDriver.Move(src, dst); err != nil {
|
||||
is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("dedupe: unable to rename blob")
|
||||
is.log.Error().Err(err).Str("src", src).Str("dst", dst).Str("component", "dedupe").
|
||||
Msg("failed to rename blob")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
is.log.Debug().Str("src", src).Str("dst", dst).Msg("dedupe: rename")
|
||||
is.log.Debug().Str("src", src).Str("dst", dst).Str("component", "dedupe").Msg("rename")
|
||||
} else {
|
||||
// cache record exists, but due to GC and upgrades from older versions,
|
||||
// disk content and cache records may go out of sync
|
||||
|
@ -1046,12 +1049,13 @@ retry:
|
|||
|
||||
blobInfo, err := is.storeDriver.Stat(dstRecord)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to stat")
|
||||
is.log.Error().Err(err).Str("blobPath", dstRecord).Str("component", "dedupe").Msg("failed to stat")
|
||||
// the actual blob on disk may have been removed by GC, so sync the cache
|
||||
err := is.cache.DeleteBlob(dstDigest, dstRecord)
|
||||
if err = inject.Error(err); err != nil {
|
||||
//nolint:lll
|
||||
is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).Msg("dedupe: unable to delete blob record")
|
||||
is.log.Error().Err(err).Str("dstDigest", dstDigest.String()).Str("dst", dst).
|
||||
Str("component", "dedupe").Msg("failed to delete blob record")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -1062,13 +1066,15 @@ retry:
|
|||
// prevent overwrite original blob
|
||||
if !is.storeDriver.SameFile(dst, dstRecord) {
|
||||
if err := is.storeDriver.Link(dstRecord, dst); err != nil {
|
||||
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to link blobs")
|
||||
is.log.Error().Err(err).Str("blobPath", dstRecord).Str("component", "dedupe").
|
||||
Msg("failed to link blobs")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if err := is.cache.PutBlob(dstDigest, dst); err != nil {
|
||||
is.log.Error().Err(err).Str("blobPath", dst).Msg("dedupe: unable to insert blob record")
|
||||
is.log.Error().Err(err).Str("blobPath", dst).Str("component", "dedupe").
|
||||
Msg("failed to insert blob record")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -1078,12 +1084,13 @@ retry:
|
|||
// blob corrupted, replace content
|
||||
if desc.Size != blobInfo.Size() {
|
||||
if err := is.storeDriver.Move(src, dst); err != nil {
|
||||
is.log.Error().Err(err).Str("src", src).Str("dst", dst).Msg("dedupe: unable to rename blob")
|
||||
is.log.Error().Err(err).Str("src", src).Str("dst", dst).Str("component", "dedupe").
|
||||
Msg("failed to rename blob")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
is.log.Debug().Str("src", src).Msg("dedupe: remove")
|
||||
is.log.Debug().Str("src", src).Str("component", "dedupe").Msg("remove")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -1092,12 +1099,13 @@ retry:
|
|||
|
||||
// remove temp blobupload
|
||||
if err := is.storeDriver.Delete(src); err != nil {
|
||||
is.log.Error().Err(err).Str("src", src).Msg("dedupe: unable to remove blob")
|
||||
is.log.Error().Err(err).Str("src", src).Str("component", "dedupe").
|
||||
Msg("failed to remove blob")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
is.log.Debug().Str("src", src).Msg("dedupe: remove")
|
||||
is.log.Debug().Str("src", src).Str("component", "dedupe").Msg("remove")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -1119,7 +1127,7 @@ func (is *ImageStore) DeleteBlobUpload(repo, uuid string) error {
|
|||
defer writer.Close()
|
||||
|
||||
if err := writer.Cancel(); err != nil {
|
||||
is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("error deleting blob upload")
|
||||
is.log.Error().Err(err).Str("blobUploadPath", blobUploadPath).Msg("failed to delete blob upload")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -1176,7 +1184,7 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6
|
|||
// Check blobs in cache
|
||||
dstRecord, err := is.checkCacheBlob(digest)
|
||||
if err != nil {
|
||||
is.log.Debug().Err(err).Str("digest", digest.String()).Msg("cache: not found")
|
||||
is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache")
|
||||
|
||||
return false, -1, zerr.ErrBlobNotFound
|
||||
}
|
||||
|
@ -1188,7 +1196,7 @@ func (is *ImageStore) CheckBlob(repo string, digest godigest.Digest) (bool, int6
|
|||
|
||||
// put deduped blob in cache
|
||||
if err := is.cache.PutBlob(digest, blobPath); err != nil {
|
||||
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("dedupe: unable to insert blob record")
|
||||
is.log.Error().Err(err).Str("blobPath", blobPath).Str("component", "dedupe").Msg("failed to insert blob record")
|
||||
|
||||
return false, -1, err
|
||||
}
|
||||
|
@ -1222,7 +1230,7 @@ func (is *ImageStore) StatBlob(repo string, digest godigest.Digest) (bool, int64
|
|||
// Check blobs in cache
|
||||
dstRecord, err := is.checkCacheBlob(digest)
|
||||
if err != nil {
|
||||
is.log.Debug().Err(err).Str("digest", digest.String()).Msg("cache: not found")
|
||||
is.log.Warn().Err(err).Str("digest", digest.String()).Msg("not found in cache")
|
||||
|
||||
return false, -1, time.Time{}, zerr.ErrBlobNotFound
|
||||
}
|
||||
|
@ -1261,7 +1269,7 @@ func (is *ImageStore) checkCacheBlob(digest godigest.Digest) (string, error) {
|
|||
// the actual blob on disk may have been removed by GC, so sync the cache
|
||||
if err := is.cache.DeleteBlob(digest, dstRecord); err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", dstRecord).
|
||||
Msg("unable to remove blob path from cache")
|
||||
Msg("failed to remove blob path from cache")
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
@ -1269,14 +1277,15 @@ func (is *ImageStore) checkCacheBlob(digest godigest.Digest) (string, error) {
|
|||
return "", zerr.ErrBlobNotFound
|
||||
}
|
||||
|
||||
is.log.Debug().Str("digest", digest.String()).Str("dstRecord", dstRecord).Msg("cache: found dedupe record")
|
||||
is.log.Debug().Str("digest", digest.String()).Str("dstRecord", dstRecord).Str("component", "cache").
|
||||
Msg("found dedupe record")
|
||||
|
||||
return dstRecord, nil
|
||||
}
|
||||
|
||||
func (is *ImageStore) copyBlob(repo string, blobPath, dstRecord string) (int64, error) {
|
||||
if err := is.initRepo(repo); err != nil {
|
||||
is.log.Error().Err(err).Str("repository", repo).Msg("unable to initialize an empty repo")
|
||||
is.log.Error().Err(err).Str("repository", repo).Msg("failed to initialize an empty repo")
|
||||
|
||||
return -1, err
|
||||
}
|
||||
|
@ -1284,7 +1293,8 @@ func (is *ImageStore) copyBlob(repo string, blobPath, dstRecord string) (int64,
|
|||
_ = is.storeDriver.EnsureDir(filepath.Dir(blobPath))
|
||||
|
||||
if err := is.storeDriver.Link(dstRecord, blobPath); err != nil {
|
||||
is.log.Error().Err(err).Str("blobPath", blobPath).Str("link", dstRecord).Msg("dedupe: unable to hard link")
|
||||
is.log.Error().Err(err).Str("blobPath", blobPath).Str("link", dstRecord).Str("component", "dedupe").
|
||||
Msg("failed to hard link")
|
||||
|
||||
return -1, zerr.ErrBlobNotFound
|
||||
}
|
||||
|
@ -1325,7 +1335,7 @@ func (is *ImageStore) GetBlobPartial(repo string, digest godigest.Digest, mediaT
|
|||
// Check blobs in cache
|
||||
blobPath, err = is.checkCacheBlob(digest)
|
||||
if err != nil {
|
||||
is.log.Debug().Err(err).Str("digest", digest.String()).Msg("cache: not found")
|
||||
is.log.Debug().Err(err).Str("digest", digest.String()).Msg("not found in cache")
|
||||
|
||||
return nil, -1, -1, zerr.ErrBlobNotFound
|
||||
}
|
||||
|
@ -1395,7 +1405,7 @@ func (is *ImageStore) GetBlob(repo string, digest godigest.Digest, mediaType str
|
|||
// Check blobs in cache
|
||||
dstRecord, err := is.checkCacheBlob(digest)
|
||||
if err != nil {
|
||||
is.log.Debug().Err(err).Str("digest", digest.String()).Msg("cache: not found")
|
||||
is.log.Debug().Err(err).Str("digest", digest.String()).Msg("not found in cache")
|
||||
|
||||
return nil, -1, zerr.ErrBlobNotFound
|
||||
}
|
||||
|
@ -1448,7 +1458,7 @@ func (is *ImageStore) GetBlobContent(repo string, digest godigest.Digest) ([]byt
|
|||
// Check blobs in cache
|
||||
dstRecord, err := is.checkCacheBlob(digest)
|
||||
if err != nil {
|
||||
is.log.Debug().Err(err).Str("digest", digest.String()).Msg("cache: not found")
|
||||
is.log.Debug().Err(err).Str("digest", digest.String()).Msg("not found in cache")
|
||||
|
||||
return nil, zerr.ErrBlobNotFound
|
||||
}
|
||||
|
@ -1493,7 +1503,7 @@ func (is *ImageStore) GetIndexContent(repo string) ([]byte, error) {
|
|||
buf, err := is.storeDriver.ReadFile(path.Join(dir, "index.json"))
|
||||
if err != nil {
|
||||
if errors.Is(err, driver.PathNotFoundError{}) {
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("index.json doesn't exist")
|
||||
is.log.Error().Err(err).Str("dir", dir).Msg("failed to read index.json")
|
||||
|
||||
return []byte{}, zerr.ErrRepoNotFound
|
||||
}
|
||||
|
@ -1512,7 +1522,7 @@ func (is *ImageStore) StatIndex(repo string) (bool, int64, time.Time, error) {
|
|||
fileInfo, err := is.storeDriver.Stat(repoIndexPath)
|
||||
if err != nil {
|
||||
if errors.As(err, &driver.PathNotFoundError{}) {
|
||||
is.log.Error().Err(err).Str("indexFile", repoIndexPath).Msg("index.json doesn't exist")
|
||||
is.log.Error().Err(err).Str("indexFile", repoIndexPath).Msg("failed to stat index.json")
|
||||
|
||||
return false, 0, time.Time{}, zerr.ErrRepoNotFound
|
||||
}
|
||||
|
@ -1532,13 +1542,13 @@ func (is *ImageStore) PutIndexContent(repo string, index ispec.Index) error {
|
|||
|
||||
buf, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("file", indexPath).Msg("unable to marshal JSON")
|
||||
is.log.Error().Err(err).Str("file", indexPath).Msg("failed to marshal JSON")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = is.storeDriver.WriteFile(indexPath, buf); err != nil {
|
||||
is.log.Error().Err(err).Str("file", indexPath).Msg("unable to write")
|
||||
is.log.Error().Err(err).Str("file", indexPath).Msg("failed to write")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -1578,14 +1588,14 @@ func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe
|
|||
continue
|
||||
}
|
||||
|
||||
is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("unable to delete manifest")
|
||||
is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("failed to delete manifest")
|
||||
|
||||
return count, err
|
||||
}
|
||||
|
||||
count++
|
||||
} else {
|
||||
is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("unable to delete blob")
|
||||
is.log.Error().Err(err).Str("repository", repo).Str("digest", digest.String()).Msg("failed to delete blob")
|
||||
|
||||
return count, err
|
||||
}
|
||||
|
@ -1596,7 +1606,7 @@ func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe
|
|||
|
||||
blobUploads, err := is.storeDriver.List(path.Join(is.RootDir(), repo, storageConstants.BlobUploadDir))
|
||||
if err != nil {
|
||||
is.log.Debug().Str("repository", repo).Msg("unable to list .uploads/ dir")
|
||||
is.log.Debug().Str("repository", repo).Msg("failed to list .uploads/ dir")
|
||||
}
|
||||
|
||||
// if removeRepo flag is true and we cleanup all blobs and there are no blobs currently being uploaded.
|
||||
|
@ -1604,7 +1614,7 @@ func (is *ImageStore) CleanupRepo(repo string, blobs []godigest.Digest, removeRe
|
|||
is.log.Info().Str("repository", repo).Msg("removed all blobs, removing repo")
|
||||
|
||||
if err := is.storeDriver.Delete(path.Join(is.rootDir, repo)); err != nil {
|
||||
is.log.Error().Err(err).Str("repository", repo).Msg("unable to remove repo")
|
||||
is.log.Error().Err(err).Str("repository", repo).Msg("failed to remove repo")
|
||||
|
||||
return count, err
|
||||
}
|
||||
|
@ -1631,7 +1641,8 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error {
|
|||
if fmt.Sprintf("%v", is.cache) != fmt.Sprintf("%v", nil) {
|
||||
dstRecord, err := is.cache.GetBlob(digest)
|
||||
if err != nil && !errors.Is(err, zerr.ErrCacheMiss) {
|
||||
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to lookup blob record")
|
||||
is.log.Error().Err(err).Str("blobPath", dstRecord).Str("component", "dedupe").
|
||||
Msg("failed to lookup blob record")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -1640,7 +1651,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error {
|
|||
if ok := is.cache.HasBlob(digest, blobPath); ok {
|
||||
if err := is.cache.DeleteBlob(digest, blobPath); err != nil {
|
||||
is.log.Error().Err(err).Str("digest", digest.String()).Str("blobPath", blobPath).
|
||||
Msg("unable to remove blob path from cache")
|
||||
Msg("failed to remove blob path from cache")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -1651,7 +1662,8 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error {
|
|||
// get next candidate
|
||||
dstRecord, err := is.cache.GetBlob(digest)
|
||||
if err != nil && !errors.Is(err, zerr.ErrCacheMiss) {
|
||||
is.log.Error().Err(err).Str("blobPath", dstRecord).Msg("dedupe: unable to lookup blob record")
|
||||
is.log.Error().Err(err).Str("blobPath", dstRecord).Str("component", "dedupe").
|
||||
Msg("failed to lookup blob record")
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -1662,14 +1674,16 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error {
|
|||
(in case of filesystem, this should not be needed */
|
||||
binfo, err := is.storeDriver.Stat(dstRecord)
|
||||
if err != nil {
|
||||
is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: failed to stat blob")
is.log.Error().Err(err).Str("path", blobPath).Str("component", "dedupe").
Msg("failed to stat blob")

return err
}

if binfo.Size() == 0 {
if err := is.storeDriver.Move(blobPath, dstRecord); err != nil {
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path")
is.log.Error().Err(err).Str("blobPath", blobPath).Str("component", "dedupe").
Msg("failed to remove blob path")

return err
}

@@ -1681,7 +1695,7 @@ func (is *ImageStore) deleteBlob(repo string, digest godigest.Digest) error {
}

if err := is.storeDriver.Delete(blobPath); err != nil {
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("unable to remove blob path")
is.log.Error().Err(err).Str("blobPath", blobPath).Msg("failed to remove blob path")

return err
}

@@ -1787,7 +1801,7 @@ func (is *ImageStore) getOriginalBlobFromDisk(duplicateBlobs []string) (string,
for _, blobPath := range duplicateBlobs {
binfo, err := is.storeDriver.Stat(blobPath)
if err != nil {
is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: failed to stat blob")
is.log.Error().Err(err).Str("path", blobPath).Str("component", "storage").Msg("failed to stat blob")

return "", zerr.ErrBlobNotFound
}

@@ -1807,14 +1821,14 @@ func (is *ImageStore) getOriginalBlob(digest godigest.Digest, duplicateBlobs []s

originalBlob, err = is.checkCacheBlob(digest)
if err != nil && !errors.Is(err, zerr.ErrBlobNotFound) && !errors.Is(err, zerr.ErrCacheMiss) {
is.log.Error().Err(err).Msg("rebuild dedupe: unable to find blob in cache")
is.log.Error().Err(err).Str("component", "dedupe").Msg("failed to find blob in cache")

return originalBlob, err
}

// if we still don't have, search it
if originalBlob == "" {
is.log.Warn().Msg("rebuild dedupe: failed to find blob in cache, searching it in storage...")
is.log.Warn().Str("component", "dedupe").Msg("failed to find blob in cache, searching it in storage...")
// a rebuild dedupe was attempted in the past
// get original blob, should be found otherwise exit with error

@@ -1824,19 +1838,19 @@ func (is *ImageStore) getOriginalBlob(digest godigest.Digest, duplicateBlobs []s
}
}

is.log.Info().Str("originalBlob", originalBlob).Msg("rebuild dedupe: found original blob")
is.log.Info().Str("originalBlob", originalBlob).Str("component", "dedupe").Msg("found original blob")

return originalBlob, nil
}

func (is *ImageStore) dedupeBlobs(ctx context.Context, digest godigest.Digest, duplicateBlobs []string) error {
if fmt.Sprintf("%v", is.cache) == fmt.Sprintf("%v", nil) {
is.log.Error().Err(zerr.ErrDedupeRebuild).Msg("no cache driver found, can not dedupe blobs")
is.log.Error().Err(zerr.ErrDedupeRebuild).Msg("failed to dedupe blobs, no cache driver found")

return zerr.ErrDedupeRebuild
}

is.log.Info().Str("digest", digest.String()).Msg("rebuild dedupe: deduping blobs for digest")
is.log.Info().Str("digest", digest.String()).Str("component", "dedupe").Msg("deduping blobs for digest")

var originalBlob string

@@ -1848,19 +1862,19 @@ func (is *ImageStore) dedupeBlobs(ctx context.Context, digest godigest.Digest, d

binfo, err := is.storeDriver.Stat(blobPath)
if err != nil {
is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: failed to stat blob")
is.log.Error().Err(err).Str("path", blobPath).Str("component", "dedupe").Msg("failed to stat blob")

return err
}

if binfo.Size() == 0 {
is.log.Warn().Msg("rebuild dedupe: found file without content, trying to find the original blob")
is.log.Warn().Str("component", "dedupe").Msg("found file without content, trying to find the original blob")
// a rebuild dedupe was attempted in the past
// get original blob, should be found otherwise exit with error
if originalBlob == "" {
originalBlob, err = is.getOriginalBlob(digest, duplicateBlobs)
if err != nil {
is.log.Error().Err(err).Msg("rebuild dedupe: unable to find original blob")
is.log.Error().Err(err).Str("component", "dedupe").Msg("failed to find original blob")

return zerr.ErrDedupeRebuild
}

@@ -1883,7 +1897,7 @@ func (is *ImageStore) dedupeBlobs(ctx context.Context, digest godigest.Digest, d
// if we have an original blob cached then we can safely dedupe the rest of them
if originalBlob != "" {
if err := is.storeDriver.Link(originalBlob, blobPath); err != nil {
is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: unable to dedupe blob")
is.log.Error().Err(err).Str("path", blobPath).Str("component", "dedupe").Msg("failed to dedupe blob")

return err
}

@@ -1901,18 +1915,19 @@ func (is *ImageStore) dedupeBlobs(ctx context.Context, digest godigest.Digest, d
}
}

is.log.Info().Str("digest", digest.String()).Msg("rebuild dedupe: deduping blobs for digest finished successfully")
is.log.Info().Str("digest", digest.String()).Str("component", "dedupe").
Msg("deduping blobs for digest finished successfully")

return nil
}

func (is *ImageStore) restoreDedupedBlobs(ctx context.Context, digest godigest.Digest, duplicateBlobs []string) error {
is.log.Info().Str("digest", digest.String()).Msg("rebuild dedupe: restoring deduped blobs for digest")
is.log.Info().Str("digest", digest.String()).Str("component", "dedupe").Msg("restoring deduped blobs for digest")

// first we need to find the original blob, either in cache or by checking each blob size
originalBlob, err := is.getOriginalBlob(digest, duplicateBlobs)
if err != nil {
is.log.Error().Err(err).Msg("rebuild dedupe: unable to find original blob")
is.log.Error().Err(err).Str("component", "dedupe").Msg("failed to find original blob")

return zerr.ErrDedupeRebuild
}

@@ -1924,7 +1939,7 @@ func (is *ImageStore) restoreDedupedBlobs(ctx context.Context, digest godigest.D

binfo, err := is.storeDriver.Stat(blobPath)
if err != nil {
is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: failed to stat blob")
is.log.Error().Err(err).Str("path", blobPath).Str("component", "dedupe").Msg("failed to stat blob")

return err
}

@@ -1934,7 +1949,8 @@ func (is *ImageStore) restoreDedupedBlobs(ctx context.Context, digest godigest.D
// move content from original blob to deduped one
buf, err := is.storeDriver.ReadFile(originalBlob)
if err != nil {
is.log.Error().Err(err).Str("path", originalBlob).Msg("rebuild dedupe: failed to get original blob content")
is.log.Error().Err(err).Str("path", originalBlob).Str("component", "dedupe").
Msg("failed to get original blob content")

return err
}

@@ -1947,7 +1963,7 @@ func (is *ImageStore) restoreDedupedBlobs(ctx context.Context, digest godigest.D
}

is.log.Info().Str("digest", digest.String()).
Msg("rebuild dedupe: restoring deduped blobs for digest finished successfully")
Str("component", "dedupe").Msg("restoring deduped blobs for digest finished successfully")

return nil
}

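For reference, the convention these hunks converge on can be shown in a short sketch of the pattern (the "before" line is taken from the hunk above; the zerolog-style logger behind is.log is assumed): the subsystem moves out of the message string into a structured "component" field, and error-level messages start with "failed to".

    // before: component and outcome packed into the message string
    is.log.Error().Err(err).Str("path", blobPath).Msg("rebuild dedupe: failed to stat blob")

    // after: component as a structured field, message starting with "failed to"
    is.log.Error().Err(err).
        Str("path", blobPath).
        Str("component", "dedupe").
        Msg("failed to stat blob")
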
@@ -2052,7 +2052,7 @@ func TestGarbageCollectForImageStore(t *testing.T) {
data, err := os.ReadFile(logFile.Name())
So(err, ShouldBeNil)
So(string(data), ShouldContainSubstring,
fmt.Sprintf("error while running GC for %s", path.Join(imgStore.RootDir(), repoName)))
fmt.Sprintf("failed to run GC for %s", path.Join(imgStore.RootDir(), repoName)))
})

Convey("Garbage collect error - not enough permissions to access index.json", func() {

@@ -2094,7 +2094,7 @@ func TestGarbageCollectForImageStore(t *testing.T) {
data, err := os.ReadFile(logFile.Name())
So(err, ShouldBeNil)
So(string(data), ShouldContainSubstring,
fmt.Sprintf("error while running GC for %s", path.Join(imgStore.RootDir(), repoName)))
fmt.Sprintf("failed to run GC for %s", path.Join(imgStore.RootDir(), repoName)))
So(os.Chmod(path.Join(dir, repoName, "index.json"), 0o755), ShouldBeNil)
})

@@ -170,7 +170,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper
actual := strings.TrimSpace(str)
So(actual, ShouldContainSubstring, "REPOSITORY TAG STATUS AFFECTED BLOB ERROR")
// verify error message
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s blob: not found", manifestDig))
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s blob not found", manifestDig))

index, err := common.GetIndex(imgStore, repoName, log)
So(err, ShouldBeNil)

@@ -180,7 +180,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper

imageRes := storage.CheckLayers(context.Background(), repoName, tag, manifestDescriptor, imgStore)
So(imageRes.Status, ShouldEqual, "affected")
So(imageRes.Error, ShouldEqual, "blob: not found")
So(imageRes.Error, ShouldEqual, "blob not found")

_, err = driver.WriteFile(manifestFile, []byte("invalid content"))
So(err, ShouldBeNil)

@@ -195,7 +195,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper
actual = strings.TrimSpace(str)
So(actual, ShouldContainSubstring, "REPOSITORY TAG STATUS AFFECTED BLOB ERROR")
// verify error message
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s manifest: invalid contents", manifestDig))
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s invalid manifest content", manifestDig))

index, err = common.GetIndex(imgStore, repoName, log)
So(err, ShouldBeNil)

@@ -205,7 +205,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper

imageRes = storage.CheckLayers(context.Background(), repoName, tag, manifestDescriptor, imgStore)
So(imageRes.Status, ShouldEqual, "affected")
So(imageRes.Error, ShouldEqual, "manifest: invalid contents")
So(imageRes.Error, ShouldEqual, "invalid manifest content")
})

Convey("Config integrity affected", func() {

@@ -235,7 +235,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper
str := space.ReplaceAllString(buff.String(), " ")
actual := strings.TrimSpace(str)
So(actual, ShouldContainSubstring, "REPOSITORY TAG STATUS AFFECTED BLOB ERROR")
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s blob: not found", configDig))
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s blob not found", configDig))

_, err = driver.WriteFile(configFile, []byte("invalid content"))
So(err, ShouldBeNil)

@@ -249,7 +249,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper
str = space.ReplaceAllString(buff.String(), " ")
actual = strings.TrimSpace(str)
So(actual, ShouldContainSubstring, "REPOSITORY TAG STATUS AFFECTED BLOB ERROR")
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s config: invalid config", configDig))
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s invalid server config", configDig))
})

Convey("Layers integrity affected", func() {

@@ -279,7 +279,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper
str := space.ReplaceAllString(buff.String(), " ")
actual := strings.TrimSpace(str)
So(actual, ShouldContainSubstring, "REPOSITORY TAG STATUS AFFECTED BLOB ERROR")
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s blob: bad blob digest", layerDig))
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s bad blob digest", layerDig))
})

Convey("Layer not found", func() {

@@ -307,7 +307,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper

imageRes := storage.CheckLayers(context.Background(), repoName, tag, manifestDescriptor, imgStore)
So(imageRes.Status, ShouldEqual, "affected")
So(imageRes.Error, ShouldEqual, "blob: not found")
So(imageRes.Error, ShouldEqual, "blob not found")

buff := bytes.NewBufferString("")

@@ -319,7 +319,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper
str := space.ReplaceAllString(buff.String(), " ")
actual := strings.TrimSpace(str)
So(actual, ShouldContainSubstring, "REPOSITORY TAG STATUS AFFECTED BLOB ERROR")
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s blob: not found", layerDig))
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s blob not found", layerDig))
})

Convey("Scrub index", func() {

@@ -487,7 +487,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper
str := space.ReplaceAllString(buff.String(), " ")
actual := strings.TrimSpace(str)
So(actual, ShouldContainSubstring, "REPOSITORY TAG STATUS AFFECTED BLOB ERROR")
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s blob: not found", manifestDig))
So(actual, ShouldContainSubstring, fmt.Sprintf("test 1.0 affected %s blob not found", manifestDig))

index, err := common.GetIndex(imgStore, repoName, log)
So(err, ShouldBeNil)

@@ -497,7 +497,7 @@ func RunCheckAllBlobsIntegrityTests( //nolint: thelper

imageRes := storage.CheckLayers(context.Background(), repoName, tag, manifestDescriptor, imgStore)
So(imageRes.Status, ShouldEqual, "affected")
So(imageRes.Error, ShouldContainSubstring, "blob: not found")
So(imageRes.Error, ShouldContainSubstring, "blob not found")
})

Convey("use the result of an already scrubed manifest which is the subject of the current manifest", func() {

@@ -28,7 +28,8 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer

if config.Storage.RootDirectory == "" {
// we can't proceed without global storage
log.Error().Err(zerr.ErrImgStoreNotFound).Msg("controller: no storage config provided")
log.Error().Err(zerr.ErrImgStoreNotFound).Str("component", "controller").
Msg("no storage config provided")

return storeController, zerr.ErrImgStoreNotFound
}

@@ -69,7 +70,7 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer
// Init a Storager from connection string.
store, err := factory.Create(storeName, config.Storage.StorageDriver)
if err != nil {
log.Error().Err(err).Str("rootDir", config.Storage.RootDirectory).Msg("unable to create s3 service")
log.Error().Err(err).Str("rootDir", config.Storage.RootDirectory).Msg("failed to create s3 service")

return storeController, err
}

@@ -101,7 +102,7 @@ func New(config *config.Config, linter common.Lint, metrics monitoring.MetricSer
//nolint: contextcheck
subImageStore, err := getSubStore(config, subPaths, linter, metrics, log)
if err != nil {
log.Error().Err(err).Msg("controller: error getting sub image store")
log.Error().Err(err).Str("component", "controller").Msg("failed to get sub image store")

return storeController, err
}

@@ -138,7 +139,8 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig,
isSame, _ := config.SameFile(cfg.Storage.RootDirectory, storageConfig.RootDirectory)

if isSame {
log.Error().Err(zerr.ErrBadConfig).Msg("sub path storage directory is same as root directory")
log.Error().Err(zerr.ErrBadConfig).
Msg("invalid sub path storage directory, it must be different to the root directory")

return nil, zerr.ErrBadConfig
}

@@ -183,7 +185,7 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig,
// Init a Storager from connection string.
store, err := factory.Create(storeName, storageConfig.StorageDriver)
if err != nil {
log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("Unable to create s3 service")
log.Error().Err(err).Str("rootDir", storageConfig.RootDirectory).Msg("failed to create s3 service")

return nil, err
}

@@ -198,7 +200,7 @@ func getSubStore(cfg *config.Config, subPaths map[string]config.StorageConfig,
cacheDriver, err := CreateCacheDatabaseDriver(storageConfig, log)
if err != nil {
log.Error().Err(err).Any("config", storageConfig).
Msg("failed creating storage driver")
Msg("failed to create storage driver")

return nil, err
}

@@ -15,7 +15,7 @@ import (
"golang.org/x/crypto/bcrypt"
)

var ErrNoGoModFileFound = errors.New("test: no go.mod file found in parent directories")
var ErrNoGoModFileFound = errors.New("no go.mod file found in parent directories")

func GetProjectRootDir() (string, error) {
workDir, err := os.Getwd()

@@ -132,7 +132,7 @@ func TestReadLogFileAndSearchString(t *testing.T) {

Convey("Invalid path", t, func() {
_, err = tcommon.ReadLogFileAndSearchString("invalidPath",
"DB update completed, next update scheduled", 1*time.Second)
"cve-db update completed, next update scheduled after interval", 1*time.Second)
So(err, ShouldNotBeNil)
})

@@ -159,7 +159,7 @@ func TestReadLogFileAndCountStringOccurence(t *testing.T) {

Convey("Invalid path", t, func() {
_, err = tcommon.ReadLogFileAndCountStringOccurence("invalidPath",
"DB update completed, next update scheduled", 1*time.Second, 1)
"cve-db update completed, next update scheduled after interval", 1*time.Second, 1)
So(err, ShouldNotBeNil)
})

@@ -108,12 +108,12 @@ func (olu BaseOciLayoutUtils) GetImageManifests(repo string) ([]ispec.Descriptor
buf, err := imageStore.GetIndexContent(repo)
if err != nil {
if goerrors.Is(zerr.ErrRepoNotFound, err) {
olu.Log.Error().Err(err).Msg("index.json doesn't exist")
olu.Log.Error().Err(err).Msg("failed to get index.json contents because the file is missing")

return nil, zerr.ErrRepoNotFound
}

olu.Log.Error().Err(err).Msg("unable to open index.json")
olu.Log.Error().Err(err).Msg("failed to open index.json")

return nil, zerr.ErrRepoNotFound
}

@@ -121,7 +121,8 @@ func (olu BaseOciLayoutUtils) GetImageManifests(repo string) ([]ispec.Descriptor
var index ispec.Index

if err := json.Unmarshal(buf, &index); err != nil {
olu.Log.Error().Err(err).Str("dir", path.Join(imageStore.RootDir(), repo)).Msg("invalid JSON")
olu.Log.Error().Err(err).Str("dir", path.Join(imageStore.RootDir(), repo)).
Msg("failed to unmarshal json")

return nil, zerr.ErrRepoNotFound
}

@@ -141,13 +142,13 @@ func (olu BaseOciLayoutUtils) GetImageBlobManifest(repo string, digest godigest.

blobBuf, err := imageStore.GetBlobContent(repo, digest)
if err != nil {
olu.Log.Error().Err(err).Msg("unable to open image metadata file")
olu.Log.Error().Err(err).Msg("failed to open image metadata file")

return blobIndex, err
}

if err := json.Unmarshal(blobBuf, &blobIndex); err != nil {
olu.Log.Error().Err(err).Msg("unable to marshal blob index")
olu.Log.Error().Err(err).Msg("failed to marshal blob index")

return blobIndex, err
}

@@ -167,13 +168,13 @@ func (olu BaseOciLayoutUtils) GetImageInfo(repo string, configDigest godigest.Di

blobBuf, err := imageStore.GetBlobContent(repo, configDigest)
if err != nil {
olu.Log.Error().Err(err).Msg("unable to open image layers file")
olu.Log.Error().Err(err).Msg("failed to open image layers file")

return imageInfo, err
}

if err := json.Unmarshal(blobBuf, &imageInfo); err != nil {
olu.Log.Error().Err(err).Msg("unable to marshal blob index")
olu.Log.Error().Err(err).Msg("failed to marshal blob index")

return imageInfo, err
}

@@ -187,7 +188,7 @@ func (olu BaseOciLayoutUtils) GetImageTagsWithTimestamp(repo string) ([]cvemodel

manifests, err := olu.GetImageManifests(repo)
if err != nil {
olu.Log.Error().Err(err).Msg("unable to read image manifests")
olu.Log.Error().Err(err).Msg("failed to read image manifests")

return tagsInfo, err
}

@@ -199,14 +200,14 @@ func (olu BaseOciLayoutUtils) GetImageTagsWithTimestamp(repo string) ([]cvemodel
if ok {
imageBlobManifest, err := olu.GetImageBlobManifest(repo, digest)
if err != nil {
olu.Log.Error().Err(err).Msg("unable to read image blob manifest")
olu.Log.Error().Err(err).Msg("failed to read image blob manifest")

return tagsInfo, err
}

imageInfo, err := olu.GetImageInfo(repo, imageBlobManifest.Config.Digest)
if err != nil {
olu.Log.Error().Err(err).Msg("unable to read image info")
olu.Log.Error().Err(err).Msg("failed to read image info")

return tagsInfo, err
}

@@ -327,7 +328,7 @@ func (olu BaseOciLayoutUtils) GetImageManifestSize(repo string, manifestDigest g

manifestBlob, err := imageStore.GetBlobContent(repo, manifestDigest)
if err != nil {
olu.Log.Error().Err(err).Msg("error when getting manifest blob content")
olu.Log.Error().Err(err).Msg("failed to get manifest blob content")

return int64(len(manifestBlob))
}

@@ -338,7 +339,7 @@ func (olu BaseOciLayoutUtils) GetImageManifestSize(repo string, manifestDigest g
func (olu BaseOciLayoutUtils) GetImageConfigSize(repo string, manifestDigest godigest.Digest) int64 {
imageBlobManifest, err := olu.GetImageBlobManifest(repo, manifestDigest)
if err != nil {
olu.Log.Error().Err(err).Msg("can't get image blob manifest")
olu.Log.Error().Err(err).Msg("failed to get image blob manifest")

return 0
}

@@ -368,14 +369,14 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(repoName string) (common.RepoI

manifestList, err := olu.GetImageManifests(repoName)
if err != nil {
olu.Log.Error().Err(err).Msg("error getting image manifests")
olu.Log.Error().Err(err).Msg("failed to get image manifests")

return common.RepoInfo{}, err
}

lastUpdatedTag, err := olu.GetRepoLastUpdated(repoName)
if err != nil {
olu.Log.Error().Err(err).Str("repository", repoName).Msg("can't get last updated manifest for repo")
olu.Log.Error().Err(err).Str("repository", repoName).Msg("failed to get last updated manifest for repo")

return common.RepoInfo{}, err
}

@@ -398,7 +399,7 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(repoName string) (common.RepoI

manifest, err := olu.GetImageBlobManifest(repoName, man.Digest)
if err != nil {
olu.Log.Error().Err(err).Msg("error getting image manifest blob")
olu.Log.Error().Err(err).Msg("failed to get image manifest blob")

return common.RepoInfo{}, err
}

@@ -415,7 +416,7 @@ func (olu BaseOciLayoutUtils) GetExpandedRepoInfo(repoName string) (common.RepoI
imageConfigInfo, err := olu.GetImageConfigInfo(repoName, man.Digest)
if err != nil {
olu.Log.Error().Err(err).Str("repository", repoName).Str("manifest digest", man.Digest.String()).
Msg("can't retrieve config info for the image")
Msg("failed to retrieve config info for the image")

continue
}

@@ -580,7 +581,7 @@ func (olu BaseOciLayoutUtils) ExtractImageDetails(
) {
manifest, dig, err := olu.GetImageManifest(repo, tag)
if err != nil {
log.Error().Err(err).Msg("Could not retrieve image ispec manifest")
log.Error().Err(err).Msg("failed to retrieve image manifest")

return "", nil, nil, err
}

@@ -589,7 +590,7 @@ func (olu BaseOciLayoutUtils) ExtractImageDetails(

imageConfig, err := olu.GetImageConfigInfo(repo, digest)
if err != nil {
log.Error().Err(err).Msg("Could not retrieve image config")
log.Error().Err(err).Msg("failed to retrieve image config")

return "", nil, nil, err
}

scripts/check_logs.sh (new executable file, 95 lines)
@@ -0,0 +1,95 @@
#!/bin/bash

# Colors for terminal
if test -t 1; then
    # check if it supports colors
    ncolors=$(tput colors)
    if test -n "$ncolors" && test $ncolors -ge 8; then
        NC="$(tput sgr0)"       # no color
        RED="$(tput setaf 1)"   # red
        WHITE="$(tput setaf 7)" # white
    fi
fi

exception="(HTTP|OpenID|OAuth|TLS|API|ID)"

# the "nolint: check-logs" comment should be placed on the last line that the linter matches
exclude_linter="(?!.*//nolint: check-logs)"

function lintLogContainingUpperCase {
    word_char="[\.-0-9a-z ]"
    capital_word="([a-z]*[A-Z][a-zA-Z]*)"

    grep --with-filename -n -P "Msg[f]?\(\"(($word_char|$exception)*)(?!$exception)($capital_word)($exclude_linter)" $1
}

# lintLogStartingWithUpperCase searches for log messages that start with an upper case letter
function lintLogStartingWithUpperCase {
    grep --with-filename -n "Msg[f]\?(\"[A-Z]" $1 | grep -v -P "Msg[f]?\(\"$exception($exclude_linter)"
}

# lintLogStartingWithComponent searches for log messages that start with a component prefix ("component:")
function lintLogStartingWithComponent {
    # We check several functions that can generate errors or logs. If the message starts with
    # a number of words followed by ":", it is considered to start with a component.
    # Examples: '.Msgf("component:")', '.Errorf("com ponent:")', '.Msg("com-ponent:")'
    grep --with-filename -n -E "(Errorf|errors.New|Msg[f]?)\(\"[a-zA-Z-]+( [a-zA-Z-]+){0,1}:($exclude_linter)" $1
}

# lintErrorLogsBeggining searches for error log messages that do not start with an accepted
# prefix ("failed to", "failed due", "invalid", "unexpected", "unsupported")
function lintErrorLogsBeggining {
    grep --with-filename -n -P "Error\(\)(?:.*)\n?.(?:.*)Msg[f]?\(\"(?!(failed to|failed due|invalid|unexpected|unsupported))($exclude_linter)" $1
}

function printLintError {
    errReason=$1
    errPathAndContent=$2

    IFS=':' read -r errPath errLine errLineContent <<< "$errPathAndContent"
    errLocation="$errPath:$errLine"

    if test -t 1; then
        echo -e "${WHITE}$errLocation${NC}: ${RED}$errReason${NC}\n\t$errLineContent"
    else
        echo "$errLocation: $errReason: $errLineContent"
    fi
}

files=$(find . -name '*.go' | grep -v '_test.go')

found_linting_error=false

for file in $files
do
    lintOutput=$(lintLogStartingWithUpperCase "$file")
    if [ $? -eq 0 ]; then
        found_linting_error=true
        while IFS= read -r line; do
            printLintError "Log message should not start with a CAPITAL letter" "$(echo $line | tr -s [:space:])"
        done <<< "$lintOutput"
    fi

    lintOutput=$(lintLogStartingWithComponent "$file")
    if [ $? -eq 0 ]; then
        found_linting_error=true
        while IFS= read -r line; do
            printLintError "Log message should not start with the component (ex: 'component:', 'mixed component-with-dash:')" \
                "$(echo $line | tr -s [:space:])"
        done <<< "$lintOutput"
    fi

    lintOutput=$(lintErrorLogsBeggining "$file")
    if [ $? -eq 0 ]; then
        found_linting_error=true
        while IFS= read -r line; do
            printLintError "Error messages should start with 'failed to'" \
                "$(echo $line | tr -s [:space:])"
        done <<< "$lintOutput"
    fi
done

if [ $found_linting_error = true ]; then
    exit 1
fi

exit 0
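Put together, the script flags Msg()/Msgf() strings that start with a capital letter, message or error strings that start with a "component:" style prefix, and Error()-level messages that do not begin with one of the accepted prefixes ("failed to", "failed due", "invalid", "unexpected", "unsupported"). It scans every non-test .go file and exits non-zero if any rule matches, so it can be run standalone from the repository root as ./scripts/check_logs.sh. A hypothetical Go snippet (the calls below are made up for illustration, not taken from the codebase) showing what is flagged, what passes, and how the //nolint: check-logs escape hatch is used:

    // flagged: starts with a capital letter and with a "component:" prefix
    log.Error().Err(err).Msg("Dedupe: cannot stat blob")

    // passes: component as a structured field, message starts with "failed to"
    log.Error().Err(err).Str("component", "dedupe").Msg("failed to stat blob")

    // would be flagged for not starting with an accepted prefix;
    // the trailing comment tells the linter to skip this line
    log.Error().Err(err).Msg("cache miss for blob, falling back to storage") //nolint: check-logs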