Mirror of https://github.com/project-zot/zot.git
zb: replace map with sync.Map to avoid concurrent writes

closes #582
Signed-off-by: Petu Eusebiu <peusebiu@cisco.com>
parent eed48c1715
commit 616d5f8a6d
2 changed files with 54 additions and 22 deletions
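For context: Go's built-in maps are not safe for concurrent use, and unsynchronized writes from multiple goroutines abort the process with "fatal error: concurrent map writes". The sketch below is a minimal, hypothetical reduction of the failure mode and the sync.Map replacement this commit adopts; it is not code from zb itself.

	package main

	import "sync"

	func main() {
		// var counters = map[string]int{} // plain map: concurrent counters[k]++ is a fatal error
		var counters sync.Map // safe for concurrent use; the zero value is ready to use

		var wg sync.WaitGroup
		for i := 0; i < 100; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// Load the current count (0 if absent), then store count+1.
				val, _ := counters.LoadOrStore("1MB", 0)
				counters.Store("1MB", val.(int)+1)
			}()
		}
		wg.Wait()
	}

Note that the load-then-store pair is not a single atomic operation, so simultaneous increments can still overwrite each other; what it guarantees is that the tool no longer crashes, which is what the commit title targets.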
@@ -10,6 +10,7 @@ import (
 	"net/http"
 	"os"
 	"path"
+	"sync"
 	"time"

 	"github.com/google/uuid"
@@ -71,11 +72,14 @@ func pullAndCollect(url string, repos []string, manifestItem manifestStruct,

 		switch idx {
 		case smallSizeIdx:
-			statusRequests["1MB"]++
+			current := loadOrStore(&statusRequests, "1MB", 0)
+			statusRequests.Store("1MB", current+1)
 		case mediumSizeIdx:
-			statusRequests["10MB"]++
+			current := loadOrStore(&statusRequests, "10MB", 0)
+			statusRequests.Store("10MB", current+1)
 		case largeSizeIdx:
-			statusRequests["100MB"]++
+			current := loadOrStore(&statusRequests, "100MB", 0)
+			statusRequests.Store("100MB", current+1)
 		}

 		manifestHash = manifestBySizeHash[idx]
@@ -469,13 +473,16 @@ func pushMonolithAndCollect(workdir, url, trepo string, count int,
 	switch idx {
 	case smallSizeIdx:
 		size = smallBlob
-		statusRequests["1MB"]++
+		current := loadOrStore(&statusRequests, "1MB", 0)
+		statusRequests.Store("1MB", current+1)
 	case mediumSizeIdx:
 		size = mediumBlob
-		statusRequests["10MB"]++
+		current := loadOrStore(&statusRequests, "10MB", 0)
+		statusRequests.Store("10MB", current+1)
 	case largeSizeIdx:
 		size = largeBlob
-		statusRequests["100MB"]++
+		current := loadOrStore(&statusRequests, "100MB", 0)
+		statusRequests.Store("100MB", current+1)
 	default:
 		size = config.size
 	}
@@ -690,14 +697,16 @@ func pushChunkAndCollect(workdir, url, trepo string, count int,

 	switch idx {
 	case smallSizeIdx:
 		size = smallBlob
-		statusRequests["1MB"]++
+		current := loadOrStore(&statusRequests, "1MB", 0)
+		statusRequests.Store("1MB", current+1)
 	case mediumSizeIdx:
 		size = mediumBlob
-		statusRequests["10MB"]++
+		current := loadOrStore(&statusRequests, "10MB", 0)
+		statusRequests.Store("10MB", current+1)
 	case largeSizeIdx:
 		size = largeBlob
-		statusRequests["100MB"]++
+		current := loadOrStore(&statusRequests, "100MB", 0)
+		statusRequests.Store("100MB", current+1)
 	default:
 		size = config.size
 	}
@@ -910,3 +919,14 @@ func pushChunkAndCollect(workdir, url, trepo string, count int,

 	return repos
 }
+
+func loadOrStore(statusRequests *sync.Map, key string, value int) int {
+	val, _ := statusRequests.LoadOrStore(key, value)
+
+	intValue, ok := val.(int)
+	if !ok {
+		log.Fatalf("invalid type: %#v, should be int", val)
+	}
+
+	return intValue
+}
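The loadOrStore helper wraps sync.Map's untyped API: LoadOrStore returns an any, so the type assertion guards against a non-int value, and log.Fatalf makes a corrupted counter fail loudly. Because the helper's read and the caller's subsequent Store are separate steps, two goroutines can still interleave and drop an increment. A fully atomic variant is sketched below as a design alternative, assuming Go 1.20+ for sync.Map.CompareAndSwap; it is not part of this commit.

	// increment bumps an int counter in m atomically, retrying on contention.
	func increment(m *sync.Map, key string) {
		for {
			old, _ := m.LoadOrStore(key, 0)
			if m.CompareAndSwap(key, old, old.(int)+1) {
				return
			}
		}
	}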
@@ -48,7 +48,7 @@ const (
 var blobHash map[string]godigest.Digest = map[string]godigest.Digest{}

 // nolint:gochecknoglobals // used only in this test
-var statusRequests map[string]int
+var statusRequests sync.Map

 func setup(workingDir string) {
 	_ = os.MkdirAll(workingDir, defaultDirPerms)
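With the global now a sync.Map, the index syntax used by the old counters no longer compiles; every access goes through the method API, and stored values come back untyped. A minimal before/after sketch (the variable name matches the diff, the rest is illustrative):

	// Before: plain map with index syntax, unsafe under concurrent writers.
	//     statusRequests["1MB"]++

	// After: sync.Map methods; values are any, so readers assert the concrete type.
	statusRequests.Store("1MB", 1)
	if val, ok := statusRequests.Load("1MB"); ok {
		count, _ := val.(int)
		log.Printf("1MB:\t%v", count)
	}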
@@ -215,15 +215,25 @@ func printStats(requests int, summary *statsSummary, outFmt string) {
 	log.Printf("\n")

 	if summary.mixedSize {
-		log.Printf("1MB:\t%v", statusRequests["1MB"])
-		log.Printf("10MB:\t%v", statusRequests["10MB"])
-		log.Printf("100MB:\t%v", statusRequests["100MB"])
+		current := loadOrStore(&statusRequests, "1MB", 0)
+		log.Printf("1MB:\t%v", current)
+
+		current = loadOrStore(&statusRequests, "10MB", 0)
+		log.Printf("10MB:\t%v", current)
+
+		current = loadOrStore(&statusRequests, "100MB", 0)
+		log.Printf("100MB:\t%v", current)
+
 		log.Printf("\n")
 	}

 	if summary.mixedType {
-		log.Printf("Pull:\t%v", statusRequests["Pull"])
-		log.Printf("Push:\t%v", statusRequests["Push"])
+		pull := loadOrStore(&statusRequests, "Pull", 0)
+		log.Printf("Pull:\t%v", pull)
+
+		push := loadOrStore(&statusRequests, "Push", 0)
+		log.Printf("Push:\t%v", push)
+
 		log.Printf("\n")
 	}
@@ -358,7 +368,7 @@ func PushMonolithStreamed(
 	var repos []string

 	if config.mixedSize {
-		statusRequests = make(map[string]int)
+		statusRequests = sync.Map{}
 	}

 	for count := 0; count < requests; count++ {
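Assigning a fresh sync.Map{} resets the counters; since the zero value is empty and ready to use, no make call is needed. One caveat worth noting: sync.Map embeds a mutex, and go vet's copylocks check flags assignments that copy it, so an in-place reset is a possible alternative (a sketch, not part of this commit):

	// Reset without copying the Map's internal lock: delete every key in place.
	statusRequests.Range(func(key, _ any) bool {
		statusRequests.Delete(key)
		return true
	})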
@@ -385,7 +395,7 @@ func PushChunkStreamed(
 	var repos []string

 	if config.mixedSize {
-		statusRequests = make(map[string]int)
+		statusRequests = sync.Map{}
 	}

 	for count := 0; count < requests; count++ {
@@ -416,7 +426,7 @@ func Pull(
 	manifestBySizeHash := make(map[int](map[string]string))

 	if config.mixedSize {
-		statusRequests = make(map[string]int)
+		statusRequests = sync.Map{}
 	}

 	if config.mixedSize {
@@ -487,7 +497,7 @@ func MixedPullAndPush(
 ) error {
 	var repos []string

-	statusRequests = make(map[string]int)
+	statusRequests = sync.Map{}

 	// Push blob given size
 	manifestHash, repos, err := pushMonolithImage(workdir, url, trepo, repos, config.size, client)
@@ -507,10 +517,12 @@ func MixedPullAndPush(

 		if idx == readTestIdx {
 			repos = pullAndCollect(url, repos, manifestItem, config, client, statsCh)
-			statusRequests["Pull"]++
+			current := loadOrStore(&statusRequests, "Pull", 0)
+			statusRequests.Store("Pull", current+1)
 		} else if idx == writeTestIdx {
 			repos = pushMonolithAndCollect(workdir, url, trepo, count, repos, config, client, statsCh)
-			statusRequests["Push"]++
+			current := loadOrStore(&statusRequests, "Push", 0)
+			statusRequests.Store("Push", current+1)
 		}
 	}