Mirror of https://github.com/project-zot/zot.git, synced 2025-01-13 22:50:38 -05:00
GetCatalog() - Populate _catalog with mix of many small, medium and large images

Signed-off-by: Roxana Nemulescu <roxana.nemulescu@gmail.com>

parent cda1f4989d
commit 8ed34608e4

4 changed files with 114 additions and 105 deletions
.github/workflows/benchmark.yaml (vendored, 2 changes)

@@ -26,7 +26,7 @@ jobs:
         uses: actions/cache@v3
         with:
           path: ./cache
-          key: ${{ runner.os }}-benchmark
+          key: ${{ runner.os }}-gen1-benchmark
       # Run `github-action-benchmark` action
       - name: Store benchmark result
         uses: benchmark-action/github-action-benchmark@v1.14.0
.github/workflows/cluster.yaml (vendored, 2 changes)

@@ -167,7 +167,7 @@ jobs:
         uses: actions/cache@v3
         with:
           path: ./cache
-          key: ${{ runner.os }}-benchmark-stateless-cluster
+          key: ${{ runner.os }}-gen1-benchmark-stateless-cluster
       # Run `github-action-benchmark` action
       - name: Store benchmark result
         uses: benchmark-action/github-action-benchmark@v1.14.0
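The only change in both workflows is the cache key, from ${{ runner.os }}-benchmark(-stateless-cluster) to a gen1-prefixed variant; since actions/cache restores entries by key, the rename effectively starts a fresh stored benchmark baseline under the new key (assuming these steps rely on exact key matches rather than a restore-keys fallback).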
cmd/zb/helper.go (136 changes)

@@ -7,6 +7,7 @@ import (
     "io"
     "io/ioutil"
     "log"
+    mrand "math/rand"
     "net/http"
     "os"
     "path"
@@ -64,23 +65,7 @@ func pullAndCollect(url string, repos []string, manifestItem manifestStruct,
         }()

         if config.mixedSize {
-            smallSizeIdx := 0
-            mediumSizeIdx := 1
-            largeSizeIdx := 2
-
-            idx := flipFunc(config.probabilityRange)
-
-            switch idx {
-            case smallSizeIdx:
-                current := loadOrStore(&statusRequests, "1MB", 0)
-                statusRequests.Store("1MB", current+1)
-            case mediumSizeIdx:
-                current := loadOrStore(&statusRequests, "10MB", 0)
-                statusRequests.Store("10MB", current+1)
-            case largeSizeIdx:
-                current := loadOrStore(&statusRequests, "100MB", 0)
-                statusRequests.Store("100MB", current+1)
-            }
+            _, idx := getRandomSize(config.probabilityRange)

             manifestHash = manifestBySizeHash[idx]
         }
@@ -256,7 +241,7 @@ func pullAndCollect(url string, repos []string, manifestItem manifestStruct,
     return repos
 }

-func pushMonolithImage(workdir, url, trepo string, repos []string, size int,
+func pushMonolithImage(workdir, url, trepo string, repos []string, config testConfig,
     client *resty.Client,
 ) (map[string]string, []string, error) {
     var statusCode int
@@ -292,6 +277,15 @@ func pushMonolithImage(workdir, url, trepo string, repos []string, size int,
     }

     loc := test.Location(url, resp)
+
+    var size int
+
+    if config.size == 0 {
+        size, _ = getRandomSize(config.probabilityRange)
+    } else {
+        size = config.size
+    }
+
     blob := path.Join(workdir, fmt.Sprintf("%d.blob", size))

     fhandle, err := os.OpenFile(blob, os.O_RDONLY, defaultFilePerms)
@@ -465,27 +459,7 @@ func pushMonolithAndCollect(workdir, url, trepo string, count int,
         var size int

         if config.mixedSize {
-            idx := flipFunc(config.probabilityRange)
-            smallSizeIdx := 0
-            mediumSizeIdx := 1
-            largeSizeIdx := 2
-
-            switch idx {
-            case smallSizeIdx:
-                size = smallBlob
-                current := loadOrStore(&statusRequests, "1MB", 0)
-                statusRequests.Store("1MB", current+1)
-            case mediumSizeIdx:
-                size = mediumBlob
-                current := loadOrStore(&statusRequests, "10MB", 0)
-                statusRequests.Store("10MB", current+1)
-            case largeSizeIdx:
-                size = largeBlob
-                current := loadOrStore(&statusRequests, "100MB", 0)
-                statusRequests.Store("100MB", current+1)
-            default:
-                size = config.size
-            }
+            size, _ = getRandomSize(config.probabilityRange)
         } else {
             size = config.size
         }
@@ -689,27 +663,7 @@ func pushChunkAndCollect(workdir, url, trepo string, count int,
         var size int

         if config.mixedSize {
-            idx := flipFunc(config.probabilityRange)
-            smallSizeIdx := 0
-            mediumSizeIdx := 1
-            largeSizeIdx := 2
-
-            switch idx {
-            case smallSizeIdx:
-                size = smallBlob
-                current := loadOrStore(&statusRequests, "1MB", 0)
-                statusRequests.Store("1MB", current+1)
-            case mediumSizeIdx:
-                size = mediumBlob
-                current := loadOrStore(&statusRequests, "10MB", 0)
-                statusRequests.Store("10MB", current+1)
-            case largeSizeIdx:
-                size = largeBlob
-                current := loadOrStore(&statusRequests, "100MB", 0)
-                statusRequests.Store("100MB", current+1)
-            default:
-                size = config.size
-            }
+            size, _ = getRandomSize(config.probabilityRange)
         } else {
             size = config.size
         }
@@ -920,6 +874,68 @@ func pushChunkAndCollect(workdir, url, trepo string, count int,
     return repos
 }

+func getRandomSize(probabilityRange []float64) (int, int) {
+    var size int
+
+    idx := flipFunc(probabilityRange)
+    smallSizeIdx := 0
+    mediumSizeIdx := 1
+    largeSizeIdx := 2
+
+    switch idx {
+    case smallSizeIdx:
+        size = smallBlob
+        current := loadOrStore(&statusRequests, "1MB", 0)
+        statusRequests.Store("1MB", current+1)
+    case mediumSizeIdx:
+        size = mediumBlob
+        current := loadOrStore(&statusRequests, "10MB", 0)
+        statusRequests.Store("10MB", current+1)
+    case largeSizeIdx:
+        size = largeBlob
+        current := loadOrStore(&statusRequests, "100MB", 0)
+        statusRequests.Store("100MB", current+1)
+    default:
+        size = 0
+    }
+
+    return size, idx
+}
+
+// nolint:gosec
+func flipFunc(probabilityRange []float64) int {
+    mrand.Seed(time.Now().UTC().UnixNano())
+    toss := mrand.Float64()
+
+    for idx, r := range probabilityRange {
+        if toss < r {
+            return idx
+        }
+    }
+
+    return len(probabilityRange) - 1
+}
+
+// pbty - probabilities.
+func normalizeProbabilityRange(pbty []float64) []float64 {
+    dim := len(pbty)
+
+    // npd - normalized probability density
+    npd := make([]float64, dim)
+
+    for idx := range pbty {
+        npd[idx] = 0.0
+    }
+
+    // [0.2, 0.7, 0.1] -> [0.2, 0.9, 1]
+    npd[0] = pbty[0]
+    for i := 1; i < dim; i++ {
+        npd[i] = npd[i-1] + pbty[i]
+    }
+
+    return npd
+}
+
 func loadOrStore(statusRequests *sync.Map, key string, value int) int {
     val, _ := statusRequests.LoadOrStore(key, value)
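The three helpers added above implement weighted random size selection: normalizeProbabilityRange turns per-size probabilities into a cumulative range, flipFunc draws a uniform sample and returns the first bucket whose cumulative bound exceeds it, and getRandomSize maps that bucket to a blob size while bumping the per-size request counters. A minimal, self-contained sketch of the same technique (illustrative only, not the committed code; the 1MB/10MB/100MB constants are assumed stand-ins for zb's smallBlob/mediumBlob/largeBlob):

package main

import (
    "fmt"
    "math/rand"
)

// Assumed stand-ins for the smallBlob/mediumBlob/largeBlob constants in cmd/zb.
const (
    smallBlob  = 1 * 1024 * 1024   // ~1MB
    mediumBlob = 10 * 1024 * 1024  // ~10MB
    largeBlob  = 100 * 1024 * 1024 // ~100MB
)

// normalizeProbabilityRange turns per-bucket probabilities into a cumulative range,
// e.g. [0.7, 0.2, 0.1] -> [0.7, 0.9, 1.0].
func normalizeProbabilityRange(pbty []float64) []float64 {
    npd := make([]float64, len(pbty))
    npd[0] = pbty[0]
    for i := 1; i < len(pbty); i++ {
        npd[i] = npd[i-1] + pbty[i]
    }
    return npd
}

// flipFunc draws a uniform sample in [0,1) and returns the first bucket whose
// cumulative bound exceeds it.
func flipFunc(probabilityRange []float64) int {
    toss := rand.Float64()
    for idx, r := range probabilityRange {
        if toss < r {
            return idx
        }
    }
    return len(probabilityRange) - 1
}

// getRandomSize maps the chosen bucket index to a blob size.
func getRandomSize(probabilityRange []float64) (int, int) {
    sizes := []int{smallBlob, mediumBlob, largeBlob}
    idx := flipFunc(probabilityRange)
    return sizes[idx], idx
}

func main() {
    pr := normalizeProbabilityRange([]float64{0.7, 0.2, 0.1})
    counts := make([]int, 3)
    for i := 0; i < 10000; i++ {
        _, idx := getRandomSize(pr)
        counts[idx]++
    }
    // Expect roughly 70% / 20% / 10%.
    fmt.Println("1MB:", counts[0], "10MB:", counts[1], "100MB:", counts[2])
}

Over 10,000 draws with the [0.7, 0.2, 0.1] range used by the Get Catalog test, the counts come out near 7000/2000/1000.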
(fourth changed file)

@@ -7,7 +7,6 @@ import (
     "io/ioutil"
     "log"
     "math/big"
-    mrand "math/rand"
     "net"
     "net/http"
     urlparser "net/url"
@@ -263,40 +262,6 @@ func printStats(requests int, summary *statsSummary, outFmt string) {
         }
     }

-// nolint:gosec
-func flipFunc(probabilityRange []float64) int {
-    mrand.Seed(time.Now().UTC().UnixNano())
-    toss := mrand.Float64()
-
-    for idx, r := range probabilityRange {
-        if toss < r {
-            return idx
-        }
-    }
-
-    return len(probabilityRange) - 1
-}
-
-// pbty - probabilities.
-func normalizeProbabilityRange(pbty []float64) []float64 {
-    dim := len(pbty)
-
-    // npd - normalized probability density
-    npd := make([]float64, dim)
-
-    for idx := range pbty {
-        npd[idx] = 0.0
-    }
-
-    // [0.2, 0.7, 0.1] -> [0.2, 0.9, 1]
-    npd[0] = pbty[0]
-    for i := 1; i < dim; i++ {
-        npd[i] = npd[i-1] + pbty[i]
-    }
-
-    return npd
-}
-
 // test suites/funcs.

 type testFunc func(
@@ -307,6 +272,7 @@ type testFunc func(
     client *resty.Client,
 ) error

+// nolint:gosec
 func GetCatalog(
     workdir, url, repo string,
     requests int,
@@ -314,6 +280,20 @@ func GetCatalog(
     statsCh chan statsRecord,
     client *resty.Client,
 ) error {
+    var repos []string
+
+    var err error
+
+    statusRequests = sync.Map{}
+
+    for count := 0; count < requests; count++ {
+        // Push random blob
+        _, repos, err = pushMonolithImage(workdir, url, repo, repos, config, client)
+        if err != nil {
+            return err
+        }
+    }
+
     for count := 0; count < requests; count++ {
         func() {
             start := time.Now()
@@ -355,6 +335,12 @@ func GetCatalog(
         }()
     }

+    // clean up
+    err = deleteTestRepo(repos, url, client)
+    if err != nil {
+        return err
+    }
+
     return nil
 }

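Taken together, the GetCatalog changes mean the catalog under test is no longer empty or uniform: before any timed request, one image per request is pushed with a randomly drawn size (the Get Catalog testSuite entry sets no size, so config.size is 0 and pushMonolithImage calls getRandomSize), the _catalog listing is then fetched requests times against that mix, and the pushed repositories are deleted at the end. With, say, requests = 100 and the 0.7/0.2/0.1 range, roughly 70 one-MB, 20 ten-MB and 10 hundred-MB images populate the catalog.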
@@ -436,25 +422,31 @@ func Pull(
         mediumSizeIdx := 1
         largeSizeIdx := 2

+        config.size = smallBlob
+
         // Push small blob
-        manifestBySize, repos, err := pushMonolithImage(workdir, url, trepo, repos, smallBlob, client)
+        manifestBySize, repos, err := pushMonolithImage(workdir, url, trepo, repos, config, client)
         if err != nil {
             return err
         }

         manifestBySizeHash[smallSizeIdx] = manifestBySize

+        config.size = mediumBlob
+
         // Push medium blob
-        manifestBySize, repos, err = pushMonolithImage(workdir, url, trepo, repos, mediumBlob, client)
+        manifestBySize, repos, err = pushMonolithImage(workdir, url, trepo, repos, config, client)
         if err != nil {
             return err
         }

         manifestBySizeHash[mediumSizeIdx] = manifestBySize

+        config.size = largeBlob
+
         // Push large blob
         // nolint: ineffassign, staticcheck, wastedassign
-        manifestBySize, repos, err = pushMonolithImage(workdir, url, trepo, repos, largeBlob, client)
+        manifestBySize, repos, err = pushMonolithImage(workdir, url, trepo, repos, config, client)
         if err != nil {
             return err
         }
@@ -463,7 +455,7 @@ func Pull(
     } else {
         // Push blob given size
         var err error
-        manifestHash, repos, err = pushMonolithImage(workdir, url, trepo, repos, config.size, client)
+        manifestHash, repos, err = pushMonolithImage(workdir, url, trepo, repos, config, client)
         if err != nil {
             return err
         }
@@ -500,7 +492,7 @@ func MixedPullAndPush(
     statusRequests = sync.Map{}

     // Push blob given size
-    manifestHash, repos, err := pushMonolithImage(workdir, url, trepo, repos, config.size, client)
+    manifestHash, repos, err := pushMonolithImage(workdir, url, trepo, repos, config, client)
     if err != nil {
         return err
     }
@@ -548,8 +540,9 @@ type testConfig struct {

 var testSuite = []testConfig{ // nolint:gochecknoglobals // used only in this test
     {
         name:  "Get Catalog",
         tfunc: GetCatalog,
+        probabilityRange: normalizeProbabilityRange([]float64{0.7, 0.2, 0.1}),
     },
     {
         name:  "Push Monolith 1MB",
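The probabilityRange field is the only knob the Get Catalog entry sets: normalizeProbabilityRange([]float64{0.7, 0.2, 0.1}) yields the cumulative range [0.7, 0.9, 1.0], and because size stays at its zero value, every pushed image gets a random size, about 70% small (the 1MB bucket), 20% medium (10MB) and 10% large (100MB). This is also why the Perf hunk below now flags runs with size == 0 as mixedSize in the summary.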
@@ -703,7 +696,7 @@ func Perf(
     summary.total = time.Since(start)
     summary.rps = float32(requests) / float32(summary.total.Seconds())

-    if tconfig.mixedSize {
+    if tconfig.mixedSize || tconfig.size == 0 {
         summary.mixedSize = true
     }