zot/test/scale-out/cloud_scale_out_no_auth.bats
Vishwas R 5ae7a028d9
feat(cluster): Add support for request proxying for scale out (#2385)
* feat(cluster): initial commit for scale-out cluster

Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>

* feat(cluster): support shared storage scale out

This change introduces support for shared storage backed
zot cluster scale out.

New feature
Multiple stateless zot instances can run against the same shared storage
backend. Each instance serves a specific subset of repositories, selected by
a siphash of the repository name, so load is distributed across instances and
the deployment can scale out. For a given configuration, only one instance
performs dist-spec reads/writes on a given repository.

What's changed?
- introduced a transparent request proxy for dist-spec endpoints, routed by
  the siphash of the repository name.
- added a new scale-out cluster config that specifies the list of cluster
  members.

Signed-off-by: Vishwas Rajashekar <vrajashe@cisco.com>

---------

Signed-off-by: Ramkumar Chinchani <rchincha@cisco.com>
Signed-off-by: Vishwas Rajashekar <vrajashe@cisco.com>
Co-authored-by: Ramkumar Chinchani <rchincha@cisco.com>
2024-05-20 09:05:21 -07:00


# note: intended to be run as "make run-cloud-scale-out-tests"
# the Makefile target installs & checks all necessary tooling
# extra tools that are not covered by the Makefile target need to be added in verify_prerequisites()
NUM_ZOT_INSTANCES=6
ZOT_LOG_DIR=/tmp/zot-ft-logs/no-auth
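
# helper libraries: helpers_zot provides the zot config generation and serve/stop
# helpers used below, helpers_cloud sets up and tears down the S3 bucket and
# DynamoDB tables, and helpers_haproxy generates and manages the HAProxy frontend.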
load helpers_zot
load helpers_cloud
load helpers_haproxy
function launch_zot_server() {
  local zot_server_address=${1}
  local zot_server_port=${2}
  local zot_root_dir=${ZOT_ROOT_DIR}

  mkdir -p ${zot_root_dir}
  mkdir -p ${ZOT_LOG_DIR}

  local zot_config_file="${BATS_FILE_TMPDIR}/zot_config_${zot_server_address}_${zot_server_port}.json"
  local zot_log_file="${ZOT_LOG_DIR}/zot-${zot_server_address}-${zot_server_port}.log"

  create_zot_cloud_base_config_file ${zot_server_address} ${zot_server_port} ${zot_root_dir} ${zot_config_file} ${zot_log_file}
  update_zot_cluster_member_list_in_config_file ${zot_config_file} ${ZOT_CLUSTER_MEMBERS_PATCH_FILE}

  echo "launching zot server ${zot_server_address}:${zot_server_port}" >&3
  echo "config file: ${zot_config_file}" >&3
  echo "log file: ${zot_log_file}" >&3

  zot_serve ${zot_config_file}
  wait_zot_reachable ${zot_server_port}
}
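
# For illustration only (not the real helpers): create_zot_cloud_base_config_file and
# update_zot_cluster_member_list_in_config_file live in helpers_zot and emit the full
# S3/DynamoDB-backed config. The hypothetical function below only sketches the general
# shape of a patched per-instance config; the "cluster" member list and hash key reflect
# zot's scale-out settings, everything else is trimmed and the exact schema may differ.
function example_write_minimal_cluster_config() {
  local config_file=${1}
  cat > ${config_file} <<'EOF'
{
  "distSpecVersion": "1.1.0",
  "http": { "address": "127.0.0.1", "port": "10000" },
  "storage": { "rootDirectory": "/tmp/zot" },
  "cluster": {
    "members": ["127.0.0.1:10000", "127.0.0.1:10001"],
    "hashKey": "loremipsumdolors"
  }
}
EOF
}
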
function setup() {
  # verify prerequisites are available
  if ! verify_prerequisites; then
    exit 1
  fi

  # setup the S3 bucket and DynamoDB tables
  setup_cloud_services

  generate_zot_cluster_member_list ${NUM_ZOT_INSTANCES} ${ZOT_CLUSTER_MEMBERS_PATCH_FILE}

  for ((i = 0; i < ${NUM_ZOT_INSTANCES}; i++)); do
    launch_zot_server 127.0.0.1 $(( 10000 + $i ))
  done

  # list all zot processes that were started
  ps -ef | grep ".*zot.*serve.*" | grep -v grep >&3

  generate_haproxy_config ${HAPROXY_CFG_FILE} "http"
  haproxy_start ${HAPROXY_CFG_FILE}

  # list the HAProxy processes that were started
  ps -ef | grep "haproxy" | grep -v grep >&3
}
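
# Rough illustration only: generate_haproxy_config (helpers_haproxy) produces the real
# config. The hypothetical function below shows the kind of haproxy.cfg this test relies
# on for "http" mode: a frontend listening on port 8000 (the address zb targets below)
# that load balances across the zot instances started above.
function example_write_minimal_haproxy_config() {
  local cfg_file=${1}
  cat > ${cfg_file} <<'EOF'
defaults
    mode http
    timeout connect 5s
    timeout client  30s
    timeout server  30s

frontend zot_frontend
    bind 127.0.0.1:8000
    default_backend zot_cluster

backend zot_cluster
    balance roundrobin
    server zot0 127.0.0.1:10000
    server zot1 127.0.0.1:10001
    server zot2 127.0.0.1:10002
    server zot3 127.0.0.1:10003
    server zot4 127.0.0.1:10004
    server zot5 127.0.0.1:10005
EOF
}
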
function teardown() {
  local zot_root_dir=${ZOT_ROOT_DIR}

  haproxy_stop_all
  zot_stop_all

  rm -rf ${zot_root_dir}
  teardown_cloud_services
}
@test "Check for successful zb run on haproxy frontend" {
  # zb_run <test_name> <zot_address> <concurrency> <num_requests> <credentials (optional)>
  zb_run "cloud-scale-out-no-auth-bats" "http://127.0.0.1:8000" 3 5
}
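
# zb_run (helpers_zot) wraps zot's "zb" benchmark tool; the hypothetical function below
# is never called and only illustrates roughly what an equivalent direct invocation
# against the HAProxy frontend could look like (flag names assumed, not verified here).
function example_run_zb_directly() {
  zb -c 3 -n 5 http://127.0.0.1:8000
}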