valkey/tests/cluster/cluster.tcl
# Cluster-specific test functions.
#
# Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com
# This software is released under the BSD License. See the COPYING file for
# more information.

# Track cluster configuration as created by create_cluster below
set ::cluster_master_nodes 0
set ::cluster_replica_nodes 0

# Returns a parsed CLUSTER NODES output as a list of dictionaries. An optional
# status field can be specified to only return entries that match the provided
# status.
proc get_cluster_nodes {id {status "*"}} {
    set lines [split [R $id cluster nodes] "\r\n"]
    set nodes {}
    foreach l $lines {
        set l [string trim $l]
        if {$l eq {}} continue
        set args [split $l]
        set node [dict create \
            id [lindex $args 0] \
            addr [lindex $args 1] \
            flags [split [lindex $args 2] ,] \
            slaveof [lindex $args 3] \
            ping_sent [lindex $args 4] \
            pong_recv [lindex $args 5] \
            config_epoch [lindex $args 6] \
            linkstate [lindex $args 7] \
            slots [lrange $args 8 end] \
        ]
        if {[string match $status [lindex $args 7]]} {
            lappend nodes $node
        }
    }
    return $nodes
}
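
# Illustrative usage sketch (not part of the test flow): list the address of
# every node that instance 0 currently sees with a "connected" link state.
#
#   foreach n [get_cluster_nodes 0 connected] {
#       puts [dict get $n addr]
#   }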

# Test node for flag.
proc has_flag {node flag} {
    expr {[lsearch -exact [dict get $node flags] $flag] != -1}
}

# Returns the parsed myself node entry as a dictionary.
proc get_myself id {
    set nodes [get_cluster_nodes $id]
    foreach n $nodes {
        if {[has_flag $n myself]} {return $n}
    }
    return {}
}
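
# Illustrative combination of the helpers above: check whether the node
# backing instance 0 currently advertises itself as a master.
#
#   set me [get_myself 0]
#   if {[has_flag $me master]} {
#       puts "instance 0 is a master"
#   }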

# Get a specific node by ID by parsing the CLUSTER NODES output
# of instance number 'instance_id'.
proc get_node_by_id {instance_id node_id} {
    set nodes [get_cluster_nodes $instance_id]
    foreach n $nodes {
        if {[dict get $n id] eq $node_id} {return $n}
    }
    return {}
}

# Return the value of the specified CLUSTER INFO field.
proc CI {n field} {
    get_info_field [R $n cluster info] $field
}

# Return the value of the specified INFO field.
proc s {n field} {
    get_info_field [R $n info] $field
}
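
# Illustrative usage: read single fields from node 0. The field names below
# (cluster_known_nodes from CLUSTER INFO, role from INFO) are standard server
# fields, shown here only as examples.
#
#   set known [CI 0 cluster_known_nodes]
#   set role  [s 0 role]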

# Assuming nodes are reset, this function performs slots allocation.
# Only the first 'n' nodes are used.
proc cluster_allocate_slots {n} {
    set slot 16383
    while {$slot >= 0} {
        # Allocate successive slots to random nodes.
        set node [randomInt $n]
        lappend slots_$node $slot
        incr slot -1
    }
    for {set j 0} {$j < $n} {incr j} {
        R $j cluster addslots {*}[set slots_${j}]
    }
}

# Check that cluster nodes agree about "state", or raise an error.
proc assert_cluster_state {state} {
    foreach_valkey_id id {
        if {[instance_is_killed valkey $id]} continue
        wait_for_condition 1000 50 {
            [CI $id cluster_state] eq $state
        } else {
            fail "Cluster node $id cluster_state:[CI $id cluster_state]"
        }
    }
}

# Search for the first node, starting from ID $first, that is not
# already configured as a slave.
proc cluster_find_available_slave {first} {
    foreach_valkey_id id {
        if {$id < $first} continue
        if {[instance_is_killed valkey $id]} continue
        set me [get_myself $id]
        if {[dict get $me slaveof] eq {-}} {return $id}
    }
    fail "No available slaves"
}

# Add 'slaves' slaves to a cluster composed of 'masters' masters.
# It assumes that masters are allocated sequentially from instance ID 0
# to N-1.
proc cluster_allocate_slaves {masters slaves} {
    for {set j 0} {$j < $slaves} {incr j} {
        set master_id [expr {$j % $masters}]
        set slave_id [cluster_find_available_slave $masters]
        set master_myself [get_myself $master_id]
        R $slave_id cluster replicate [dict get $master_myself id]
    }
}

# Create a cluster composed of the specified number of masters and slaves.
proc create_cluster {masters slaves} {
    cluster_allocate_slots $masters
    if {$slaves} {
        cluster_allocate_slaves $masters $slaves
    }
    assert_cluster_state ok
    set ::cluster_master_nodes $masters
    set ::cluster_replica_nodes $slaves
}
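
# Typical illustrative call from a test: build a 3 masters / 3 replicas
# cluster (instances 0-2 end up as masters, 3-5 as their replicas, given the
# sequential allocation above) and then wait until every node shares the same
# view of it with wait_for_cluster_propagation (defined later in this file).
#
#   create_cluster 3 3
#   wait_for_cluster_propagation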

# Allocate the 16384 slots to the first 'n' nodes as contiguous ranges of
# roughly equal size (any remainder goes to the last node).
proc cluster_allocate_with_continuous_slots {n} {
    set slot 16383
    set avg [expr ($slot+1) / $n]
    while {$slot >= 0} {
        set node [expr $slot/$avg >= $n ? $n-1 : $slot/$avg]
        lappend slots_$node $slot
        incr slot -1
    }
    for {set j 0} {$j < $n} {incr j} {
        R $j cluster addslots {*}[set slots_${j}]
    }
}
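
# Worked example of the split above: with n = 3, avg = 16384 / 3 = 5461, so
# node 0 gets slots 0-5460, node 1 gets 5461-10921 and node 2 gets
# 10922-16383 (5462 slots: slot 16383 maps to 16383/5461 = 3, which the
# ">= $n ? $n-1" clamp folds back onto the last node). With n = 4 the split
# is exactly even at 4096 slots per node.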

# Create a cluster composed of the specified number of masters and slaves,
# but with a continuous slot range.
proc cluster_create_with_continuous_slots {masters slaves} {
    cluster_allocate_with_continuous_slots $masters
    if {$slaves} {
        cluster_allocate_slaves $masters $slaves
    }
    assert_cluster_state ok
    set ::cluster_master_nodes $masters
    set ::cluster_replica_nodes $slaves
}

# Set the cluster node-timeout on all the reachable nodes.
proc set_cluster_node_timeout {to} {
    foreach_valkey_id id {
        catch {R $id CONFIG SET cluster-node-timeout $to}
    }
}

# Check if the cluster is writable and readable. Use node "id"
# as a starting point to talk with the cluster.
proc cluster_write_test {id} {
    set prefix [randstring 20 20 alpha]
    set port [get_instance_attrib valkey $id port]
    set cluster [valkey_cluster 127.0.0.1:$port]
    for {set j 0} {$j < 100} {incr j} {
        $cluster set key.$j $prefix.$j
    }
    for {set j 0} {$j < 100} {incr j} {
        assert {[$cluster get key.$j] eq "$prefix.$j"}
    }
    $cluster close
}
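
# Illustrative usage: verify that the cluster still serves reads and writes
# when queried through instance 0, e.g. right after a failover. The 100 keys
# (key.0 .. key.99) hash to many different slots, so most masters get hit.
#
#   cluster_write_test 0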

# Check if the cluster configuration is consistent.
# All the nodes in the cluster should show the same slots configuration and
# report health state "online" to be considered consistent.
proc cluster_config_consistent {} {
    for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
        # Check if all the nodes are online
        set shards_cfg [R $j CLUSTER SHARDS]
        foreach shard_cfg $shards_cfg {
            set nodes [dict get $shard_cfg nodes]
            foreach node $nodes {
                if {[dict get $node health] ne "online"} {
                    return 0
                }
            }
        }
        if {$j == 0} {
            set base_cfg [R $j cluster slots]
        } else {
            set cfg [R $j cluster slots]
            if {$cfg != $base_cfg} {
                return 0
            }
        }
    }
    return 1
}

# Wait for the cluster configuration to propagate and be consistent across nodes.
proc wait_for_cluster_propagation {} {
    wait_for_condition 1000 50 {
        [cluster_config_consistent] eq 1
    } else {
        fail "cluster config did not reach a consistent state"
    }
}
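
# Illustrative flow: after any topology change (slot assignment, a new
# replica, a forced failover), call this before asserting on the cluster
# view so the test does not race against gossip propagation.
#
#   R 3 cluster replicate $some_master_id   ;# $some_master_id is hypothetical
#   wait_for_cluster_propagation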

# Check if the cluster's view of hostnames is consistent.
proc are_hostnames_propagated {match_string} {
    for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
        set cfg [R $j cluster slots]
        foreach node $cfg {
            for {set i 2} {$i < [llength $node]} {incr i} {
                if {![string match $match_string [lindex [lindex [lindex $node $i] 3] 1]]} {
                    return 0
                }
            }
        }
    }
    return 1
}
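
# Illustrative usage: wait until every node reports hostnames matching a
# pattern (the pattern below is hypothetical) in its CLUSTER SLOTS output.
#
#   wait_for_condition 50 100 {
#       [are_hostnames_propagated "host-*.com"] eq 1
#   } else {
#       fail "cluster hostnames not propagated"
#   }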