mirror of
http://github.com/valkey-io/valkey
synced 2024-11-23 03:33:28 +00:00
Account for sharded pubsub channels' memory consumption (#10925)
Account for sharded pubsub channels' memory consumption in the client memory usage computation, so that clients are accurately evicted based on the configured `maxmemory-clients` threshold.
This commit is contained in:
parent
679344a2b0
commit
0ab885a685
@ -3669,9 +3669,7 @@ size_t getClientMemoryUsage(client *c, size_t *output_buffer_mem_usage) {
|
||||
|
||||
/* Add memory overhead of pubsub channels and patterns. Note: this is just the overhead of the robj pointers
|
||||
* to the strings themselves because they aren't stored per client. */
|
||||
mem += listLength(c->pubsub_patterns) * sizeof(listNode);
|
||||
mem += dictSize(c->pubsub_channels) * sizeof(dictEntry) +
|
||||
dictSlots(c->pubsub_channels) * sizeof(dictEntry*);
|
||||
mem += pubsubMemOverhead(c);
|
||||
|
||||
/* Add memory overhead of the tracking prefixes, this is an underestimation so we don't need to traverse the entire rax */
|
||||
if (c->client_tracking_prefixes)
|
||||
|
12
src/pubsub.c
12
src/pubsub.c
@ -722,3 +722,15 @@ void sunsubscribeCommand(client *c) {
|
||||
}
|
||||
if (clientTotalPubSubSubscriptionCount(c) == 0) c->flags &= ~CLIENT_PUBSUB;
|
||||
}
|
||||
|
||||
size_t pubsubMemOverhead(client *c) {
|
||||
/* PubSub patterns */
|
||||
size_t mem = listLength(c->pubsub_patterns) * sizeof(listNode);
|
||||
/* Global PubSub channels */
|
||||
mem += dictSize(c->pubsub_channels) * sizeof(dictEntry) +
|
||||
dictSlots(c->pubsub_channels) * sizeof(dictEntry*);
|
||||
/* Sharded PubSub channels */
|
||||
mem += dictSize(c->pubsubshard_channels) * sizeof(dictEntry) +
|
||||
dictSlots(c->pubsubshard_channels) * sizeof(dictEntry*);
|
||||
return mem;
|
||||
}
|
||||
|
@ -3000,6 +3000,7 @@ int pubsubPublishMessageAndPropagateToCluster(robj *channel, robj *message, int
|
||||
void addReplyPubsubMessage(client *c, robj *channel, robj *msg, robj *message_bulk);
|
||||
int serverPubsubSubscriptionCount();
|
||||
int serverPubsubShardSubscriptionCount();
|
||||
size_t pubsubMemOverhead(client *c);
|
||||
|
||||
/* Keyspace events notification */
|
||||
void notifyKeyspaceEvent(int type, char *event, robj *key, int dbid);
|
||||
|
@ -156,13 +156,13 @@ start_server {} {
|
||||
test "client evicted due to pubsub subscriptions" {
|
||||
r flushdb
|
||||
|
||||
# Since pubsub subscriptions cause a small overheatd this test uses a minimal maxmemory-clients config
|
||||
# Since pubsub subscriptions cause a small overhead this test uses a minimal maxmemory-clients config
|
||||
set temp_maxmemory_clients 200000
|
||||
r config set maxmemory-clients $temp_maxmemory_clients
|
||||
|
||||
# Test eviction due to pubsub patterns
|
||||
set rr [redis_client]
|
||||
# Add patterns until list maxes out maxmemroy clients and causes client eviction
|
||||
# Add patterns until list maxes out maxmemory clients and causes client eviction
|
||||
catch {
|
||||
for {set j 0} {$j < $temp_maxmemory_clients} {incr j} {
|
||||
$rr psubscribe $j
|
||||
@ -173,7 +173,7 @@ start_server {} {
|
||||
|
||||
# Test eviction due to pubsub channels
|
||||
set rr [redis_client]
|
||||
# Add patterns until list maxes out maxmemroy clients and causes client eviction
|
||||
# Subscribe to global channels until list maxes out maxmemory clients and causes client eviction
|
||||
catch {
|
||||
for {set j 0} {$j < $temp_maxmemory_clients} {incr j} {
|
||||
$rr subscribe $j
|
||||
@ -181,6 +181,17 @@ start_server {} {
|
||||
} e
|
||||
assert_match {I/O error reading reply} $e
|
||||
$rr close
|
||||
|
||||
# Test eviction due to sharded pubsub channels
|
||||
set rr [redis_client]
|
||||
# Subscribe to sharded pubsub channels until list maxes out maxmemory clients and causes client eviction
|
||||
catch {
|
||||
for {set j 0} {$j < $temp_maxmemory_clients} {incr j} {
|
||||
$rr ssubscribe $j
|
||||
}
|
||||
} e
|
||||
assert_match {I/O error reading reply} $e
|
||||
$rr close
|
||||
|
||||
# Restore config for next tests
|
||||
r config set maxmemory-clients $maxmemory_clients
|
||||
|
Loading…
Reference in New Issue
Block a user