Mirror of https://github.com/dragonflydb/dragonfly, synced 2024-11-21 06:58:50 +00:00
refactor: set info_replication_valkey_compatible=true (#3467)
* refactor: set info_replication_valkey_compatible=true
* test: mark test_cluster_replication_migration as skipped because it's broken
This commit is contained in:
parent 5258bbacd1
commit 48a28c3ea3
@@ -123,7 +123,7 @@ ABSL_FLAG(bool, s3_ec2_metadata, false,
 ABSL_FLAG(bool, s3_sign_payload, true,
           "whether to sign the s3 request payload when uploading snapshots");
 
-ABSL_FLAG(bool, info_replication_valkey_compatible, false,
+ABSL_FLAG(bool, info_replication_valkey_compatible, true,
           "when true - output valkey compatible values for info-replication");
 
 ABSL_DECLARE_FLAG(int32_t, port);
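Note (not part of the diff): with the default flipped to true, INFO REPLICATION reports Valkey-style values out of the box — role "slave" instead of "replica", and per-replica state "online" instead of "stable_sync" — which is exactly what the test updates below track. A minimal sketch of what a client now sees, assuming redis-py and a local instance on the default port:

import redis

client = redis.Redis(host="localhost", port=6379, decode_responses=True)
info = client.info("replication")
if info["role"] == "master":
    # Replicas appear as slave0, slave1, ... with state == "online".
    print(info.get("slave0", {}).get("state"))
else:
    # On a replica the role now reads "slave", matching Valkey/Redis.
    print(info["role"], info["master_host"], info["master_port"])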
@@ -1523,6 +1523,7 @@ async def test_cluster_config_reapply(df_factory: DflyInstanceFactory):
     await close_clients(*[node.client for node in nodes], *[node.admin_client for node in nodes])
 
 
+@pytest.mark.skip("broken")
 @dfly_args({"proactor_threads": 4, "cluster_mode": "yes"})
 async def test_cluster_replication_migration(
     df_factory: DflyInstanceFactory, df_seeder_factory: DflySeederFactory
@@ -1553,7 +1554,7 @@ async def test_cluster_replication_migration(
 
     # generate some data with seederv1
     seeder = df_seeder_factory.create(keys=2000, port=m1.port, cluster_mode=True)
-    seeder.run(target_deviation=0.1)
+    await seeder.run(target_deviation=0.1)
 
     # start replication from replicas
     await r1_node.admin_client.execute_command(f"replicaof localhost {m1_node.instance.port}")
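The one-word fix above matters: calling an async method without await only creates a coroutine object, so the seeder never actually ran and the test migrated an empty dataset. A self-contained sketch of the bug class (illustrative names, not from the repo):

import asyncio

class Seeder:
    async def run(self, target_deviation):
        await asyncio.sleep(0)  # stand-in for the real data generation

async def main():
    seeder = Seeder()
    seeder.run(target_deviation=0.1)        # bug: coroutine created, never executed
    await seeder.run(target_deviation=0.1)  # fix: the seeder actually runs

# Python only flags the buggy call with a late
# "RuntimeWarning: coroutine ... was never awaited".
asyncio.run(main())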
@@ -1750,7 +1751,7 @@ async def await_stable_sync(m_client: aioredis.Redis, replica_port, timeout=10):
         role = await m_client.execute_command("role")
         return role == [
             "master",
-            [["127.0.0.1", str(replica_port), "stable_sync"]],
+            [["127.0.0.1", str(replica_port), "online"]],
         ]
 
     while (time.time() - start) < timeout:
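For context (inferred from these tests, not stated in the diff): Dragonfly's ROLE reply on a master has the shape ["master", [[ip, port, state], ...]], and with the compatible flag each replica's state reads "online". An equivalent spot check, sketched as a helper with an assumed async client parameter:

async def assert_replica_online(m_client, replica_port):
    role = await m_client.execute_command("role")
    assert role[0] == "master"
    ip, port, state = role[1][0]  # first replica entry: [ip, port, state]
    assert (port, state) == (str(replica_port), "online")  # was "stable_sync"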
@@ -27,12 +27,12 @@ M_SLOW = [pytest.mark.slow]
 M_STRESS = [pytest.mark.slow, pytest.mark.opt_only]
 
 
-async def wait_for_replicas_state(*clients, state="stable_sync", timeout=0.05):
+async def wait_for_replicas_state(*clients, state="online", timeout=0.05):
     """Wait until all clients (replicas) reach passed state"""
     while len(clients) > 0:
         await asyncio.sleep(timeout)
         roles = await asyncio.gather(*(c.role() for c in clients))
-        clients = [c for c, role in zip(clients, roles) if role[0] != "replica" or role[3] != state]
+        clients = [c for c, role in zip(clients, roles) if role[0] != "slave" or role[3] != state]
 
 
 """
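Usage note: ROLE on a replica replies ["slave", master_host, master_port, state, ...], so role[0] is the role name and role[3] the link state, which is what the filter above inspects. Calling the helper now looks like this (client names are hypothetical):

# both replicas must report ["slave", host, port, "online", ...]
await wait_for_replicas_state(c_replica1, c_replica2)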
@@ -135,7 +135,7 @@ async def test_replication_all(
 
 async def check_replica_finished_exec(c_replica: aioredis.Redis, m_offset):
     role = await c_replica.role()
-    if role[0] != "replica" or role[3] != "stable_sync":
+    if role[0] != "slave" or role[3] != "online":
         return False
     syncid, r_offset = await c_replica.execute_command("DEBUG REPLICA OFFSET")
 
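A sketch of how a helper like this is typically driven (names and retry policy assumed; DEBUG REPLICA OFFSET is the Dragonfly debug command shown above, replying with a sync id and the replica's journal offset):

async def wait_replica_caught_up(c_replica, m_offset, retries=50, delay=0.1):
    # Poll until the replica is online and has consumed the master's
    # journal up to m_offset.
    for _ in range(retries):
        if await check_replica_finished_exec(c_replica, m_offset):
            return True
        await asyncio.sleep(delay)
    return False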
@@ -980,13 +980,13 @@ async def test_role_command(df_factory, n_keys=20):
 
     assert await c_master.execute_command("role") == [
         "master",
-        [["127.0.0.1", str(replica.port), "stable_sync"]],
+        [["127.0.0.1", str(replica.port), "online"]],
     ]
     assert await c_replica.execute_command("role") == [
-        "replica",
+        "slave",
         "localhost",
         str(master.port),
-        "stable_sync",
+        "online",
     ]
 
     # This tests that we react fast to socket shutdowns and don't hang on
@@ -994,7 +994,7 @@ async def test_role_command(df_factory, n_keys=20):
     master.stop()
     await asyncio.sleep(0.1)
     assert await c_replica.execute_command("role") == [
-        "replica",
+        "slave",
         "localhost",
         str(master.port),
         "connecting",
@@ -1344,13 +1344,13 @@ async def test_take_over_timeout(df_factory, df_seeder_factory):
 
     assert await c_master.execute_command("role") == [
         "master",
-        [["127.0.0.1", str(replica.port), "stable_sync"]],
+        [["127.0.0.1", str(replica.port), "online"]],
     ]
     assert await c_replica.execute_command("role") == [
-        "replica",
+        "slave",
         "localhost",
         str(master.port),
-        "stable_sync",
+        "online",
     ]
 
     await disconnect_clients(c_master, c_replica)
@@ -1558,7 +1558,7 @@ async def test_replicaof_flag_replication_waits(df_factory):
 
     # check that it is in replica mode, yet status is down
     info = await c_replica.info("replication")
-    assert info["role"] == "replica"
+    assert info["role"] == "slave"
     assert info["master_host"] == "localhost"
     assert info["master_port"] == BASE_PORT
     assert info["master_link_status"] == "down"
@@ -2008,6 +2008,7 @@ async def test_policy_based_eviction_propagation(df_factory, df_seeder_factory):
     keys_replica = await c_replica.execute_command("keys k*")
 
     assert set(keys_replica).difference(keys_master) == set()
+    assert set(keys_master).difference(keys_replica) == set()
 
     await disconnect_clients(c_master, *[c_replica])
 
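The added second assertion makes the containment check symmetric; together the two difference checks amount to plain set equality, which could also be written in one line:

assert set(keys_replica) == set(keys_master)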
@@ -18,7 +18,7 @@ def main():
     info = client.info("replication")
     assert info["role"] == "master"
     replication_state = info["slave0"]
-    assert replication_state["state"] == "stable_sync"
+    assert replication_state["state"] == "online"
 
     def is_zero_lag(replication_state):
         return replication_state["lag"] == 0
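For reference (a redis-py behavior, not something this diff changes): the client parses each slaveN line of INFO REPLICATION into a dict, which is why the script can index state and lag directly. Illustrative shape:

# info["slave0"] after parsing (values are made up):
# {"ip": "127.0.0.1", "port": 6380, "state": "online", "offset": 12345, "lag": 0}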
@@ -234,7 +234,7 @@ def attach(args):
     if args.attach_as_replica:
         newcomer = Node(args.attach_host, args.attach_port)
         replica_resp = send_command(newcomer, ["info", "replication"])
-        if replica_resp["role"] != "replica":
+        if replica_resp["role"] != "slave":
            die_with_err("Node is not in replica mode")
        if (
            replica_resp["master_host"] != args.target_host
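Node and send_command are the tool's own helpers; the same guard sketched with plain redis-py (host and port are placeholders):

import redis

info = redis.Redis(host="localhost", port=6379).info("replication")
if info["role"] != "slave":  # reported as "replica" under the old default
    raise SystemExit("Node is not in replica mode")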