def test_config_from_second_generation_snapshot(cluster):
    """
    A regression test for #44: confirm that if we load a snapshot on
    startup, do nothing, then re-create a snapshot we don't end up with
    a messed up nodes config.
    """
    cluster.create(3)

    # Advance the log with a batch of writes.
    for _ in range(20):
        assert cluster.raft_exec('INCR', 'testkey')

    # First-generation snapshot: compact away the log.
    node3 = cluster.node(3)
    assert node3.client.execute_command('RAFT.DEBUG', 'COMPACT') == b'OK'

    # Restart so the node boots from that snapshot.
    node3.restart()
    node3.wait_for_node_voting()

    # Advance the log again.
    for _ in range(20):
        assert cluster.raft_exec('INCR', 'testkey')

    # Second-generation snapshot, taken on top of the loaded one.
    cluster.wait_for_unanimity()
    assert node3.client.execute_command('RAFT.DEBUG', 'COMPACT') == b'OK'

    node3.restart()
    node3.wait_for_node_voting()

    # The node config must have survived both snapshot generations.
    assert node3.raft_info()['num_nodes'] == 3
def test_snapshot_delivery(cluster):
    """
    Ability to properly deliver and load a snapshot.
    """
    cluster.create(3, raft_args={'raftize-all-commands': 'yes'})
    n1 = cluster.node(1)

    # Seed some state.
    for _ in range(3):
        n1.raft_exec('INCR', 'testkey')

    # Bulk-load many lists so the snapshot has real volume.
    for list_idx in range(1000):
        pipe = n1.client.pipeline(transaction=True)
        for elem_idx in range(100):
            pipe.rpush('list-%s' % list_idx, 'elem-%s' % elem_idx)
        pipe.execute()

    # Take node 3 down, then advance state it will have to catch up on.
    cluster.node(3).terminate()
    n1.raft_exec('SETRANGE', 'bigkey', '104857600', 'x')
    n1.raft_exec('INCR', 'testkey')
    assert n1.client.get('testkey') == b'4'

    # Compact so node 3 can only recover via snapshot delivery.
    assert n1.client.execute_command('RAFT.DEBUG', 'COMPACT') == b'OK'
    assert n1.raft_info()['log_entries'] == 0

    n3 = cluster.node(3)
    n3.start()
    n1.raft_exec('INCR', 'testkey')
    n3.wait_for_node_voting()
    cluster.wait_for_unanimity()
    n3.wait_for_log_applied()

    # Read node 3 directly to confirm the snapshot was applied.
    n3.client.execute_command('RAFT.CONFIG', 'SET',
                              'raftize-all-commands', 'no')
    assert n3.client.get('testkey') == b'5'
def test_cfg_node_removed_from_snapshot(cluster):
    """
    Node able to learn that another node left by reading the snapshot
    metadata.
    """
    cluster.create(5)
    cluster.node(1).raft_exec('SET', 'key', 'value')
    cluster.wait_for_unanimity()

    # Take node 4 down so it misses updates, then remove node 5.
    cluster.node(4).terminate()
    cluster.remove_node(5)
    cluster.wait_for_unanimity(exclude=[4])
    cluster.node(1).wait_for_log_applied()
    assert cluster.node(1).raft_info()['num_nodes'] == 4

    # Compact, so the removal only survives in snapshot metadata.
    cluster.wait_for_unanimity(exclude=[4])
    assert cluster.node(1).client.execute_command(
        'RAFT.DEBUG', 'COMPACT') == b'OK'
    assert cluster.node(1).raft_info()['log_entries'] == 0

    # Bring node 4 back; it must learn about the removal from the
    # snapshot it receives.
    cluster.node(4).start()
    cluster.node(4).wait_for_election()
    cluster.wait_for_unanimity()
    assert cluster.node(4).raft_info()['num_nodes'] == 4
def test_log_rollback(cluster):
    """
    Rollback of log entries that were written in the minority.
    """
    cluster.create(3)
    assert cluster.leader == 1
    assert cluster.raft_exec('INCRBY', 'key', '111') == 111

    # Isolate the leader by taking down the other two nodes.
    cluster.node(2).terminate()
    cluster.node(3).terminate()

    # Submit an entry that can never be committed (no majority).
    assert cluster.node(1).current_index() == 6
    conn = cluster.node(1).client.connection_pool.get_connection('RAFT')
    conn.send_command('RAFT', 'INCRBY', 'key', '222')
    assert cluster.node(1).current_index() == 7
    cluster.node(1).terminate()

    # Confirm the uncommitted entry really landed in node 1's log.
    node1_log = RaftLog(cluster.node(1).raftlog)
    node1_log.read()
    assert node1_log.entry_count() == 7

    # Restart the cluster without node 1, make sure the write was
    # not committed.
    cluster.node(2).start()
    cluster.node(3).start()
    cluster.node(2).wait_for_election()
    assert cluster.node(2).current_index() == 7  # 6 + 1 no-op entry

    # Rejoin node 1.
    cluster.node(1).start()
    cluster.node(1).wait_for_election()

    # A new committed write must overwrite the rolled-back entry in
    # node 1's log.
    assert cluster.raft_exec('INCRBY', 'key', '333') == 444
    cluster.wait_for_unanimity()

    # Re-read the log and check the tail entry reflects the change.
    node1_log.reset()
    node1_log.read()
    assert match(r'.*INCRBY.*333', str(node1_log.entries[-1].data()))
def test_all_committed_log_rewrite(cluster):
    """
    Log rewrite operation when all entries are committed, so we expect
    an empty log.
    """
    cluster.create(3)

    for key in ('key1', 'key2', 'key3'):
        cluster.node(1).raft_exec('SET', key, 'value')
    cluster.wait_for_unanimity()

    assert cluster.node(1).client.execute_command(
        'RAFT.DEBUG', 'COMPACT') == b'OK'
    assert cluster.node(1).raft_info()['log_entries'] == 0

    # Read the log file directly — no NORMAL entries should remain.
    node1_log = RaftLog(cluster.node(1).raftlog)
    node1_log.read()
    assert node1_log.entry_count(LogEntry.LogType.NORMAL) == 0
def test_snapshot_delivery_to_new_node(cluster):
    """
    Ability to properly deliver and load a snapshot.
    """
    first = cluster.add_node()

    # Build up some state on the initial node.
    for _ in range(3):
        first.raft_exec('INCR', 'testkey')
    first.raft_exec('SETRANGE', 'bigkey', '104857600', 'x')
    first.raft_exec('INCR', 'testkey')
    assert first.client.get('testkey') == b'4'

    # Compact so a joining node can only sync via snapshot.
    assert first.client.execute_command('RAFT.DEBUG', 'COMPACT') == b'OK'
    assert first.raft_info()['log_entries'] == 0

    # The new node must receive and load the snapshot.
    second = cluster.add_node()
    cluster.wait_for_unanimity()
    assert second.client.get('testkey') == b'4'
def test_log_reset_on_snapshot_load(cluster):
    """
    Test correct reset of log when a snapshot is received.
    """
    cluster.create(3, prepopulate_log=20)

    # Stop node 3, advance the log, then compact it away so node 3
    # will have to recover via snapshot.
    cluster.node(3).terminate()
    for _ in range(20):
        assert cluster.raft_exec('INCR', 'testkey')
    assert cluster.node(1).client.execute_command(
        'RAFT.DEBUG', 'COMPACT') == b'OK'

    # Bring node 3 back and wait until the snapshot is delivered.
    cluster.node(3).start()
    cluster.node(3).wait_for_node_voting()

    # Restart node 3 once more to prove its (reset) log is loadable.
    cluster.node(3).terminate()
    cluster.node(3).start()
    cluster.node(3).wait_for_node_voting()

    # Cluster must still accept and propagate writes.
    assert cluster.raft_exec('INCR', 'last-key')
    cluster.wait_for_unanimity()