def test_node_join_iterates_all_addrs(cluster):
    """A joining node tries every supplied address until one works."""
    leader = cluster.add_node()
    assert leader.raft_exec('SET', 'key', 'value') == b'OK'

    joiner = cluster.add_node(cluster_setup=False)
    joiner.start()

    # The first two addresses are bogus; only the last one is reachable,
    # so a successful join proves the whole list was iterated.
    reachable = 'localhost:{}'.format(cluster.node_ports()[0])
    assert joiner.cluster('join', 'localhost:1', 'localhost:2',
                          reachable) == b'OK'
    joiner.wait_for_election()
def test_node_join_redirects_to_leader(cluster):
    """ Node join can redirect to leader. """
    r1 = cluster.add_node()
    assert r1.raft_exec('SET', 'key', 'value') == b'OK'
    r2 = cluster.add_node()
    r2.wait_for_election()
    r3 = cluster.add_node(cluster_setup=False)
    r3.start()
    # Join via the follower (r2); the join must be redirected to the
    # leader.  Fix: assert the reply, consistent with the other join
    # tests — previously a failed join would go unnoticed here.
    assert r3.cluster('join', 'localhost:{}'.format(r2.port)) == b'OK'
    r3.wait_for_election()
def test_node_joins_and_gets_data(cluster):
    """A freshly joined node replicates the existing dataset."""
    leader = cluster.add_node()
    assert leader.raft_exec('SET', 'key', 'value') == b'OK'

    follower = cluster.add_node()
    follower.wait_for_election()
    assert follower.raft_info().get('leader_id') == 1
    assert follower.client.get('key') == b'value'

    # A write through the follower must be refused with -MOVED.
    with raises(ResponseError, match='MOVED'):
        assert follower.raft_exec('SET', 'key', 'value') is None
def test_config_sanity(cluster):
    """Set-then-get round trip for each runtime configuration param."""
    node = cluster.add_node()
    # (param, value to set, string value expected back)
    cases = [
        ('raft-interval', 999, '999'),
        ('request-timeout', 888, '888'),
        ('election-timeout', 777, '777'),
        ('reconnect-interval', 111, '111'),
        # Sizes are normalized to upper-case units on read-back.
        ('raft-log-max-file-size', '64mb', '64MB'),
        ('loglevel', 'debug', 'debug'),
    ]
    for param, value, expected in cases:
        node.raft_config_set(param, value)
        assert node.raft_config_get(param) == {param: expected}
def test_multi_exec(cluster):
    """MULTI/EXEC: queuing, nesting rejection, single-entry commit."""
    node = cluster.add_node()

    # MULTI itself must not be appended to the Raft log.
    assert node.raft_exec('MULTI')
    assert node.raft_info()['current_index'] == 1

    # Nesting MULTI is rejected.
    with raises(ResponseError, match='.*MULTI calls can not be nested'):
        node.raft_exec('MULTI')

    # Queued commands are acknowledged but not committed yet.
    for _ in range(3):
        assert node.raft_exec('INCR', 'key') == b'QUEUED'
    assert node.raft_info()['current_index'] == 1

    # EXEC applies all three and consumes exactly one log entry.
    assert node.raft_exec('EXEC') == [1, 2, 3]
    assert node.raft_info()['current_index'] == 2
    assert node.raft_exec('GET', 'key') == b'3'
def test_multi_exec_with_watch(cluster):
    """ MULTI/EXEC with WATCH """
    r1 = cluster.add_node()
    r1.client.execute_command('SET', 'watched-key', '1')

    # c1 WATCHes the key before opening its transaction.
    c1 = r1.client.connection_pool.get_connection('c1')
    c1.send_command('WATCH', 'watched-key')
    assert c1.read_response() == b'OK'

    # c2 then modifies the watched key, which should invalidate
    # c1's pending WATCH.
    c2 = r1.client.connection_pool.get_connection('c2')
    c2.send_command('RAFT', 'SET', 'watched-key', '2')
    assert c2.read_response() == b'OK'

    # c1's transaction queues normally, but EXEC must abort
    # (null reply) because the watched key changed.
    c1.send_command('RAFT', 'MULTI')
    assert c1.read_response() == b'OK'
    c1.send_command('RAFT', 'SET', 'watched-key', '3')
    assert c1.read_response() == b'QUEUED'
    c1.send_command('RAFT', 'EXEC')
    assert c1.read_response() is None

    # c2's write is the one that survives.
    assert r1.client.execute_command('GET', 'watched-key') == b'2'
def test_multi_exec_raftized(cluster):
    """MULTI/EXEC semantics are preserved under raftize-all-commands."""
    node = cluster.add_node()
    try:
        assert node.raft_config_set('raftize-all-commands', 'yes')
    except ResponseError:
        skip('Not supported on this Redis')

    # MULTI alone adds nothing to the log.
    assert node.client.execute_command('MULTI')
    assert node.raft_info()['current_index'] == 1

    # Queue three increments; they are acknowledged, not applied.
    for _ in range(3):
        assert node.client.execute_command('INCR', 'key') == b'QUEUED'
    assert node.raft_info()['current_index'] == 1

    # EXEC commits the transaction as a single log entry.
    assert node.client.execute_command('EXEC') == [1, 2, 3]
    assert node.raft_info()['current_index'] == 2
    assert node.client.execute_command('GET', 'key') == b'3'
def test_add_node_as_a_single_leader(cluster):
    """A lone node elects itself leader and accepts writes."""
    node = cluster.add_node()
    assert node.raft_exec('SET', 'key', 'value')
    # Index 1 is the initial config entry; the SET lands at index 2.
    assert node.raft_info()['current_index'] == 2
def test_single_node_log_is_reapplied(cluster):
    """A restarted single node replays its log and recovers its data."""
    node = cluster.add_node()
    assert node.raft_exec('SET', 'key', 'value')
    node.restart()
    node.wait_for_election()
    assert node.raft_info().get('leader_id') == 1
    node.wait_for_log_applied()
    assert node.client.get('key') == b'value'
def test_identical_snapshot_and_log(cluster):
    """Restart is correct when snapshot and log hold the same entries."""
    node = cluster.add_node()
    for _ in range(2):
        assert node.raft_exec('INCR', 'testkey')
    node.terminate()
    node.start()
    node.wait_for_info_param('state', 'up')
    # Entries present in both the snapshot and the log must not be
    # applied twice: the counter stays at 2.
    assert node.client.get('testkey') == b'2'
def test_snapshot_delivery_to_new_node(cluster):
    """A compacted leader delivers its snapshot to a joining node."""
    leader = cluster.add_node()
    for _ in range(3):
        leader.raft_exec('INCR', 'testkey')
    # Inflate the dataset so the snapshot transfer is non-trivial.
    leader.raft_exec('SETRANGE', 'bigkey', '104857600', 'x')
    leader.raft_exec('INCR', 'testkey')
    assert leader.client.get('testkey') == b'4'

    # Compact: all state now lives in the snapshot, none in the log.
    assert leader.client.execute_command('RAFT.DEBUG', 'COMPACT') == b'OK'
    assert leader.raft_info()['log_entries'] == 0

    joiner = cluster.add_node()
    cluster.wait_for_unanimity()
    # The joiner could only have obtained this via snapshot delivery.
    assert joiner.client.get('testkey') == b'4'
def test_node_history_with_same_address(cluster):
    """Repeatedly recycling node ids on the same address:port does not
    corrupt cluster state, and a node re-added on a reused address can
    still catch up and vote after a full restart."""
    cluster.create(5)
    cluster.raft_exec("INCR", "step-counter")

    # Remove nodes, remembering their ports for later reuse.
    ports = []
    for node_id in [2, 3, 4, 5]:
        ports.append(cluster.node(node_id).port)
        cluster.remove_node(node_id)
        cluster.leader_node().wait_for_log_applied()
    cluster.node(cluster.leader).wait_for_num_nodes(1)

    # Now add and remove several more times on the same ports, so the
    # log accumulates config entries for many node ids per address.
    for _ in range(5):
        for port in ports:
            n = cluster.add_node(port=port)
            cluster.leader_node().wait_for_num_nodes(2)
            cluster.leader_node().wait_for_log_applied()
            cluster.remove_node(n.id)
            cluster.leader_node().wait_for_num_nodes(1)
            cluster.leader_node().wait_for_log_applied()

    # Add enough data in the log to satisfy timing.
    for _ in range(3000):
        cluster.raft_exec("INCR", "step-counter")

    # Add another node on a previously used address.
    new_node = cluster.add_node(port=ports[0])
    new_node.wait_for_node_voting()

    # Terminate all, then restart; logs must replay cleanly.
    cluster.terminate()
    cluster.start()

    # need some time to start applying logs..
    time.sleep(2)
    # 1 initial INCR + 3000 in the loop.
    assert cluster.raft_exec("GET", "step-counter") == b'3001'
def test_config_startup_only_params(cluster):
    """Load-time-only parameters reject runtime changes."""
    node = cluster.add_node()
    for param, value in (('id', 2), ('raft-log-filename', 'filename')):
        with raises(ResponseError, match='.*only supported at load time'):
            node.raft_config_set(param, value)
def test_fuzzing_with_config_changes(cluster):
    """Counter stays consistent while membership is randomly churned."""
    nodes = 5
    cycles = 100
    cluster.create(nodes)
    for i in range(cycles):
        assert cluster.raft_exec('INCRBY', 'counter', 1) == i + 1
        # Roughly one cycle in seven: drop a random node and replace it.
        if random.randint(1, 7) == 1:
            try:
                cluster.remove_node(cluster.random_node_id())
            except ResponseError:
                # Removal can race with leadership changes; skip churn.
                continue
            cluster.add_node().wait_for_node_voting()
    assert int(cluster.raft_exec('GET', 'counter')) == cycles
def test_raft_log_max_file_size(cluster):
    """Compaction triggers once the log exceeds the configured size."""
    node = cluster.add_node()
    assert node.raft_info()['log_entries'] == 1
    assert node.raft_config_set('raft-log-max-file-size', '1kb')
    # ~5KB of writes against a 1KB limit forces at least one rewrite.
    for _ in range(10):
        assert node.raft_exec('SET', 'testkey', 'x'*500)
    time.sleep(1)
    assert node.raft_info()['log_entries'] < 10
def test_multi_discard(cluster):
    """DISCARD drops all queued commands without applying any."""
    node = cluster.add_node()
    assert node.raft_exec('MULTI')
    for _ in range(2):
        assert node.raft_exec('INCR', 'key') == b'QUEUED'
    assert node.raft_exec('DISCARD') == b'OK'
    # Nothing was applied, so the key was never created.
    assert node.raft_exec('GET', 'key') is None
def test_raftize(cluster):
    """With raftize-all-commands on, plain commands go through Raft."""
    node = cluster.add_node()
    try:
        assert node.raft_config_set('raftize-all-commands', 'yes')
    except ResponseError:
        skip('Not supported on this Redis')
    assert node.raft_info()['current_index'] == 1
    # A plain SET (no RAFT prefix) must still land in the Raft log.
    assert node.client.execute_command('SET', 'key', 'value')
    assert node.raft_info()['current_index'] == 2
def test_multi_exec_invalid_use(cluster):
    """EXEC/DISCARD without MULTI, and nested MULTI, are rejected."""
    node = cluster.add_node()

    # EXEC without MULTI is not supported.
    with raises(ResponseError, match='.*EXEC without MULTI'):
        node.raft_exec('EXEC')

    # DISCARD without MULTI is not supported.
    with raises(ResponseError, match='.*DISCARD without MULTI'):
        node.raft_exec('DISCARD')

    # MULTI cannot be nested.
    assert node.raft_exec('MULTI') == b'OK'
    with raises(ResponseError, match='.*MULTI calls can not be nested'):
        node.raft_exec('MULTI')
def test_loading_log_tail_after_rewrite(cluster):
    """Snapshot plus post-compaction log tail replays to full state."""
    node = cluster.add_node()
    for _ in range(3):
        assert node.raft_exec('INCR', 'testkey')
    # Compaction folds the first three increments into the snapshot.
    assert node.client.execute_command('RAFT.DEBUG', 'COMPACT') == b'OK'
    for _ in range(3):
        assert node.raft_exec('INCR', 'testkey')
    node.kill()
    node.start()
    node.wait_for_info_param('state', 'up')
    # Snapshot carries the first 3 entries, the log tail the last 3.
    assert node.client.get('testkey') == b'6'
def test_loading_log_tail(cluster):
    """Log entries that overlap the RDB snapshot are not re-applied."""
    node = cluster.add_node()
    for _ in range(3):
        assert node.raft_exec('INCR', 'testkey')
    # SAVE writes an RDB covering the first three increments; the log
    # still contains all six entries after the next three.
    node.client.save()
    for _ in range(3):
        assert node.raft_exec('INCR', 'testkey')
    node.kill()
    node.start()
    node.wait_for_info_param('state', 'up')
    # Only the tail past the snapshot may be replayed: result is 6.
    assert node.client.get('testkey') == b'6'
def test_invalid_configs(cluster):
    """Malformed configuration values are rejected with clear errors."""
    node = cluster.add_node()
    # Missing port, port 0, and an out-of-range port are all invalid.
    for bad_addr in ('host', 'host:0', 'host:99999'):
        with raises(ResponseError, match='.*invalid addr'):
            node.raft_config_set('addr', bad_addr)
    with raises(ResponseError, match='.*invalid .*value'):
        node.raft_config_set('request-timeout', 'nonint')
def test_raftize_does_not_affect_lua(cluster):
    """ Make sure raftize-all-commands does not affect Lua commands. """
    r1 = cluster.add_node()
    try:
        assert r1.raft_config_set('raftize-all-commands', 'yes')
    except ResponseError:
        skip('Not supported on this Redis')
    assert r1.raft_info()['current_index'] == 1
    # The script performs three redis.call() writes, but the whole EVAL
    # must be logged as a single Raft entry (index advances by one).
    assert r1.client.execute_command(
        'EVAL',
        """
redis.call('SET','key1','value1');
redis.call('SET','key2','value2');
redis.call('SET','key3','value3');
return 1234;""",
        '0') == 1234
    assert r1.raft_info()['current_index'] == 2
    # All three writes from inside the script were applied.
    assert r1.client.get('key1') == b'value1'
    assert r1.client.get('key2') == b'value2'
    assert r1.client.get('key3') == b'value3'
def test_raft_log_max_cache_size(cluster):
    """In-memory log cache is evicted down to the configured limit."""
    node = cluster.add_node()
    assert node.raft_info()['cache_entries'] == 1
    assert node.raft_config_set('raft-log-max-cache-size', '1kb')

    assert node.raft_exec('SET', 'testkey', 'testvalue')
    info = node.raft_info()
    assert info['cache_entries'] == 2
    assert info['cache_memory_size'] > 0

    # Push ~5KB through a 1KB cache to force eviction.
    for _ in range(10):
        assert node.raft_exec('SET', 'testkey', 'x' * 500)
    time.sleep(1)
    info = node.raft_info()
    # The on-disk log keeps everything; only the cache shrinks.
    assert info['log_entries'] == 12
    assert info['cache_entries'] < 5
def test_multi_exec_state_cleanup(cluster):
    """Per-client MULTI state is dropped when clients disconnect."""
    node = cluster.add_node()

    conn_a = node.client.connection_pool.get_connection('multi')
    conn_a.send_command('RAFT', 'MULTI')
    assert conn_a.read_response() == b'OK'

    conn_b = node.client.connection_pool.get_connection('multi')
    conn_b.send_command('RAFT', 'MULTI')
    assert conn_b.read_response() == b'OK'

    assert node.raft_info()['clients_in_multi_state'] == 2

    conn_a.disconnect()
    conn_b.disconnect()
    time.sleep(1)   # Not ideal
    assert node.raft_info()['clients_in_multi_state'] == 0
def test_cfg_node_added_from_snapshot(cluster):
    """A joining node learns both cluster config and data from snapshot."""
    cluster.create(2)
    for i in range(100):
        cluster.node(1).raft_exec('SET', 'key%s' % i, 'val%s' % i)
        cluster.node(1).raft_exec('INCR', 'counter')

    # Compact so the joiner can only catch up via the snapshot.
    assert cluster.node(1).client.execute_command(
        'RAFT.DEBUG', 'COMPACT') == b'OK'
    assert cluster.node(1).raft_info()['log_entries'] == 0

    # Add a new node and wait for things to settle.
    joiner = cluster.add_node()
    joiner.wait_for_election()

    # All keys and the counter must be present on node 3.
    for i in range(100):
        assert str(cluster.node(3).client.get('key%s' % i),
                   'utf-8') == 'val%s' % i
    assert cluster.node(3).client.get('counter') == b'100'
def test_multi_exec_with_disconnect(cluster):
    """ MULTI/EXEC, client drops before EXEC. """
    r1 = cluster.add_node()
    c1 = r1.client.connection_pool.get_connection('c1')
    c2 = r1.client.connection_pool.get_connection('c2')

    # We use RAFT.DEBUG COMPACT with delay to make the Raft thread
    # busy and allow us to queue up several RaftReqs and disconnect in
    # time.
    # Note -- for compact to succeed we need at least one key.
    r1.client.execute_command('RAFT', 'SET', 'somekey', 'someval')
    c2.send_command('RAFT.DEBUG', 'COMPACT', '2')
    time.sleep(0.5)

    # While Raft thread is busy, pipeline a first non-MULTI request
    c1.send_command('RAFT', 'SET', 'test-key', '1')

    # Then pipeline a MULTI/EXEC which we expect to fail, because it
    # cannot determine CAS safety.  We also want to be sure no other
    # commands that follow get executed.
    c1.send_command('RAFT', 'MULTI')
    c1.send_command('RAFT', 'SET', 'test-key', '2')
    c1.send_command('RAFT', 'EXEC')
    c1.send_command('RAFT', 'SET', 'test-key', '3')
    # Disconnect before any of the pipelined replies are read; the
    # module must clean up the pending MULTI state.
    c1.disconnect()

    # Wait for RAFT.DEBUG COMPACT
    assert c2.read_response() == b'OK'

    # Make sure SET succeeded and EXEC didn't.
    assert r1.client.execute_command('GET', 'test-key') == b'1'