def test_node_txn_add_new_node(node_req_add, sim_pool, random):
    """Order a random mix of NODE-add and domain requests while a view
    change runs, then verify view_no, node registry and ledger sizes."""
    # Step 1. Build a random number (1 .. pool_size-1) of NODE-add requests
    # plus a batch of domain requests, shuffle them together and submit.
    add_count = random.integer(1, sim_pool.size - 1)
    pool_reqs = [node_req_add(sim_pool.size + i) for i in range(add_count)]
    domain_reqs = create_requests(DOMAIN_REQ_COUNT)
    reqs = pool_reqs + domain_reqs
    shuffle(reqs)
    sim_pool.sim_send_requests(reqs)

    # Snapshot the state we will compare against after ordering completes.
    current_view_no = sim_pool.view_no
    current_pool_ledger_size = get_pools_ledger_size(sim_pool,
                                                     ledger_id=POOL_LEDGER_ID)
    current_domain_ledger_size = get_pools_ledger_size(sim_pool,
                                                       ledger_id=DOMAIN_LEDGER_ID)
    expected_view_no = current_view_no + 1
    # The new nodes' names come from the Greeks table, indexed past the
    # current pool size.
    expected_node_reg = sim_pool.validators + [
        Greeks[sim_pool.size + i][0] for i in range(len(pool_reqs))
    ]

    # Step 2. Keep ordering batches on a randomized repeating interval.
    random_interval = random.integer(10, 20) * 100
    RepeatingTimer(sim_pool.timer, random_interval,
                   partial(order_requests, sim_pool))

    # Step 3. Trigger a view change on every node while ordering is running.
    for node in sim_pool.nodes:
        sim_pool.timer.schedule(
            random_interval + 1000,
            partial(node._view_changer.process_need_view_change,
                    NeedViewChange(view_no=1)))

    # Step 4. Block until every node reaches view 1 and finishes the
    # NEW_VIEW phase.
    for node in sim_pool.nodes:
        sim_pool.timer.wait_for(
            lambda: node._view_changer._data.view_no == 1)
    for node in sim_pool.nodes:
        sim_pool.timer.wait_for(
            lambda: not node._data.waiting_for_new_view)

    # Step 5. Verify view number, node registry and both ledger sizes.
    # For now these checks run only on the pre-existing nodes (plus the
    # newly added names in the registry) because catchup cannot be
    # simulated here.
    # ToDo: extend these checks to the whole pool once INDY-2148 lands.
    sim_pool.timer.wait_for(
        lambda: all(n._data.view_no == expected_view_no
                    for n in sim_pool.nodes))
    sim_pool.timer.wait_for(
        partial(check_node_reg, sim_pool, expected_node_reg))
    for node in sim_pool.nodes:
        sim_pool.timer.wait_for(
            partial(check_ledger_size, node,
                    current_pool_ledger_size + len(pool_reqs),
                    POOL_LEDGER_ID))
        sim_pool.timer.wait_for(
            partial(check_ledger_size, node,
                    current_domain_ledger_size + len(domain_reqs),
                    DOMAIN_LEDGER_ID))
def do_test_node_txn_mixed(sim_pool, random):
    """Order a shuffled mix of domain requests, NODE-add and NODE-demote
    requests through a view change, then verify the resulting node
    registry and ledger sizes."""
    # Step 1. Pick random nodes to demote (all but 4), create up to f
    # NODE-add requests, and interleave everything with domain requests.
    indexes_to_demote = random.sample(range(1, sim_pool.size),
                                      sim_pool.size - 4)
    initial_view_no = sim_pool._initial_view_no
    demoted_names = []
    domain_reqs = create_requests(DOMAIN_REQ_COUNT)
    pool_reqs = [node_req_add(sim_pool, sim_pool.size + i)
                 for i in range(random.integer(1, sim_pool.f))]
    # New node names come from the Greeks table, indexed past pool size.
    promoted_names = [Greeks[sim_pool.size + i][0]
                      for i in range(len(pool_reqs))]
    for idx in indexes_to_demote:
        name, req = node_req_demote(sim_pool, idx)
        demoted_names.append(name)
        pool_reqs.append(req)
    all_reqs = domain_reqs + pool_reqs
    shuffle(all_reqs)
    sim_pool.sim_send_requests(all_reqs)

    # Snapshot ledger sizes and compute the registry we expect afterwards:
    # genesis validators minus demoted nodes, plus the newly added ones.
    current_pool_ledger_size = get_pools_ledger_size(
        sim_pool, ledger_id=POOL_LEDGER_ID)
    current_domain_ledger_size = get_pools_ledger_size(
        sim_pool, ledger_id=DOMAIN_LEDGER_ID)
    expected_node_reg = [name for name in sim_pool._genesis_validators
                         if name not in demoted_names] + promoted_names

    # Step 2. Run request ordering together with a view change.
    do_order_and_vc(sim_pool, random, initial_view_no)

    # Step 3. Verify node registry and both ledger sizes.
    # For now these checks run only on the pre-existing nodes (plus the
    # newly added names in the registry) because catchup cannot be
    # simulated here.
    # ToDo: extend these checks to the whole pool once INDY-2148 lands.
    sim_pool.timer.wait_for(
        partial(check_node_reg, sim_pool, expected_node_reg))
    for node in sim_pool.nodes:
        sim_pool.timer.wait_for(
            partial(check_ledger_size, node,
                    current_pool_ledger_size + len(pool_reqs),
                    POOL_LEDGER_ID))
        sim_pool.timer.wait_for(
            partial(check_ledger_size, node,
                    current_domain_ledger_size + len(domain_reqs),
                    DOMAIN_LEDGER_ID))
def do_test(seed):
    """Set up a seeded simulated pool, order a fixed batch of requests
    through a view change, and check ordering counts, ledger size and
    cross-node data consistency."""
    # 1. Build the pool from the seed and submit all requests up front.
    requests_count = REQUEST_COUNT
    batches_count = requests_count // MAX_BATCH_SIZE
    random = DefaultSimRandom(seed)
    reqs = create_requests(requests_count)
    pool = setup_pool(random)
    pool.sim_send_requests(reqs)
    initial_ledger_size = get_pools_ledger_size(pool)

    # 2. Order 3PC batches on a repeating timer and schedule a view
    # change on every node shortly after ordering starts.
    random_interval = 1000
    RepeatingTimer(pool.timer, random_interval,
                   partial(order_requests, pool))
    for node in pool.nodes:
        pool.timer.schedule(
            3000,
            partial(node._view_changer.process_need_view_change,
                    NeedViewChange(view_no=1)))

    # 3. Wait until every node completes the view change.
    for node in pool.nodes:
        pool.timer.wait_for(
            lambda: node._view_changer._data.view_no == 1)

    # 4. Wait until every node has ordered every request.
    for node in pool.nodes:
        pool.timer.wait_for(
            partial(check_batch_count, node, batches_count))
        pool.timer.wait_for(
            partial(check_ledger_size, node,
                    initial_ledger_size + REQUEST_COUNT))

    # 5. Verify data consistency across the pool.
    pool.timer.wait_for(lambda: check_no_asserts(check_consistency, pool))
def test_demote_and_promote_back(sim_pool, random, indexes_to_demote):
    """Demote a set of nodes and immediately promote each one back, order
    the interleaved requests through a view change, then verify that the
    node registry returns to the genesis set and the pool ledger grew by
    one txn per request."""
    # Step 1. For each chosen index build a demote request and a matching
    # promote-back request for the same node.
    initial_view_no = sim_pool._initial_view_no
    demoted_names = []
    demoted_reqs = []
    promoted_reqs = []
    for i in indexes_to_demote:
        name, req = node_req_demote(sim_pool, i)
        demoted_names.append(name)
        demoted_reqs.append(req)
        _, req = node_req_promote_back(sim_pool, i)
        promoted_reqs.append(req)
    # Interleave demote/promote pairs: demote[0], promote[0], demote[1], ...
    # (An explicit loop replaces the previous side-effect-only list
    # comprehension, which built and discarded a list of Nones.)
    pool_reqs = []
    for demote_req, promote_req in zip(demoted_reqs, promoted_reqs):
        pool_reqs.append(demote_req)
        pool_reqs.append(promote_req)

    current_pool_ledger_size = get_pools_ledger_size(
        sim_pool, ledger_id=POOL_LEDGER_ID)
    # Every demotion is undone by its promotion, so the registry should
    # end up exactly as at genesis.
    expected_node_reg = sim_pool._genesis_validators

    # Step 2. Run request ordering together with a view change.
    do_order_and_vc(sim_pool, random, initial_view_no)

    # Step 3. Check the node registry and the pool ledger size.
    sim_pool.timer.wait_for(
        partial(check_node_reg, sim_pool, expected_node_reg))
    for node in sim_pool.nodes:
        sim_pool.timer.wait_for(
            partial(check_ledger_size, node,
                    current_pool_ledger_size + len(pool_reqs),
                    POOL_LEDGER_ID))