def get_up_to(from_, to):
    """Poll blocks from node 0 until height `to`, then log heights
    `from_`..`to` and record the sequence of distinct epoch ids.

    Side effects on module globals: fills hash_to_height, height_to_hash,
    hash_to_epoch and hash_to_next_epoch; appends each newly observed
    epoch id to `epochs`; stores the first height at which the epoch id
    changed in `first_epoch_switch_height` and tracks `last_epoch`.
    """
    global first_epoch_switch_height, last_epoch
    for height, hash_ in utils.poll_blocks(nodes[0],
                                           timeout=TIMEOUT,
                                           poll_interval=0.01):
        block = nodes[0].get_block(hash_)
        header = block['result']['header']
        cur_epoch = header['epoch_id']
        hash_to_height[hash_] = height
        height_to_hash[height] = hash_
        hash_to_epoch[hash_] = cur_epoch
        hash_to_next_epoch[hash_] = header['next_epoch_id']
        # Remember only the first height at which the epoch id changed.
        if (last_epoch is not None and last_epoch != cur_epoch
                and first_epoch_switch_height is None):
            first_epoch_switch_height = height
        last_epoch = cur_epoch
        if height >= to:
            break
    for i in range(from_, to + 1):
        hash_ = height_to_hash[i]
        logger.info(
            f"{i} {hash_} {hash_to_epoch[hash_]} {hash_to_next_epoch[hash_]}")
        # Record the epoch id whenever it differs from the last one recorded.
        if not epochs or epochs[-1] != hash_to_epoch[hash_]:
            epochs.append(hash_to_epoch[hash_])
def _test_block_hash( self, msg_version: int, protocol_version: typing.Optional[int] = None) -> None: """Starts a cluster, fetches blocks and computes their hashes. The cluster is started with genesis configured to use given protocol version. The code fetches blocks until: 1) a block with all approvals set is encountered, 2) another block with at least one approval missing and 3) at least ten blocks total are checked. Args: msg_version: Version of the BlockHeaderInnerRest to use when serialising and computing hash. protocol_version: If given, protocol version to use in the cluster (which will be set in genesis); If not given, cluster will be started with the newest supported protocol version. """ genesis_overrides = [] if protocol_version: genesis_overrides = [['protocol_version', protocol_version]] nodes = () try: nodes = cluster.start_cluster(4, 0, 4, None, genesis_overrides, {}) got_all_set = False got_some_unset = False count = 0 for block_id in utils.poll_blocks(nodes[0]): header = nodes[0].get_block(block_id.hash)['result']['header'] self.assertEqual((block_id.height, block_id.hash), (header['height'], header['hash']), (block_id, header)) got = compute_block_hash(header, msg_version) self.assertEqual(header['hash'], got, header) if all(header['approvals']): if not got_all_set: nodes[1].kill() got_all_set = True elif any(approval is None for approval in header['approvals']): got_some_unset = True count += 1 if got_all_set and got_some_unset and count >= 10: break finally: for node in nodes: node.cleanup()
# NOTE(review): tail of a cluster-start call — the opening
# `start_cluster(...)` is outside this chunk.  Genesis overrides give
# validator 0 a matching staked amount and locked account balance; all
# three nodes share one consensus config.
["validators", 0, "amount", "260000000000000000000000000000000"],
["records", 0, "Account", "account", "locked",
 "260000000000000000000000000000000"]], {
    0: consensus_config,
    1: consensus_config,
    2: consensus_config
})
logger.info('kill node1 and node2')
nodes[1].kill()
nodes[2].kill()
# Let node 0 build the chain alone up to TARGET_HEIGHT.
node0_height, _ = utils.wait_for_blocks(nodes[0], target=TARGET_HEIGHT)
logger.info('Restart node 1')
# NOTE(review): node 1 is restarted with itself as boot node —
# presumably its stored network address makes this valid; confirm intent.
nodes[1].start(boot_node=nodes[1])
time.sleep(2)
# Wait until node 1 reaches node 0's height; the second condition
# presumably waits for a validator to drop out of node 0's validator
# set — confirm against the rest of the test.
for height, _ in utils.poll_blocks(nodes[1], timeout=TIMEOUT):
    if height >= node0_height and len(nodes[0].validators()) < 3:
        break
logger.info('Restart node 2')
nodes[2].start(boot_node=nodes[2])
time.sleep(2)
# Finally make sure node 2 also catches up to node 0's current head.
target = nodes[0].get_latest_block().height
utils.wait_for_blocks(nodes[2], target=target)
# NOTE(review): tail of a cluster-start call (empty per-node config
# overrides) — the opening of the call is outside this chunk.
{})
time.sleep(3)
nodes[1].kill()
# NOTE(review): `started` is not read in this chunk — presumably used
# further down; confirm before removing.
started = time.time()
# act_to_val presumably maps accounts to target validators (all to
# node 0 here) — confirm against utils.TxContext.
act_to_val = [0, 0, 0]
ctx = utils.TxContext(act_to_val, nodes)
# Snapshot of the expected balances before any transfers are sent.
last_balances = [x for x in ctx.expected_balances]
sent_height = -1
caught_up_times = 0
for height, hash_ in utils.poll_blocks(nodes[0],
                                       timeout=TIMEOUT,
                                       poll_interval=0.1):
    logger.info(f'Got to height {height}')
    if ctx.get_balances() == ctx.expected_balances:
        # Balances caught up: send the next batch of transactions and
        # remember the height at which we sent them.
        logger.info('Balances caught up, took %s blocks, moving on',
                    height - sent_height)
        ctx.send_moar_txs(hash_, 10, use_routing=True)
        sent_height = height
        caught_up_times += 1
    else:
        # Balances must converge within 30 blocks of sending, otherwise
        # fail with a diagnostic message.
        # NOTE(review): the .format() argument list is cut off at the
        # end of this chunk.
        assert height <= sent_height + 30, (
            'Balances before: {before}\n'
            'Expected balances: {expected}\n'
            'Current balances: {current}\n'
            'Sent at height: {height}').format(
                before=last_balances,
# NOTE(review): tail of per-node config overrides (node 2 tracks shard
# 0) — the opening of the cluster-config call is outside this chunk.
{2: {
    "tracked_shards": [0]
}})
# NOTE(review): `started` is not read in this chunk — presumably used
# further down; confirm before removing.
started = time.time()
boot_node = spin_up_node(config, near_root, node_dirs[0], 0)
node1 = spin_up_node(config, near_root, node_dirs[1], 1,
                     boot_node=boot_node)
ctx = utils.TxContext([0, 0], [boot_node, node1])
sent_txs = False
observed_height = 0
# NOTE(review): no `break` is visible in this loop as captured —
# poll_blocks' timeout appears to be the only exit; confirm that the
# loop's exit condition wasn't lost when this chunk was extracted.
for observed_height, hash_ in utils.poll_blocks(boot_node,
                                                timeout=TIMEOUT,
                                                poll_interval=0.1):
    if mode == 'onetx' and not sent_txs:
        # Send a single batch of transactions exactly once.
        ctx.send_moar_txs(hash_, 3, False)
        sent_txs = True
    elif mode == 'manytx':
        # Re-send a batch every time the balances have caught up.
        if ctx.get_balances() == ctx.expected_balances:
            logger.info(f'Sending moar txs at height {observed_height}')
            ctx.send_moar_txs(hash_, 3, False)
if mode == 'onetx':
    assert ctx.get_balances() == ctx.expected_balances
node2 = spin_up_node(config, near_root, node_dirs[2], 2,
                     boot_node=boot_node)
tracker = utils.LogTracker(node2)
time.sleep(3)
# NOTE(review): tail of a cluster-start call — the opening is outside
# this chunk.  The genesis override lowers the chunk producer kickout
# threshold; the two nodes get distinct consensus configs.
["chunk_producer_kickout_threshold", 10]], {
    0: consensus_config0,
    1: consensus_config1
})
time.sleep(2)
nodes[1].kill()
logger.info("step 1")
# Let node 0 progress past two epochs on its own.
node0_height, _ = utils.wait_for_blocks(nodes[0],
                                        target=EPOCH_LENGTH * 2 + 1,
                                        poll_interval=5)
# NOTE(review): node 1 is restarted with itself as boot node —
# presumably its stored network address makes this valid; confirm.
nodes[1].start(boot_node=nodes[1])
time.sleep(2)
logger.info("step 2")
state_sync_done_time = None
state_sync_done_height = None
for node1_height, _ in utils.poll_blocks(nodes[1],
                                         timeout=MAX_SYNC_WAIT,
                                         poll_interval=2):
    if node1_height > node0_height:
        # Node 1 has caught up with node 0's earlier head.
        break
    if node1_height >= EPOCH_LENGTH:
        if state_sync_done_time is None:
            # First height at/after EPOCH_LENGTH: treat state sync as
            # done and remember when/where that happened.
            state_sync_done_time = time.time()
            state_sync_done_height = node1_height
        elif time.time() - state_sync_done_time > 8:
            # After 8 seconds the node must have advanced past the
            # height at which state sync finished.
            assert node1_height > state_sync_done_height, "No progress after state sync is done"
# NOTE(review): tail of per-node config overrides (the last node tracks
# shard 0) — the opening of the config dict/call is outside this chunk.
"tracked_shards": [0]
}
})
# NOTE(review): `started` is not read in this chunk — presumably used
# further down; confirm before removing.
started = time.time()
boot_node = spin_up_node(config, near_root, node_dirs[0], 0)
node1 = spin_up_node(config, near_root, node_dirs[1], 1,
                     boot_node=boot_node)
ctx = utils.TxContext([0, 0], [boot_node, node1])
sent_txs = False
observed_height = 0
# Run the chain until START_AT_BLOCK, optionally sending transactions
# along the way depending on `mode`.
for height, block_hash in utils.poll_blocks(boot_node,
                                            timeout=TIMEOUT,
                                            poll_interval=0.1):
    observed_height = height
    if height >= START_AT_BLOCK:
        break
    if mode == 'onetx' and not sent_txs:
        # Send a single batch of transactions exactly once.
        ctx.send_moar_txs(block_hash, 3, False)
        sent_txs = True
    elif mode == 'manytx':
        # Re-send a batch every time the balances have caught up.
        if ctx.get_balances() == ctx.expected_balances:
            ctx.send_moar_txs(block_hash, 3, False)
            logger.info(f'Sending moar txs at height {height}')
if mode == 'onetx':
    assert ctx.get_balances() == ctx.expected_balances
# NOTE(review): tail of per-node config overrides (both nodes track the
# same shards) — the opening of the call is outside this chunk.
0: tracked_shards,
1: tracked_shards
})
time.sleep(3)
hash_ = nodes[0].get_latest_block().hash_bytes
# Send a staking tx from every node: only test3 stakes a non-zero
# amount, the rest stake 0 (presumably unstaking), so test3 should end
# up as the only next validator.
for i in range(4):
    stake = 50000000000000000000000000000000 if i == 3 else 0
    tx = sign_staking_tx(nodes[i].signer_key, nodes[i].validator_key, stake,
                         1, hash_)
    nodes[0].send_tx(tx)
    logger.info("test%s stakes %d" % (i, stake))
# Poll until the end of the second epoch; past EPOCH_LENGTH + 1 the
# next validator set must consist of exactly test3.
for cur_height, _ in utils.poll_blocks(nodes[0], poll_interval=1):
    if cur_height >= EPOCH_LENGTH * 2:
        break
    if cur_height > EPOCH_LENGTH + 1:
        info = nodes[0].json_rpc('validators', 'latest')
        count = len(info['result']['next_validators'])
        assert count == 1, 'Number of validators do not match'
        validator = info['result']['next_validators'][0]['account_id']
        assert validator == 'test3'
while cur_height <= EPOCH_LENGTH * 3:
    # Pick the node with the highest head (sorted ascending by height,
    # then pop the last element).
    # NOTE(review): the rest of this loop body is cut off at the end of
    # this chunk.
    statuses = sorted((enumerate(node.get_latest_block() for node in nodes)),
                      key=lambda element: element[1].height)
    last = statuses.pop()
    cur_height = last[1].height
    node = nodes[last[0]]
# NOTE(review): this chunk starts mid-script; `boot_node`, `node3`,
# `node4` and `observer` are created outside this view.
ctx = utils.TxContext([4, 4, 4, 4, 4],
                      [boot_node, None, node3, node4, observer])
initial_balances = ctx.get_balances()
total_supply = sum(initial_balances)
logger.info("Initial balances: %s\nTotal supply: %s" %
            (initial_balances, total_supply))
sent_txs = False
# NOTE(review): `largest_height` is not read anywhere in this chunk —
# presumably used further down; confirm before removing.
largest_height = 0
# 1. Make the first node get to height 35. The second epoch will end around height 24-25,
# which would already result in a stall if the first node can't sync the state from the
# observer for the shard it doesn't care about
for height, hash_ in utils.poll_blocks(observer,
                                       timeout=TIMEOUT,
                                       poll_interval=0.1):
    if height >= TARGET_HEIGHT:
        break
    if height > 1 and not sent_txs:
        # Send one batch of transfers shortly after the chain starts.
        ctx.send_moar_txs(hash_, 10, False)
        logger.info(f'Sending txs at height {height}')
        sent_txs = True
logger.info("stage 1 done")
# 2. Spin up the second node and make sure it gets to 35 as well, and doesn't diverge
node2 = spin_up_node(config, near_root, node_dirs[1], 1,
                     boot_node=boot_node)
node2.stop_checking_store()
# NOTE(review): the body of this while loop is outside this chunk.
while True: