Example #1
def main():
    near_root, (stable_branch,
                current_branch) = branches.prepare_ab_test("master")
    node_root = "/tmp/near/db_migration"
    if os.path.exists(node_root):
        shutil.rmtree(node_root)
    subprocess.check_output('mkdir -p /tmp/near', shell=True)

    logging.info(f"The near root is {near_root}...")
    logging.info(f"The node root is {node_root}...")

    init_command = [
        "%snear-%s" % (near_root, stable_branch),
        "--home=%s" % node_root,
        "init",
        "--fast",
    ]

    # Init local node
    subprocess.call(init_command)

    # Run the stable node for a few blocks.
    config = {
        "local": True,
        'near_root': near_root,
        'binary_name': "near-%s" % stable_branch
    }

    logging.info("Starting the stable node...")

    node = cluster.spin_up_node(config, near_root, node_root, 0, None, None)

    logging.info("Running the stable node...")
    wait_for_blocks_or_timeout(node, 20, 100)
    logging.info("Blocks are being produced, sending some tx...")
    deploy_contract(node)
    send_some_tx(node)

    node.kill()

    logging.info(
        "Stable node has produced blocks... Stopping the stable node... ")

    # Run new node and verify it runs for a few more blocks.
    config["binary_name"] = "near-%s" % current_branch

    logging.info("Starting the current node...")
    node.binary_name = config['binary_name']
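    # Restarting on the same home dir with the new binary is what this test is
    # about: the start below exercises the on-disk DB migration (hence the
    # node_root name above).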
    node.start(node.node_key.pk, node.addr())

    logging.info("Running the current node...")
    wait_for_blocks_or_timeout(node, 20, 100)
    logging.info("Blocks are being produced, sending some tx...")
    send_some_tx(node)

    logging.info(
        "Currnet node has produced blocks... Stopping the current node... ")

    node.kill()

    logging.info("Restarting the current node...")

    node.start(node.node_key.pk, node.addr())
    wait_for_blocks_or_timeout(node, 20, 100)
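
The wait_for_blocks_or_timeout helper used above is not shown in these snippets. A minimal sketch of such a poll loop, assuming the status layout used elsewhere in these examples (the real helper in the nearcore pytest utils may differ):

import time

def wait_for_blocks_or_timeout(node, num_blocks, timeout):
    # Hypothetical sketch: poll the node until it reports at least `num_blocks`
    # blocks, failing if that takes longer than `timeout` seconds.
    started = time.time()
    max_height = 0
    while max_height < num_blocks:
        assert time.time() - started < timeout, "Timed out waiting for blocks"
        status = node.get_status()
        max_height = max(max_height,
                         status['sync_info']['latest_block_height'])
        time.sleep(1)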
Example #2
def main():
    node_root = "/tmp/near/upgradable"
    if os.path.exists(node_root):
        shutil.rmtree(node_root)
    subprocess.check_output('mkdir -p /tmp/near', shell=True)

    branch = branches.latest_rc_branch()
    print(f"Latest rc release branch is {branch}")
    near_root, (stable_branch,
                current_branch) = branches.prepare_ab_test(branch)

    # Setup local network.
    print([
        "%snear-%s" % (near_root, stable_branch),
        "--home=%s" % node_root, "testnet", "--v", "4", "--prefix", "test"
    ])
    subprocess.call([
        "%snear-%s" % (near_root, stable_branch),
        "--home=%s" % node_root, "testnet", "--v", "4", "--prefix", "test"
    ])
    genesis_config_changes = [("epoch_length", 20),
                              ("num_block_producer_seats", 10),
                              ("num_block_producer_seats_per_shard", [10]),
                              ("block_producer_kickout_threshold", 80),
                              ("chunk_producer_kickout_threshold", 80),
                              ("chain_id", "testnet")]
    node_dirs = [os.path.join(node_root, 'test%d' % i) for i in range(4)]
    for i, node_dir in enumerate(node_dirs):
        cluster.apply_genesis_changes(node_dir, genesis_config_changes)

    # Start 3 stable nodes and one current node.
    config = {
        "local": True,
        'near_root': near_root,
        'binary_name': "near-%s" % stable_branch
    }
    nodes = [
        cluster.spin_up_node(config, near_root, node_dirs[0], 0, None, None)
    ]
    for i in range(1, 3):
        nodes.append(
            cluster.spin_up_node(config, near_root, node_dirs[i], i,
                                 nodes[0].node_key.pk, nodes[0].addr()))
    if os.getenv('NAYDUCK'):
        config["binary_name"] = "near"
    else:
        config["binary_name"] = "near-%s" % current_branch
    nodes.append(
        cluster.spin_up_node(config, near_root, node_dirs[3], 3,
                             nodes[0].node_key.pk, nodes[0].addr()))

    time.sleep(2)

    # deploy a contract
    status = nodes[0].get_status()
    hash = status['sync_info']['latest_block_hash']
    tx = sign_deploy_contract_tx(
        nodes[0].signer_key,
        load_binary_file(
            '../runtime/near-vm-runner/tests/res/test_contract_rs.wasm'), 1,
        base58.b58decode(hash.encode('utf8')))
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res

    # write some random value
    tx = sign_function_call_tx(nodes[0].signer_key,
                               nodes[0].signer_key.account_id,
                               'write_random_value', [], 10**13, 0, 2,
                               base58.b58decode(hash.encode('utf8')))
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    wait_for_blocks_or_timeout(nodes[0], 20, 120)

    # Restart stable nodes into new version.
    for i in range(3):
        nodes[i].kill()
        nodes[i].binary_name = config['binary_name']
        nodes[i].start(nodes[0].node_key.pk, nodes[0].addr())

    wait_for_blocks_or_timeout(nodes[3], 60, 120)
    status0 = nodes[0].get_status()
    status3 = nodes[3].get_status()
    protocol_version = status0['protocol_version']
    latest_protocol_version = status3["latest_protocol_version"]
    assert protocol_version == latest_protocol_version, \
        "Latest protocol version %d should match active protocol version %d" % (latest_protocol_version, protocol_version)

    hash = status0['sync_info']['latest_block_hash']

    # write some random value again
    tx = sign_function_call_tx(nodes[0].signer_key,
                               nodes[0].signer_key.account_id,
                               'write_random_value', [], 10**13, 0, 4,
                               base58.b58decode(hash.encode('utf8')))
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    # hex_account_id = (b"I'm hex!" * 4).hex()
    hex_account_id = '49276d206865782149276d206865782149276d206865782149276d2068657821'
    tx = sign_payment_tx(key=nodes[0].signer_key,
                         to=hex_account_id,
                         amount=10**25,
                         nonce=5,
                         blockHash=base58.b58decode(hash.encode('utf8')))
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    # Successfully created a new account on transfer to hex
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    hex_account_balance = int(
        nodes[0].get_account(hex_account_id)['result']['amount'])
    assert hex_account_balance == 10**25
Example #3
def main():
    node_root = "/tmp/near/backward"
    if os.path.exists(node_root):
        shutil.rmtree(node_root)
    subprocess.check_output('mkdir -p /tmp/near', shell=True)

    branch = branches.latest_rc_branch()
    near_root, (stable_branch,
                current_branch) = branches.prepare_ab_test(branch)

    # Setup local network.
    subprocess.call([
        "%snear-%s" % (near_root, stable_branch),
        "--home=%s" % node_root, "testnet", "--v", "2", "--prefix", "test"
    ])

    # Run both binaries at the same time.
    config = {
        "local": True,
        'near_root': near_root,
        'binary_name': "near-%s" % stable_branch
    }
    stable_node = cluster.spin_up_node(config, near_root,
                                       os.path.join(node_root, "test0"), 0,
                                       None, None)
    config["binary_name"] = "near-%s" % current_branch
    current_node = cluster.spin_up_node(config, near_root,
                                        os.path.join(node_root, "test1"), 1,
                                        stable_node.node_key.pk,
                                        stable_node.addr())

    # Check it all works.
    BLOCKS = 100
    TIMEOUT = 150
    max_height = -1
    started = time.time()

    # Create account, transfer tokens, deploy contract, invoke function call
    status = stable_node.get_status()
    block_hash = base58.b58decode(
        status['sync_info']['latest_block_hash'].encode('utf-8'))

    new_account_id = 'test_account'
    new_signer_key = cluster.Key(new_account_id, stable_node.signer_key.pk,
                                 stable_node.signer_key.sk)
    create_account_tx = sign_create_account_with_full_access_key_and_balance_tx(
        stable_node.signer_key, new_account_id, new_signer_key, 10**24, 1,
        block_hash)
    res = stable_node.send_tx_and_wait(create_account_tx, timeout=20)
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    transfer_tx = sign_payment_tx(stable_node.signer_key, new_account_id,
                                  10**25, 2, block_hash)
    res = stable_node.send_tx_and_wait(transfer_tx, timeout=20)
    assert 'error' not in res, res

    status = stable_node.get_status()
    block_height = status['sync_info']['latest_block_height']
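    # Base the nonce on the current block height so it sits well above any
    # nonce already used for this key (access-key nonces must strictly increase).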
    nonce = block_height * 1_000_000 - 1

    tx = sign_deploy_contract_tx(new_signer_key, load_test_contract(), nonce,
                                 block_hash)
    res = stable_node.send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res

    tx = sign_function_call_tx(new_signer_key, new_account_id,
                               'write_random_value', [], 10**13, 0, nonce + 1,
                               block_hash)
    res = stable_node.send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

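    # Payload for the test contract's call_promise method: a JSON list of
    # promise descriptions, here a promise calling near_2 chained with a
    # second call to near_3.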
    data = json.dumps([{
        "create": {
            "account_id": "near_2",
            "method_name": "call_promise",
            "arguments": [],
            "amount": "0",
            "gas": 30000000000000,
        },
        "id": 0
    }, {
        "then": {
            "promise_index": 0,
            "account_id": "near_3",
            "method_name": "call_promise",
            "arguments": [],
            "amount": "0",
            "gas": 30000000000000,
        },
        "id": 1
    }])

    tx = sign_function_call_tx(new_signer_key, new_account_id, 'call_promise',
                               bytes(data, 'utf-8'), 90000000000000, 0,
                               nonce + 2, block_hash)
    res = stable_node.send_tx_and_wait(tx, timeout=20)

    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    while max_height < BLOCKS:
        assert time.time() - started < TIMEOUT
        status = current_node.get_status()
        cur_height = status['sync_info']['latest_block_height']

        if cur_height > max_height:
            max_height = cur_height
        time.sleep(1)
Example #4
def doit(s, n, N, k, monkeys, timeout):
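    # Parameters, as used below: s is the shard count, N the number of
    # validator nodes, n the validators left running (indices in [n, N) start
    # killed and are marked mess_with), and k + 1 the number of observers.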
    global block_timeout, balances_timeout, tx_tolerance

    assert 2 <= n <= N

    config = load_config()
    local_config_changes = {}

    for i in range(N, N + k + 1):
        # make all the observers track all the shards
        local_config_changes[i] = {"tracked_shards": list(range(s))}

    near_root, node_dirs = init_cluster(
        N, k + 1, s, config,
        [["min_gas_price", 0], ["max_inflation_rate", [0, 1]],
         ["epoch_length", EPOCH_LENGTH],
         ["block_producer_kickout_threshold", 10],
         ["chunk_producer_kickout_threshold", 10]], local_config_changes)

    monkey_names = [x.__name__ for x in monkeys]
    proxy = None
    logging.info(monkey_names)
    if 'monkey_local_network' in monkey_names or 'monkey_global_network' in monkey_names:
        assert config[
            'local'], 'Network stress operations only work on local nodes'
        reject_list = RejectListProxy.create_reject_list(1)
        proxy = RejectListProxy(reject_list)
        expect_network_issues()
        block_timeout += 40
        balances_timeout += 20
        tx_tolerance += 0.3
    if 'monkey_node_restart' in monkey_names:
        expect_network_issues()
    if 'monkey_node_restart' in monkey_names or 'monkey_node_set' in monkey_names:
        block_timeout += 40
        balances_timeout += 10
        tx_tolerance += 0.5

    started = time.time()

    boot_node = spin_up_node(config,
                             near_root,
                             node_dirs[0],
                             0,
                             None,
                             None,
                             proxy=proxy)
    boot_node.stop_checking_store()
    boot_node.mess_with = False
    nodes = [boot_node]

    for i in range(1, N + k + 1):
        node = spin_up_node(config,
                            near_root,
                            node_dirs[i],
                            i,
                            boot_node.node_key.pk,
                            boot_node.addr(),
                            proxy=proxy)
        node.stop_checking_store()
        nodes.append(node)
        if i >= n and i < N:
            node.kill()
            node.mess_with = True
        else:
            node.mess_with = False

    stopped = Value('i', 0)
    error = Value('i', 0)
    ps = []
    nonces = [(Value('i', 1), Lock()) for _ in range(N + k + 1)]

    def launch_process(func):
        nonlocal stopped, error, ps

        p = Process(target=func, args=(stopped, error, nodes, nonces))
        p.start()
        ps.append((p, func.__name__))

    def check_errors():
        nonlocal error, ps
        if error.value != 0:
            for (p, _) in ps:
                p.terminate()
            assert False, "At least one process failed, check error messages above"

    for monkey in monkeys:
        launch_process(monkey)

    launch_process(blocks_tracker)

    started = time.time()
    while time.time() - started < timeout:
        check_errors()
        time.sleep(1)

    logging.info("")
    logging.info("==========================================")
    logging.info("# TIMEOUT IS HIT, SHUTTING DOWN THE TEST #")
    logging.info("==========================================")
    stopped.value = 1
    started_shutdown = time.time()
    while True:
        check_errors()
        still_running = [name for (p, name) in ps if p.is_alive()]

        if len(still_running) == 0:
            break

        if time.time() - started_shutdown > TIMEOUT_SHUTDOWN:
            for (p, _) in ps:
                p.terminate()
            assert False, "The test didn't gracefully shut down in time\nStill running: %s" % (
                still_running)

    check_errors()

    logging.info("Shut down complete, executing store validity checks")
    for node in nodes:
        node.is_check_store = True
        node.check_store()
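
Each monkey passed into doit runs in its own Process and receives the shared (stopped, error, nodes, nonces) state. A hypothetical skeleton of such a worker, not one of the actual nearcore monkeys:

import time

def monkey_example(stopped, error, nodes, nonces):
    # Hypothetical worker: loop until the driver flips the shared `stopped`
    # flag, and report failures through `error` so check_errors() aborts the test.
    try:
        while stopped.value == 0:
            # ... perturb or observe `nodes` here (restart one, drop packets, etc.) ...
            time.sleep(1)
    except Exception:
        error.value = 1
        raise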
Example #5
from utils import TxContext, LogTracker

START_AT_BLOCK = 75
TIMEOUT = 150 + START_AT_BLOCK * 10

config = load_config()
near_root, node_dirs = init_cluster(
    2, 1, 1, config,
    [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10],
     ["block_producer_kickout_threshold", 80]], {2: {
         "tracked_shards": [0]
     }})

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk,
                     boot_node.addr())

ctx = TxContext([0, 0], [boot_node, node1])

sent_txs = False

observed_height = 0
while observed_height < START_AT_BLOCK:
    assert time.time() - started < TIMEOUT
    status = boot_node.get_status()
    new_height = status['sync_info']['latest_block_height']
    hash_ = status['sync_info']['latest_block_hash']
    if new_height > observed_height:
        observed_height = new_height
Example #6
TIMEOUT = 150 + START_AT_BLOCK * 10

config = load_config()

near_root, node_dirs = init_cluster(
    2, 3, 1,
    config, [["min_gas_price", 0], ["max_inflation_rate", [0, 1]],
             ["epoch_length", 10], ["block_producer_kickout_threshold", 80]],
    {4: {
        "tracked_shards": [0]
    }})

started = time.time()

# First observer
node2 = spin_up_node(config, near_root, node_dirs[2], 2, None, None)
# Boot from observer since block producer will blacklist third observer
boot_node = node2

# Second observer
node3 = spin_up_node(config, near_root, node_dirs[3], 3, boot_node.node_key.pk,
                     boot_node.addr())

# Spin up validators
node0 = spin_up_node(config, near_root, node_dirs[0], 0, boot_node.node_key.pk,
                     boot_node.addr(), [4])
node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk,
                     boot_node.addr(), [4])

ctx = TxContext([0, 0], [node0, node1])
logging.info("Genesis generated")

for node_dir in node_dirs:
    result = check_output(['ls', '-la', node_dir], text=True)
    logging.info(f'Node directory: {node_dir}')
    for line in result.split('\n'):
        logging.info(line)

Example #7
INTERMEDIATE_HEIGHT = 310
SMALL_HEIGHT = 610
LARGE_HEIGHT = 660
TIMEOUT = 3600
start = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0)
validator = spin_up_node(config,
                         near_root,
                         node_dirs[1],
                         1,
                         boot_node=boot_node)
delayed_validator = spin_up_node(config,
                                 near_root,
                                 node_dirs[2],
                                 2,
                                 boot_node=boot_node)
observer = spin_up_node(config,
                        near_root,
                        node_dirs[3],
                        3,
                        boot_node=boot_node)
Example #8
                             "nanos": 0
                         }
                     }
                 },
                 4: {
                     "tracked_shards": [0, 1, 2, 3],
                     "view_client_throttle_period": {
                         "secs": 0,
                         "nanos": 0
                     }
                 }
             })

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
boot_node.stop_checking_store()
node3 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node.node_key.pk,
                     boot_node.addr())
node4 = spin_up_node(config, near_root, node_dirs[3], 3, boot_node.node_key.pk,
                     boot_node.addr())
observer = spin_up_node(config, near_root, node_dirs[4], 4,
                        boot_node.node_key.pk, boot_node.addr())
observer.stop_checking_store()

ctx = TxContext([4, 4, 4, 4, 4], [boot_node, None, node3, node4, observer])
initial_balances = ctx.get_balances()
total_supply = sum(initial_balances)

logger.info("Initial balances: %s\nTotal supply: %s" %
            (initial_balances, total_supply))
Example #9
def doit(s, n, N, k, monkeys, timeout):
    global block_timeout, balances_timeout, tx_tolerance

    assert 2 <= n <= N

    config = {'local': True, 'near_root': '../target/debug/'}
    local_config_changes = {}

    for i in range(N, N + k + 1):
        # make all the observers track all the shards
        local_config_changes[i] = {"tracked_shards": list(range(s))}

    near_root, node_dirs = init_cluster(
        N, s, k + 1, config, [["gas_price", 0], ["max_inflation_rate", 0],
                              ["epoch_length", EPOCH_LENGTH],
                              ["block_producer_kickout_threshold", 75]],
        local_config_changes)

    started = time.time()

    boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None)
    boot_node.mess_with = False
    nodes = [boot_node]

    for i in range(1, N + k + 1):
        node = spin_up_node(config, near_root, node_dirs[i], i,
                            boot_node.node_key.pk, boot_node.addr())
        nodes.append(node)
        if i >= n and i < N:
            node.kill()
            node.mess_with = True
        else:
            node.mess_with = False

    monkey_names = [x.__name__ for x in monkeys]
    print(monkey_names)
    if 'monkey_local_network' in monkey_names or 'monkey_global_network' in monkey_names:
        print(
            "There are monkeys messing with the network, initializing the infra"
        )
        init_network_pillager()
        expect_network_issues()
        block_timeout += 10
        tx_tolerance += 0.3
    if 'monkey_node_restart' in monkey_names:
        expect_network_issues()
    if 'monkey_node_restart' in monkey_names or 'monkey_node_set' in monkey_names:
        block_timeout += 10
        balances_timeout += 10
        tx_tolerance += 0.4

    stopped = Value('i', 0)
    error = Value('i', 0)
    ps = []
    nonces = [(Value('i', 1), Lock()) for _ in range(N + k + 1)]

    def launch_process(func):
        nonlocal stopped, error, ps

        p = Process(target=func, args=(stopped, error, nodes, nonces))
        p.start()
        ps.append((p, func.__name__))

    def check_errors():
        nonlocal error, ps
        if error.value != 0:
            for (p, _) in ps:
                p.terminate()
            assert False, "At least one process failed, check error messages above"

    for monkey in monkeys:
        launch_process(monkey)

    launch_process(blocks_tracker)

    started = time.time()
    while time.time() - started < timeout:
        check_errors()
        time.sleep(1)

    print("")
    print("==========================================")
    print("# TIMEOUT IS HIT, SHUTTING DOWN THE TEST #")
    print("==========================================")
    stopped.value = 1
    started_shutdown = time.time()
    while True:
        check_errors()
        still_running = [name for (p, name) in ps if p.is_alive()]

        if len(still_running) == 0:
            break

        if time.time() - started_shutdown > TIMEOUT_SHUTDOWN:
            for (p, _) in ps:
                p.terminate()
            assert False, "The test didn't gracefully shut down in time\nStill running: %s" % (
                still_running)

    check_errors()
     ["validators", 0, "amount", "110000000000000000000000000000000"],
     ["validators", 1, "amount", "110000000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "AccountV1", "locked",
         "110000000000000000000000000000000"
     ],
     # each validator account is two records, thus the index of a record for the second is 2, not 1
     [
         "records", 2, "Account", "account", "AccountV1", "locked",
         "110000000000000000000000000000000"
     ],
     ["total_supply", "6120000000000000000000000000000000"]], {4: {
         "tracked_shards": [0, 1], "archive": True
         }, 3: {"archive": True, "tracked_shards": [1]}, 2: {"archive": True, "tracked_shards": [0]}})

boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None, [], proxy)
node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk,
                     boot_node.addr(), [], proxy)

def get_validators(node):
    return set([x['account_id'] for x in node.get_status()['validators']])

logging.info("Getting to height %s" % HEIGHTS_BEFORE_ROTATE)
while True:
    assert time.time() - started < TIMEOUT
    status = boot_node.get_status()
    new_height = status['sync_info']['latest_block_height']
    if new_height > HEIGHTS_BEFORE_ROTATE:
        break
    time.sleep(1)
Example #11
START_AT_BLOCK = 75
TIMEOUT = 150 + START_AT_BLOCK * 10

config = load_config()
near_root, node_dirs = init_cluster(
    2, 1, 1,
    config, [["min_gas_price", 0], ["max_inflation_rate", [0, 1]],
             ["epoch_length", 10], ["block_producer_kickout_threshold", 80]],
    {2: {
        "tracked_shards": [0]
    }})

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0)
node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node=boot_node)

ctx = utils.TxContext([0, 0], [boot_node, node1])

sent_txs = False

observed_height = 0
for observed_height, hash_ in utils.poll_blocks(boot_node,
                                                timeout=TIMEOUT,
                                                poll_interval=0.1):
    if mode == 'onetx' and not sent_txs:
        ctx.send_moar_txs(hash_, 3, False)
        sent_txs = True
    elif mode == 'manytx':
        if ctx.get_balances() == ctx.expected_balances:
Example #12
                 },
                 2: {
                     "tracked_shards": [0]
                 },
                 3: {
                     "tracked_shards": [0]
                 },
                 4: {
                     "tracked_shards": [0]
                 },
             })

started = time.time()

# First observer
node2 = spin_up_node(config, near_root, node_dirs[2], 2)
# Boot from observer since block producer will blacklist third observer
boot_node = node2

# Second observer
node3 = spin_up_node(config, near_root, node_dirs[3], 3, boot_node=boot_node)

# Spin up validators
node0 = spin_up_node(config,
                     near_root,
                     node_dirs[0],
                     0,
                     boot_node=boot_node,
                     blacklist=[4])
node1 = spin_up_node(config,
                     near_root,
Example #13
def doit(s, n, N, k, monkeys, timeout):
    global block_timeout, balances_timeout, tx_tolerance, epoch_length, wait_if_restart, wipe_data, restart_sync_timeout

    assert 2 <= n <= N

    config = load_config()
    local_config_changes = {}

    monkey_names = [x.__name__ for x in monkeys]
    proxy = None
    logging.info(monkey_names)

    for i in range(N + k + 1):
        local_config_changes[i] = {
            "consensus": {
                "block_header_fetch_horizon": BLOCK_HEADER_FETCH_HORIZON,
                "state_sync_timeout": {
                    "secs": 5,
                    "nanos": 0
                }
            }
        }
    for i in range(N, N + k + 1):
        # make all the observers track all the shards
        local_config_changes[i]["tracked_shards"] = list(range(s))
    if 'monkey_wipe_data' in monkey_names:
        # When data can be deleted, with the short epoch length while the node with deleted data folder is syncing,
        # other nodes can run sufficiently far ahead to GC the old data. Have one archival node to address it.
        # It is also needed, because the balances timeout is longer, and the txs can get GCed on the observer node
        # by the time it gets to checking their status.
        local_config_changes[N + k]['archive'] = True

    if 'monkey_local_network' in monkey_names or 'monkey_packets_drop' in monkey_names or 'monkey_node_restart' in monkey_names:
        expect_network_issues()
        block_timeout += 40

    if 'monkey_local_network' in monkey_names or 'monkey_packets_drop' in monkey_names:
        assert config[
            'local'], 'Network stress operations only work on local nodes'
        drop_probability = 0.05 if 'monkey_packets_drop' in monkey_names else 0

        reject_list = RejectListProxy.create_reject_list(1)
        proxy = RejectListProxy(reject_list, drop_probability)
        tx_tolerance += 0.3

    if 'monkey_local_network' in monkey_names or 'monkey_packets_drop' in monkey_names:
        # add 15 seconds + 10 seconds for each unique network-related monkey
        balances_timeout += 15

        if 'monkey_local_network' in monkey_names:
            balances_timeout += 10

        if 'monkey_packets_drop' in monkey_names:
            wait_if_restart = True
            balances_timeout += 10

    if 'monkey_node_restart' in monkey_names or 'monkey_node_set' in monkey_names:
        balances_timeout += 10
        tx_tolerance += 0.5

    if 'monkey_wipe_data' in monkey_names:
        assert 'monkey_node_restart' in monkey_names or 'monkey_node_set' in monkey_names
        wipe_data = True
        balances_timeout += 25

        # if nodes can restart, we should give them way more time to sync.
        # if packets can also be dropped, each state-sync-related request or response lost adds 10 seconds
        # to the sync process.
        restart_sync_timeout = 45 if 'monkey_packets_drop' not in monkey_names else 90
        block_timeout += (10
                          if 'monkey_packets_drop' not in monkey_names else 40)

    # We need to make sure that the blocks that include txs are not garbage collected. From the first tx sent until
    # we check balances, a time equal to `balances_timeout * 2` passes, and block production is capped at 1.7/s.
    # The GC keeps five epochs of blocks.
    min_epoch_length = (int((balances_timeout * 2) * 1.7) + 4) // 5
    epoch_length = max(epoch_length, min_epoch_length)
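    # Illustrative numbers (not from the test): with balances_timeout == 100,
    # min_epoch_length == (int(200 * 1.7) + 4) // 5 == 344 // 5 == 68, so five
    # epochs cover the whole ~200 s window even at 1.7 blocks/s.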

    near_root, node_dirs = init_cluster(
        N, k + 1, s, config,
        [["min_gas_price", 0], ["max_inflation_rate", [0, 1]],
         ["epoch_length", epoch_length],
         ["block_producer_kickout_threshold", 10],
         ["chunk_producer_kickout_threshold", 10]], local_config_changes)

    started = time.time()

    boot_node = spin_up_node(config,
                             near_root,
                             node_dirs[0],
                             0,
                             None,
                             None,
                             proxy=proxy)
    boot_node.stop_checking_store()
    boot_node.mess_with = False
    nodes = [boot_node]

    for i in range(1, N + k + 1):
        node = spin_up_node(config,
                            near_root,
                            node_dirs[i],
                            i,
                            boot_node.node_key.pk,
                            boot_node.addr(),
                            proxy=proxy)
        node.stop_checking_store()
        nodes.append(node)
        if i >= n and i < N:
            node.kill()
            node.mess_with = True
        else:
            node.mess_with = False

    stopped = Value('i', 0)
    error = Value('i', 0)
    ps = []
    nonces = [(Value('i', 1), Lock()) for _ in range(N + k + 1)]

    def launch_process(func):
        nonlocal stopped, error, ps

        p = Process(target=func, args=(stopped, error, nodes, nonces))
        p.start()
        ps.append((p, func.__name__))

    def check_errors():
        nonlocal error, ps
        if error.value != 0:
            for (p, _) in ps:
                p.terminate()
            assert False, "At least one process failed, check error messages above"

    for monkey in monkeys:
        launch_process(monkey)

    launch_process(blocks_tracker)

    started = time.time()
    while time.time() - started < timeout:
        check_errors()
        time.sleep(1)

    logging.info("")
    logging.info("==========================================")
    logging.info("# TIMEOUT IS HIT, SHUTTING DOWN THE TEST #")
    logging.info("==========================================")
    stopped.value = 1
    started_shutdown = time.time()
    proxies_stopped = False

    while True:
        check_errors()
        still_running = [name for (p, name) in ps if p.is_alive()]

        if len(still_running) == 0:
            break

        # If the test is running with proxies, `node_restart` and `node_set` can get
        # stuck because the proxies now are their child processes. We can't kill the
        # proxies right away, because that would interfere with block production, and
        # might prevent other workers (e.g. block_tracker) from completing in a timely
        # manner. Thus, kill the proxies some time into the shut down process.
        if time.time(
        ) - started_shutdown > TIMEOUT_SHUTDOWN / 2 and not proxies_stopped:
            logging.info(
                "Shutdown is %s seconds in, shutting down proxies if any" %
                (TIMEOUT_SHUTDOWN / 2))
            if boot_node.proxy is not None:
                boot_node.proxy.global_stopped.value = 1
                for p in boot_node.proxy.ps:
                    p.terminate()
            proxies_stopped = True

        if time.time() - started_shutdown > TIMEOUT_SHUTDOWN:
            for (p, _) in ps:
                p.terminate()
            assert False, "The test didn't gracefully shut down in time\nStill running: %s" % (
                still_running)

    check_errors()

    logging.info("Shut down complete, executing store validity checks")
    for node in nodes:
        node.is_check_store = True
        node.check_store()
Example #14
def main():
    node_root = utils.get_near_tempdir('state_migration', clean=True)
    near_root, (stable_branch,
                current_branch) = branches.prepare_ab_test('betanet')

    # Run the stable node for a few blocks.
    subprocess.call([
        "%sneard-%s" % (near_root, stable_branch),
        "--home=%s/test0" % node_root, "init", "--fast"
    ])
    stable_protocol_version = json.load(
        open('%s/test0/genesis.json' % node_root))['protocol_version']
    config = {
        "local": True,
        'near_root': near_root,
        'binary_name': "neard-%s" % stable_branch
    }
    stable_node = cluster.spin_up_node(config, near_root,
                                       os.path.join(node_root, "test0"), 0)

    utils.wait_for_blocks(stable_node, count=20)
    # TODO: we should make state more interesting to migrate by sending some tx / contracts.
    stable_node.cleanup()
    os.mkdir('%s/test0' % node_root)

    # Dump state.
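    # The dump ends up as output.json in the home dir passed below
    # (test0_finished); it is moved into place as the new genesis.json further down.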
    subprocess.call([
        "%sneard-%s" % (near_root, stable_branch), "--home",
        '%s/test0_finished' % node_root, "view_state", "dump_state"
    ])

    # Migrate.
    migrations_home = '../scripts/migrations'
    all_migrations = sorted(os.listdir(migrations_home),
                            key=lambda x: int(x.split('-')[0]))
    for fname in all_migrations:
        m = re.match(r'([0-9]+)-.*', fname)
        if m:
            version = int(m.groups()[0])
            if version > stable_protocol_version:
                exitcode = subprocess.call([
                    'python',
                    os.path.join(migrations_home, fname),
                    '%s/test0_finished' % node_root,
                    '%s/test0_finished' % node_root
                ])
                assert exitcode == 0, "Failed to run migration %d" % version
    os.rename(os.path.join(node_root, 'test0_finished/output.json'),
              os.path.join(node_root, 'test0/genesis.json'))
    shutil.copy(os.path.join(node_root, 'test0_finished/config.json'),
                os.path.join(node_root, 'test0/'))
    shutil.copy(os.path.join(node_root, 'test0_finished/validator_key.json'),
                os.path.join(node_root, 'test0/'))
    shutil.copy(os.path.join(node_root, 'test0_finished/node_key.json'),
                os.path.join(node_root, 'test0/'))

    # Run new node and verify it runs for a few more blocks.
    config["binary_name"] = "neard-%s" % current_branch
    current_node = cluster.spin_up_node(config, near_root,
                                        os.path.join(node_root, "test0"), 0)

    utils.wait_for_blocks(current_node, count=20)

    # That the new genesis can be deserialized by the new binary is verified
    # above (the new node produces blocks). Also check that the new genesis
    # protocol_version matches nearcore/res/genesis_config's.
    new_genesis = json.load(open(os.path.join(node_root,
                                              'test0/genesis.json')))
    res_genesis = json.load(open('../nearcore/res/genesis_config.json'))
    assert new_genesis['protocol_version'] == res_genesis['protocol_version']
Example #15
def test_upgrade() -> None:
    """Test that upgrade from ‘stable’ to ‘current’ binary is possible.

    1. Start a network with 3 `stable` nodes and 1 `new` node.
    2. Start switching `stable` nodes one by one with `new` nodes.
    3. Run for three epochs and observe that current protocol version of the
       network matches `new` nodes.
    """
    executables = get_executables()
    node_root = utils.get_near_tempdir('upgradable', clean=True)

    # Setup local network.
    # TODO(#4372): testnet subcommand deprecated since 1.24.  Replace with
    # localnet after a couple of releases in 2022.
    cmd = (executables.stable.neard, "--home=%s" % node_root, "testnet", "--v",
           "4", "--prefix", "test")
    logger.info(' '.join(str(arg) for arg in cmd))
    subprocess.check_call(cmd)
    genesis_config_changes = [("epoch_length", 20),
                              ("num_block_producer_seats", 10),
                              ("num_block_producer_seats_per_shard", [10]),
                              ("block_producer_kickout_threshold", 80),
                              ("chunk_producer_kickout_threshold", 80),
                              ("chain_id", "testnet")]
    node_dirs = [os.path.join(node_root, 'test%d' % i) for i in range(4)]
    for i, node_dir in enumerate(node_dirs):
        cluster.apply_genesis_changes(node_dir, genesis_config_changes)
        cluster.apply_config_changes(node_dir, {'tracked_shards': [0]})

    # Start 3 stable nodes and one current node.
    config = executables.stable.node_config()
    nodes = [
        cluster.spin_up_node(config, executables.stable.root, node_dirs[0], 0)
    ]
    for i in range(1, 3):
        nodes.append(
            cluster.spin_up_node(config,
                                 executables.stable.root,
                                 node_dirs[i],
                                 i,
                                 boot_node=nodes[0]))
    config = executables.current.node_config()
    nodes.append(
        cluster.spin_up_node(config,
                             executables.current.root,
                             node_dirs[3],
                             3,
                             boot_node=nodes[0]))

    time.sleep(2)

    # deploy a contract
    hash = nodes[0].get_latest_block().hash_bytes
    tx = sign_deploy_contract_tx(nodes[0].signer_key,
                                 utils.load_test_contract(), 1, hash)
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res

    # write some random value
    tx = sign_function_call_tx(nodes[0].signer_key,
                               nodes[0].signer_key.account_id,
                               'write_random_value', [], 10**13, 0, 2, hash)
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    utils.wait_for_blocks(nodes[0], count=20)

    # Restart stable nodes into new version.
    for i in range(3):
        nodes[i].kill()
        nodes[i].binary_name = config['binary_name']
        nodes[i].start(boot_node=nodes[0])

    utils.wait_for_blocks(nodes[3], count=60)
    status0 = nodes[0].get_status()
    status3 = nodes[3].get_status()
    protocol_version = status0['protocol_version']
    latest_protocol_version = status3["latest_protocol_version"]
    assert protocol_version == latest_protocol_version, \
        "Latest protocol version %d should match active protocol version %d" % (
        latest_protocol_version, protocol_version)

    hash = base58.b58decode(
        status0['sync_info']['latest_block_hash'].encode('ascii'))

    # write some random value again
    tx = sign_function_call_tx(nodes[0].signer_key,
                               nodes[0].signer_key.account_id,
                               'write_random_value', [], 10**13, 0, 4, hash)
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    # hex_account_id = (b"I'm hex!" * 4).hex()
    hex_account_id = '49276d206865782149276d206865782149276d206865782149276d2068657821'
    tx = sign_payment_tx(key=nodes[0].signer_key,
                         to=hex_account_id,
                         amount=10**25,
                         nonce=5,
                         blockHash=hash)
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    # Successfully created a new account on transfer to hex
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    hex_account_balance = int(
        nodes[0].get_account(hex_account_id)['result']['amount'])
    assert hex_account_balance == 10**25
Example #16
                     "nanos": 0
                 }
             }
         },
         4: {
             "tracked_shards": [0, 1, 2, 3],
             "view_client_throttle_period": {
                 "secs": 0,
                 "nanos": 0
             }
         }
     })

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0)
boot_node.stop_checking_store()
node3 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node=boot_node)
node4 = spin_up_node(config, near_root, node_dirs[3], 3, boot_node=boot_node)
observer = spin_up_node(config,
                        near_root,
                        node_dirs[4],
                        4,
                        boot_node=boot_node)
observer.stop_checking_store()

ctx = utils.TxContext([4, 4, 4, 4, 4],
                      [boot_node, None, node3, node4, observer])
initial_balances = ctx.get_balances()
total_supply = sum(initial_balances)