Code example #1
def test_sanity_spin_up():
    """Spins up a two-node cluster and wait for a few blocks to be produced.

    This is just a sanity check that the neard binary isn’t borked too much.
    See <https://github.com/near/nearcore/issues/4993>.
    """
    nodes = cluster.start_cluster(2, 0, 1, None, [], {})
    utils.wait_for_blocks(nodes[0], target=4)
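
A note on the helper these examples revolve around: judging purely from the calls collected on this page, utils.wait_for_blocks takes a node plus either target= (an absolute block height) or count= (a number of additional blocks), accepts optional timeout=, poll_interval=, verbose= and check_storage= keyword arguments, and returns a block id (cluster.BlockId in example #2) that is used both through its .height/.hash/.hash_bytes attributes and as a (height, hash) tuple. The sketch below merely restates those observed patterns and is not an authoritative description of the helper:

# Hedged usage sketch inferred only from the examples on this page.
import pathlib
import sys

sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib'))

import cluster
from configured_logger import logger
import utils

nodes = cluster.start_cluster(2, 0, 1, None, [], {})

# Wait until node 0 reaches an absolute height; the result exposes attributes.
latest = utils.wait_for_blocks(nodes[0], target=10)
logger.info(f'now at height {latest.height}, hash {latest.hash}')

# Wait for three more blocks; the result also unpacks as a (height, hash) pair.
height, hash_ = utils.wait_for_blocks(nodes[0], count=3)

# utils.poll_blocks (examples #17, #21 and #22) instead yields (height, hash)
# pairs until the caller breaks out or the timeout is hit.
for height, _ in utils.poll_blocks(nodes[0], timeout=120):
    if height >= 20:
        break
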
Code example #2
def main():
    nodes = []

    # Build the container
    run(('make', 'DOCKER_TAG=' + _DOCKER_IMAGE_TAG, 'docker-nearcore'))
    try:
        dot_near = pathlib.Path.home() / '.near'

        # Initialise local network
        cmd = f'neard --home /home/near localnet --v {NUM_NODES} --prefix test'
        docker_run(cmd, volume=(dot_near, '/home/near'))

        # Start all the nodes
        for ordinal in range(NUM_NODES):
            logger.info(f'Starting node {ordinal}')
            node = DockerNode(ordinal, dot_near / f'test{ordinal}')
            node.start(boot_node=nodes)
            nodes.append(node)

        # Wait for them to initialise
        for ordinal, node in enumerate(nodes):
            logger.info(f'Waiting for node {ordinal} to respond')
            node.wait_for_rpc(10)

        # Wait for BLOCKS blocks to be generated
        latest = utils.wait_for_blocks(nodes[0], target=BLOCKS)

        # Fetch latest block from all the nodes
        blocks = []
        for ordinal, node in enumerate(nodes):
            utils.wait_for_blocks(node, target=latest.height)
            response = node.get_block(latest.hash)
            assert 'result' in response, (ordinal, response)
            block = response['result']
            blocks.append(block)
            bid = cluster.BlockId.from_header(block['header'])
            logger.info(f'Node {ordinal} sees block: {bid}')

        # All blocks should be equal
        for ordinal in range(1, NUM_NODES):
            assert blocks[0] == blocks[ordinal], (ordinal, blocks)

        logger.info('All good')

    finally:
        # `docker stop` takes a few seconds, so stop all containers in parallel.
        # Via atexit we'll also call the DockerNode.cleanup method for each
        # node, and it'll handle all the other cleanup.
        cids = tuple(filter(None, (node._container_id for node in nodes)))
        if cids:
            logger.info('Stopping containers')
            run(('docker', 'stop') + cids)
        for node in nodes:
            node._container_id = None

        subprocess.check_call(
            ('docker', 'image', 'rm', '-f', _DOCKER_IMAGE_TAG))
Code example #3
File: db_migration.py (Project: near/nearcore)
def send_some_tx(node):
    # Write 10 values to storage
    nonce = node.get_nonce_for_pk(node.signer_key.account_id,
                                  node.signer_key.pk) + 10
    for i in range(10):
        hash_ = node.get_latest_block().hash_bytes
        keyvalue = bytearray(16)
        keyvalue[0] = (nonce // 10) % 256
        keyvalue[8] = (nonce // 10) % 255
        tx2 = sign_function_call_tx(node.signer_key,
                                    node.signer_key.account_id,
                                    'write_key_value', bytes(keyvalue),
                                    10000000000000, 100000000000, nonce, hash_)
        nonce += 10
        res = node.send_tx_and_wait(tx2, timeout=15)
        assert 'error' not in res, res
        assert 'Failure' not in res['result']['status'], res
    utils.wait_for_blocks(node, count=3)
Code example #4
File: block_sync_archival.py (Project: near/nearcore)
def run_test(cluster: Cluster) -> None:
    # Start the validator and the first observer.  Wait until a few epochs'
    # worth of blocks have been produced and the observer has synchronised
    # them, then kill the validator so that no more blocks are generated.
    boot = cluster.start_node(0, boot_node=None)
    fred = cluster.start_node(1, boot_node=boot)
    utils.wait_for_blocks(fred, target=TARGET_HEIGHT, poll_interval=1)
    metrics = get_metrics('boot', boot)
    boot.kill()

    # We didn’t generate enough blocks to fill boot’s in-memory cache which
    # means all Fred’s requests should be served from it.
    assert_metrics(metrics, ('cache/ok', ))

    # Restart Fred so that its cache is cleared.  Then start the second
    # observer, Barney, and wait for it to sync up.
    fred_blocks = get_all_blocks(fred)
    fred.kill(gentle=True)
    fred.start()

    barney = cluster.start_node(2, boot_node=fred)
    utils.wait_for_blocks(barney,
                          target=fred_blocks[-1].height,
                          poll_interval=1)
    barney_blocks = get_all_blocks(barney)
    if fred_blocks != barney_blocks:
        for f, b in zip(fred_blocks, barney_blocks):
            if f != b:
                logger.error(f'{f} != {b}')
        assert False

    # Since Fred’s in-memory cache is clear, all Barney’s requests are served
    # from storage.  Since DBCol::PartialChunks is garbage collected, some of the
    # requests are served from DBCol::Chunks.
    assert_metrics(get_metrics('fred', fred), (
        'chunk/ok',
        'partial/ok',
    ))
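
The get_metrics and assert_metrics helpers used above are defined elsewhere in block_sync_archival.py and are not shown on this page. Purely as a hypothetical illustration of the kind of check the comments describe (only the listed request-result counters are expected to be non-zero), such a helper could look like the sketch below; the dictionary shape and labels such as 'cache/ok' are assumptions taken from the calls above, not the real implementation:

# Hypothetical sketch only; the real helpers in block_sync_archival.py may
# differ.  `metrics` is assumed to map request-result labels (e.g. 'cache/ok',
# 'chunk/ok', 'partial/ok') to counter values scraped from a node.
def assert_metrics(metrics: dict, expected_non_zero: tuple) -> None:
    for label, value in metrics.items():
        if label in expected_non_zero:
            assert value > 0, f'expected non-zero counter {label}, got {value}'
        else:
            assert value == 0, f'expected zero counter {label}, got {value}'
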
Code example #5
File: db_migration.py (Project: near/nearcore)
def main():
    executables = branches.prepare_ab_test()
    node_root = utils.get_near_tempdir('db_migration', clean=True)

    logging.info(f"The near root is {executables.stable.root}...")
    logging.info(f"The node root is {node_root}...")

    # Init local node
    subprocess.call((
        executables.stable.neard,
        "--home=%s" % node_root,
        "init",
        "--fast",
    ))

    # Run the stable node for a few blocks.
    logging.info("Starting the stable node...")
    config = executables.stable.node_config()
    node = cluster.spin_up_node(config, executables.stable.root,
                                str(node_root), 0)

    logging.info("Running the stable node...")
    utils.wait_for_blocks(node, count=20)
    logging.info("Blocks are being produced, sending some tx...")
    deploy_contract(node)
    send_some_tx(node)

    node.kill()

    logging.info(
        "Stable node has produced blocks... Stopping the stable node... ")

    # Run new node and verify it runs for a few more blocks.
    logging.info("Starting the current node...")
    config = executables.current.node_config()
    node.binary_name = config['binary_name']
    node.start(boot_node=node)

    logging.info("Running the current node...")
    utils.wait_for_blocks(node, count=20)
    logging.info("Blocks are being produced, sending some tx...")
    send_some_tx(node)

    logging.info(
        "Currnet node has produced blocks... Stopping the current node... ")

    node.kill()

    logging.info("Restarting the current node...")

    node.start(boot_node=node)
    utils.wait_for_blocks(node, count=20)
Code example #6
File: wrong_sync_info.py (Project: near/nearcore)
BLOCKS = 30

nodes = start_cluster(
    2, 1, 2, None,
    [["epoch_length", 7], ["block_producer_kickout_threshold", 80]], {})

started = time.time()

nodes[1].kill()
nodes[2].kill()

nodes[1].start(boot_node=nodes[0])
time.sleep(2)

logger.info(f'Waiting for {BLOCKS} blocks...')
height, _ = utils.wait_for_blocks(nodes[1], target=BLOCKS, timeout=TIMEOUT)
logger.info(f'Got to {height} blocks, getting to fun stuff')

res = nodes[1].json_rpc('adv_set_weight', 1000)
assert 'result' in res, res
res = nodes[1].json_rpc('adv_disable_header_sync', [])
assert 'result' in res, res

tracker = utils.LogTracker(nodes[2])
nodes[2].start(boot_node=nodes[1])
time.sleep(2)

utils.wait_for_blocks(nodes[2], target=BLOCKS, timeout=TIMEOUT)

assert tracker.check('ban a fraudulent peer')
Code example #7
            "tracked_shards": [0, 1, 2, 3]
        }
    })

started = time.time()

act_to_val = [4, 4, 4, 4, 4]

ctx = utils.TxContext(act_to_val, nodes)

last_balances = [x for x in ctx.expected_balances]

step = 0
sent_height = -1

height, hash_ = utils.wait_for_blocks(nodes[4], target=1, check_storage=False)
tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100, 1,
                     base58.b58decode(hash_.encode('utf8')))
nodes[4].send_tx(tx)
ctx.expected_balances[0] -= 100
ctx.expected_balances[1] += 100
logger.info('Sent tx at height %s' % height)
sent_height = height

height, hash_ = utils.wait_for_blocks(nodes[4],
                                      target=sent_height + 6,
                                      check_storage=False)
cur_balances = ctx.get_balances()
assert cur_balances == ctx.expected_balances, "%s != %s" % (
    cur_balances, ctx.expected_balances)
Code example #8
num_new_accounts = 10
balance = 50000000000000000000000000000000
account_keys = []
for i in range(num_new_accounts):
    account_name = f'test_account{i}.test0'
    signer_key = Key(account_name, nodes[0].signer_key.pk,
                     nodes[0].signer_key.sk)
    create_account_tx = sign_create_account_with_full_access_key_and_balance_tx(
        nodes[0].signer_key, account_name, signer_key,
        balance // num_new_accounts, i + 1, block_hash)
    account_keys.append(signer_key)
    res = nodes[0].send_tx_and_wait(create_account_tx, timeout=15)
    assert 'error' not in res, res

latest_block = utils.wait_for_blocks(nodes[0], target=50)
cur_height = latest_block.height
block_hash = latest_block.hash_bytes

for signer_key in account_keys:
    staking_tx = sign_staking_tx(signer_key, nodes[0].validator_key,
                                 balance // (num_new_accounts * 2),
                                 cur_height * 1_000_000 - 1, block_hash)
    res = nodes[0].send_tx_and_wait(staking_tx, timeout=15)
    assert 'error' not in res

cur_height, _ = utils.wait_for_blocks(nodes[0], target=80)

logger.info('restart node1')
nodes[1].start(boot_node=nodes[1])
logger.info('node1 restarted')
Code example #9
    [["epoch_length", 15], ["num_block_producer_seats_per_shard", [5]],
     ["validators", 0, "amount", "60000000000000000000000000000000"],
     ["block_producer_kickout_threshold", 50],
     ["chunk_producer_kickout_threshold", 50],
     [
         "records", 0, "Account", "account", "locked",
         "60000000000000000000000000000000"
     ], ["total_supply", "5010000000000000000000000000000000"]], {
         0: consensus_config,
         1: consensus_config,
         2: consensus_config,
         3: consensus_config
     })

node0_height, _ = utils.wait_for_blocks(nodes[0],
                                        target=TARGET_HEIGHT,
                                        verbose=True)

logger.info('Kill node 1')
nodes[1].kill()

node0_height, _ = utils.wait_for_blocks(nodes[0],
                                        target=AFTER_SYNC_HEIGHT,
                                        verbose=True)

logger.info('Restart node 1')
nodes[1].start(boot_node=nodes[1])
time.sleep(3)

node1_height, _ = utils.wait_for_blocks(nodes[1],
                                        target=node0_height,
                                        verbose=True)
Code example #10
        })

    boot_node = spin_up_node(config, near_root, node_dirs[0], 0, proxy=proxy)
    node1 = spin_up_node(config,
                         near_root,
                         node_dirs[1],
                         1,
                         boot_node=boot_node,
                         proxy=proxy)

    def get_validators(node):
        return set([x['account_id'] for x in node.get_status()['validators']])

    logging.info(f'Getting to height {HEIGHTS_BEFORE_ROTATE}')
    utils.wait_for_blocks(boot_node,
                          target=HEIGHTS_BEFORE_ROTATE,
                          timeout=timeout.left_seconds())

    node2 = spin_up_node(config,
                         near_root,
                         node_dirs[2],
                         2,
                         boot_node=boot_node,
                         proxy=proxy)
    node3 = spin_up_node(config,
                         near_root,
                         node_dirs[3],
                         3,
                         boot_node=boot_node,
                         proxy=proxy)
Code example #11
File: upgradable.py (Project: near/nearcore)
def test_upgrade() -> None:
    """Test that upgrade from ‘stable’ to ‘current’ binary is possible.

    1. Start a network with 3 `stable` nodes and 1 `new` node.
    2. Replace the `stable` nodes one by one with `new` ones.
    3. Run for three epochs and observe that the current protocol version of
       the network matches the `new` nodes.
    """
    executables = get_executables()
    node_root = utils.get_near_tempdir('upgradable', clean=True)

    # Setup local network.
    # TODO(#4372): testnet subcommand deprecated since 1.24.  Replace with
    # localnet after a couple of releases in 2022.
    cmd = (executables.stable.neard, "--home=%s" % node_root, "testnet", "--v",
           "4", "--prefix", "test")
    logger.info(' '.join(str(arg) for arg in cmd))
    subprocess.check_call(cmd)
    genesis_config_changes = [("epoch_length", 20),
                              ("num_block_producer_seats", 10),
                              ("num_block_producer_seats_per_shard", [10]),
                              ("block_producer_kickout_threshold", 80),
                              ("chunk_producer_kickout_threshold", 80),
                              ("chain_id", "testnet")]
    node_dirs = [os.path.join(node_root, 'test%d' % i) for i in range(4)]
    for i, node_dir in enumerate(node_dirs):
        cluster.apply_genesis_changes(node_dir, genesis_config_changes)
        cluster.apply_config_changes(node_dir, {'tracked_shards': [0]})

    # Start 3 stable nodes and one current node.
    config = executables.stable.node_config()
    nodes = [
        cluster.spin_up_node(config, executables.stable.root, node_dirs[0], 0)
    ]
    for i in range(1, 3):
        nodes.append(
            cluster.spin_up_node(config,
                                 executables.stable.root,
                                 node_dirs[i],
                                 i,
                                 boot_node=nodes[0]))
    config = executables.current.node_config()
    nodes.append(
        cluster.spin_up_node(config,
                             executables.current.root,
                             node_dirs[3],
                             3,
                             boot_node=nodes[0]))

    time.sleep(2)

    # deploy a contract
    hash = nodes[0].get_latest_block().hash_bytes
    tx = sign_deploy_contract_tx(nodes[0].signer_key,
                                 utils.load_test_contract(), 1, hash)
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res

    # write some random value
    tx = sign_function_call_tx(nodes[0].signer_key,
                               nodes[0].signer_key.account_id,
                               'write_random_value', [], 10**13, 0, 2, hash)
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    utils.wait_for_blocks(nodes[0], count=20)

    # Restart stable nodes into new version.
    for i in range(3):
        nodes[i].kill()
        nodes[i].binary_name = config['binary_name']
        nodes[i].start(boot_node=nodes[0])

    utils.wait_for_blocks(nodes[3], count=60)
    status0 = nodes[0].get_status()
    status3 = nodes[3].get_status()
    protocol_version = status0['protocol_version']
    latest_protocol_version = status3["latest_protocol_version"]
    assert protocol_version == latest_protocol_version, \
        "Latest protocol version %d should match active protocol version %d" % (
        latest_protocol_version, protocol_version)

    hash = base58.b58decode(
        status0['sync_info']['latest_block_hash'].encode('ascii'))

    # write some random value again
    tx = sign_function_call_tx(nodes[0].signer_key,
                               nodes[0].signer_key.account_id,
                               'write_random_value', [], 10**13, 0, 4, hash)
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    # hex_account_id = (b"I'm hex!" * 4).hex()
    hex_account_id = '49276d206865782149276d206865782149276d206865782149276d2068657821'
    tx = sign_payment_tx(key=nodes[0].signer_key,
                         to=hex_account_id,
                         amount=10**25,
                         nonce=5,
                         blockHash=hash)
    res = nodes[0].send_tx_and_wait(tx, timeout=20)
    # Successfully created a new account on transfer to hex
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    hex_account_balance = int(
        nodes[0].get_account(hex_account_id)['result']['amount'])
    assert hex_account_balance == 10**25
Code example #12
FINAL_HEIGHT_THRESHOLD = 80

nodes = start_cluster(
    4, 0, 4, None,
    [["epoch_length", 200], ["block_producer_kickout_threshold", 10]], {})
time.sleep(3)
cur_height = 0
fork1_height = 0
fork2_height = 0

for i in range(0, 4):
    res = nodes[i].json_rpc('adv_disable_doomslug', [])
    assert 'result' in res, res

# step 1, let nodes run for some time
utils.wait_for_blocks(nodes[0], target=FIRST_STEP_WAIT)

for i in range(2):
    nodes[i].kill()

logger.info("killing node 0 and 1")
utils.wait_for_blocks(nodes[2], target=FIRST_STEP_WAIT + SECOND_STEP_WAIT)

for i in range(2, 4):
    nodes[i].kill()

logger.info("killing node 2 and 3")

for i in range(2):
    nodes[i].start(boot_node=nodes[i])
    res = nodes[i].json_rpc('adv_disable_doomslug', [])
Code example #13
File: state_sync1.py (Project: near/nearcore)
        "block_fetch_horizon": 10,
        "block_header_fetch_horizon": 10
    }
}
nodes = start_cluster(
    4, 0, 1, None,
    [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10],
     ["chunk_producer_kickout_threshold", 10]], {
         0: consensus_config,
         1: consensus_config
     })
time.sleep(2)
nodes[1].kill()

logger.info("step 1")
utils.wait_for_blocks(nodes[0], target=BLOCK_WAIT)
nodes[1].start(boot_node=nodes[1])
time.sleep(2)

logger.info("step 2")
synced = False
block_height0 = block_height1 = -1
while block_height0 <= EPOCH_LENGTH and block_height1 <= EPOCH_LENGTH:
    block_height0, block_hash0 = nodes[0].get_latest_block()
    block_height1, block_hash1 = nodes[1].get_latest_block()
    if block_height0 > BLOCK_WAIT:
        if block_height0 > block_height1:
            try:
                nodes[0].get_block(block_hash1)
                if synced and abs(block_height0 - block_height1) >= 5:
                    assert False, "Nodes fall out of sync"
Code example #14
File: gc_sync_after_sync.py (Project: near/nearcore)
         "records", 0, "Account", "account", "locked",
         "12500000000000000000000000000000"
     ], ["validators", 1, "amount", "12500000000000000000000000000000"],
     [
         "records", 2, "Account", "account", "locked",
         "12500000000000000000000000000000"
     ], ['total_supply', "4925000000000000000000000000000000"],
     ["block_producer_kickout_threshold", 40],
     ["chunk_producer_kickout_threshold", 40], ["num_block_producer_seats", 10],
     ["num_block_producer_seats_per_shard", [10]]], {1: consensus_config})

logger.info('Kill node 1')
nodes[1].kill()

node0_height, _ = utils.wait_for_blocks(nodes[0],
                                        target=TARGET_HEIGHT1,
                                        verbose=True)

logger.info('Restart node 1')
nodes[1].start(boot_node=nodes[1])
time.sleep(3)

node1_height, _ = utils.wait_for_blocks(nodes[1],
                                        target=node0_height,
                                        verbose=True)

if swap_nodes:
    logger.info('Swap nodes 0 and 1')
    nodes[0], nodes[1] = nodes[1], nodes[0]

logger.info('Kill node 1')
Code example #15
File: proxy_restart.py (Project: near/nearcore)
#!/usr/bin/env python3
# Start two nodes. Proxify both nodes. Kill one of them, restart it,
# and wait until a block at height >= 20 is produced.
import sys, time
import pathlib

sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib'))

from cluster import start_cluster
from configured_logger import logger
from peer import *
from proxy import ProxyHandler
import utils

TARGET_HEIGHT = 20

nodes = start_cluster(2, 0, 1, None, [], {}, ProxyHandler)

nodes[1].kill()
nodes[1].start(boot_node=nodes[0])

utils.wait_for_blocks(nodes[1], target=TARGET_HEIGHT)
Code example #16
File: db_migration.py (Project: near/nearcore)
def deploy_contract(node):
    hash_ = node.get_latest_block().hash_bytes
    tx = sign_deploy_contract_tx(node.signer_key, utils.load_test_contract(),
                                 10, hash_)
    node.send_tx_and_wait(tx, timeout=15)
    utils.wait_for_blocks(node, count=3)
Code example #17
nodes = start_cluster(
    2, 0, 1, None,
    [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 30],
     ["chunk_producer_kickout_threshold", 30], ["num_block_producer_seats", 4],
     ["num_block_producer_seats_per_shard", [4]],
     ["validators", 0, "amount", "150000000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "locked",
         "150000000000000000000000000000000"
     ], ["total_supply", "3100000000000000000000000000000000"]], {1: config1})
time.sleep(2)

block = nodes[1].get_block(nodes[1].get_latest_block().height)
epoch_id = block['result']['header']['epoch_id']

utils.wait_for_blocks(nodes[1], target=STOP_HEIGHT1)

nodes[1].kill()
for height, _ in utils.poll_blocks(nodes[0], timeout=TIMEOUT):
    cur_block = nodes[0].get_block(height)
    if cur_block['result']['header']['epoch_id'] != epoch_id:
        break

seed = bytes([1] * 32)
public_key, secret_key = nacl.bindings.crypto_sign_seed_keypair(seed)
node_key = Key("",
               base58.b58encode(public_key).decode('utf-8'),
               base58.b58encode(secret_key).decode('utf-8'))
nodes[1].reset_node_key(node_key)
nodes[1].start(boot_node=nodes[0])
time.sleep(2)
Code example #18
         },
         1: {
             "tracked_shards": [0]
         },
         2: {
             "tracked_shards": [0],
             "consensus": {
                 "block_fetch_horizon": 20,
             },
         }
     })

started = time.time()

boot_node = spin_up_node(config, near_root, node_dirs[0], 0)
node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node=boot_node)

ctx = utils.TxContext([0, 0], [boot_node, node1])

utils.wait_for_blocks(boot_node, target=START_AT_BLOCK)

node2 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node=boot_node)
tracker = utils.LogTracker(node2)
time.sleep(3)

try:
    status = node2.get_status()
    sys.exit("node 2 successfully started while it should fail")
except requests.exceptions.ConnectionError:
    pass
Code example #19
def main():
    node_root = utils.get_near_tempdir('state_migration', clean=True)
    executables = branches.prepare_ab_test('betanet')

    # Run the stable node for a few blocks.
    subprocess.call([
        "%sneard-%s" % (near_root, stable_branch),
        "--home=%s/test0" % node_root, "init", "--fast"
    ])
    stable_protocol_version = json.load(
        open('%s/test0/genesis.json' % node_root))['protocol_version']
    config = {
        "local": True,
        'near_root': near_root,
        'binary_name': "neard-%s" % stable_branch
    }
    stable_node = cluster.spin_up_node(config, near_root,
                                       os.path.join(node_root, "test0"), 0)

    utils.wait_for_blocks(stable_node, count=20)
    # TODO: we should make state more interesting to migrate by sending some tx / contracts.
    stable_node.cleanup()
    os.mkdir('%s/test0' % node_root)

    # Dump state.
    subprocess.call([
        "%sneard-%s" % (near_root, stable_branch), "--home",
        '%s/test0_finished' % node_root, "view_state", "dump_state"
    ])

    # Migrate.
    migrations_home = '../scripts/migrations'
    all_migrations = sorted(os.listdir(migrations_home),
                            key=lambda x: int(x.split('-')[0]))
    for fname in all_migrations:
        m = re.match(r'([0-9]+)-.*', fname)
        if m:
            version = int(m.groups()[0])
            if version > stable_protocol_version:
                exitcode = subprocess.call([
                    'python',
                    os.path.join(migrations_home, fname),
                    '%s/test0_finished' % node_root,
                    '%s/test0_finished' % node_root
                ])
                assert exitcode == 0, "Failed to run migration %d" % version
    os.rename(os.path.join(node_root, 'test0_finished/output.json'),
              os.path.join(node_root, 'test0/genesis.json'))
    shutil.copy(os.path.join(node_root, 'test0_finished/config.json'),
                os.path.join(node_root, 'test0/'))
    shutil.copy(os.path.join(node_root, 'test0_finished/validator_key.json'),
                os.path.join(node_root, 'test0/'))
    shutil.copy(os.path.join(node_root, 'test0_finished/node_key.json'),
                os.path.join(node_root, 'test0/'))

    # Run new node and verify it runs for a few more blocks.
    config["binary_name"] = "neard-%s" % current_branch
    current_node = cluster.spin_up_node(config, near_root,
                                        os.path.join(node_root, "test0"), 0)

    utils.wait_for_blocks(current_node, count=20)

    # That the new genesis can be deserialized by the new binary is verified
    # above (the new binary can produce blocks).  Also check that the new
    # genesis protocol_version matches nearcore/res/genesis_config.json's.
    new_genesis = json.load(open(os.path.join(node_root,
                                              'test0/genesis.json')))
    res_genesis = json.load(open('../nearcore/res/genesis_config.json'))
    assert new_genesis['protocol_version'] == res_genesis['protocol_version']
Code example #20
File: repro_2916.py (Project: near/nearcore)
async def main():
    # start a cluster with two shards
    nodes = start_cluster(2, 0, 2, None, [], {})

    height, hash_ = utils.wait_for_blocks(nodes[0], target=3)
    block = nodes[0].get_block(hash_)['result']
    chunk_hashes = [base58.b58decode(x['chunk_hash']) for x in block['chunks']]
    assert len(chunk_hashes) == 2
    assert all([len(x) == 32 for x in chunk_hashes])

    my_key_pair_nacl = nacl.signing.SigningKey.generate()
    received_responses = [None, None]

    # step = 0: before the node is killed
    # step = 1: after the node is killed
    for step in range(2):

        conn0 = await connect(nodes[0].addr())
        await run_handshake(conn0, nodes[0].node_key.pk, my_key_pair_nacl)
        for shard_ord, chunk_hash in enumerate(chunk_hashes):

            request = PartialEncodedChunkRequestMsg()
            request.chunk_hash = chunk_hash
            request.part_ords = []
            request.tracking_shards = [0, 1]

            routed_msg_body = RoutedMessageBody()
            routed_msg_body.enum = 'PartialEncodedChunkRequest'
            routed_msg_body.PartialEncodedChunkRequest = request

            peer_message = create_and_sign_routed_peer_message(
                routed_msg_body, nodes[0], my_key_pair_nacl)

            await conn0.send(peer_message)

            received_response = False

            def predicate(response):
                return response.enum == 'Routed' and response.Routed.body.enum == 'PartialEncodedChunkResponse'

            try:
                response = await asyncio.wait_for(conn0.recv(predicate), 5)
            except (concurrent.futures._base.TimeoutError,
                    asyncio.exceptions.TimeoutError):
                response = None

            if response is not None:
                logger.info("Received response for shard %s" % shard_ord)
                received_response = True
            else:
                logger.info("Didn't receive response for shard %s" % shard_ord)

            if step == 0:
                received_responses[shard_ord] = received_response
            else:
                assert received_responses[
                    shard_ord] == received_response, "The response doesn't match for the chunk in shard %s. Received response before node killed: %s, after: %s" % (
                        shard_ord, received_responses[shard_ord],
                        received_response)

        # We expect the first node to respond to only one of the chunk
        # requests: the one for the shard assigned to it.
        assert received_responses[0] != received_responses[1], received_responses

        if step == 0:
            logger.info("Killing and restarting nodes")
            nodes[1].kill()
            nodes[0].kill()
            nodes[0].start()
            time.sleep(1)
Code example #21
     ["total_supply", "4210000000000000000000000000000000"],
     ["validators", 0, "amount", "260000000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "locked",
         "260000000000000000000000000000000"
     ]], {
         0: consensus_config,
         1: consensus_config,
         2: consensus_config
     })

logger.info('kill node1 and node2')
nodes[1].kill()
nodes[2].kill()

node0_height, _ = utils.wait_for_blocks(nodes[0], target=TARGET_HEIGHT)

logger.info('Restart node 1')
nodes[1].start(boot_node=nodes[1])
time.sleep(2)

for height, _ in utils.poll_blocks(nodes[1], timeout=TIMEOUT):
    if height >= node0_height and len(nodes[0].validators()) < 3:
        break

logger.info('Restart node 2')
nodes[2].start(boot_node=nodes[2])
time.sleep(2)

target = nodes[0].get_latest_block().height
utils.wait_for_blocks(nodes[2], target=target)
Code example #22
File: state_sync3.py (Project: near/nearcore)
    "tracked_shards": [0]
}
nodes = start_cluster(
    1, 1, 1, None,
    [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10],
     ["chunk_producer_kickout_threshold", 10]], {
         0: consensus_config0,
         1: consensus_config1
     })
time.sleep(2)
nodes[1].kill()

logger.info("step 1")

node0_height, _ = utils.wait_for_blocks(nodes[0],
                                        target=EPOCH_LENGTH * 2 + 1,
                                        poll_interval=5)

nodes[1].start(boot_node=nodes[1])
time.sleep(2)

logger.info("step 2")
state_sync_done_time = None
state_sync_done_height = None
for node1_height, _ in utils.poll_blocks(nodes[1],
                                         timeout=MAX_SYNC_WAIT,
                                         poll_interval=2):
    if node1_height > node0_height:
        break
    if node1_height >= EPOCH_LENGTH:
        if state_sync_done_time is None:
Code example #23
    "consensus": {
        "min_block_production_delay": {
            "secs": MIN_BLOCK_PROD_TIME,
            "nanos": 0,
        },
        "max_block_production_delay": {
            "secs": MAX_BLOCK_PROD_TIME,
            "nanos": 0,
        },
    }
})

nodes = start_cluster(1, 0, 1, CONFIG, [["epoch_length", EPOCH_LENGTH]], {})

# start at block_height = 10
utils.wait_for_blocks(nodes[0], target=10)
# fast forward to about block_height=190 and then test for boundaries
nodes[0].json_rpc('sandbox_fast_forward', {"delta_height": 180}, timeout=60)
for i in range(20):
    utils.wait_for_blocks(nodes[0], target=190 + i)
    block_height = nodes[0].get_latest_block().height
    epoch_height = nodes[0].get_validators()['result']['epoch_height']
    assert epoch_height == (2 if block_height > 200 else 1)

# check that we still have correct epoch heights after consecutive fast forwards:
utils.wait_for_blocks(nodes[0], target=220)
nodes[0].json_rpc('sandbox_fast_forward', {"delta_height": 70}, timeout=60)
for i in range(20):
    utils.wait_for_blocks(nodes[0], target=290 + i)
    block_height = nodes[0].get_latest_block().height
    epoch_height = nodes[0].get_validators()['result']['epoch_height']
Code example #24
                 1
             }
         }
     ], ["validators", 0, "amount", "110000000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "locked",
         "110000000000000000000000000000000"
     ], ["total_supply", "3060000000000000000000000000000000"]], {
         0: consensus_config,
         1: consensus_config
     })

logger.info('Kill node 1')
nodes[1].kill()

node0_height, _ = utils.wait_for_blocks(nodes[0], target=TARGET_HEIGHT)

logger.info('Restart node 1')
nodes[1].start(boot_node=nodes[1])
time.sleep(3)

start_time = time.time()

node1_height, _ = utils.wait_for_blocks(nodes[1], target=node0_height)

# all fresh data should be synced
blocks_count = 0
for height in range(node1_height - 10, node1_height):
    block0 = nodes[0].json_rpc('block', [height], timeout=15)
    block1 = nodes[1].json_rpc('block', [height], timeout=15)
    assert block0 == block1
Code example #25
nodes = start_cluster(
    2, 0, 2, None,
    [["epoch_length", 100], ["block_producer_kickout_threshold", 80]],
    {0: client_config_change})
if not doomslug:
    # we expect inconsistency in store in node 0
    # because we're going to turn off doomslug
    # and allow applying blocks without proper validation
    nodes[0].stop_checking_store()

started = time.time()

time.sleep(2)
logger.info(f'Waiting for {BLOCKS} blocks...')
height, _ = utils.wait_for_blocks(nodes[0], target=BLOCKS)
logger.info(f'Got to {height} blocks, getting to fun stuff')

status = nodes[0].get_status()
logger.info(f"STATUS OF HONEST {status}")
saved_blocks = nodes[0].json_rpc('adv_get_saved_blocks', [])
logger.info(f"SAVED BLOCKS {saved_blocks}")

nodes[0].kill()  # to disallow syncing
nodes[1].kill()

# Switch node1 to an adversarial chain
nodes[1].reset_data()
nodes[1].start(boot_node=nodes[0])

num_produce_blocks = BLOCKS // 2 - 5
Code example #26
        },
    }
})

nodes = start_cluster(1, 0, 1, CONFIG, [["epoch_length", EPOCH_LENGTH]], {})
sync_info = nodes[0].get_status()['sync_info']
pre_forward_block_hash = sync_info['latest_block_hash']

# request to fast forward
nodes[0].json_rpc('sandbox_fast_forward',
                  {"delta_height": BLOCKS_TO_FASTFORWARD},
                  timeout=60)

# wait a little for it to fast forward
# if this call times out, then the fast_forward failed somewhere
utils.wait_for_blocks(nodes[0], target=BLOCKS_TO_FASTFORWARD + 10, timeout=10)

# Assert that the fast-forwarded block timestamp falls between the expected minimum and maximum bounds:
sync_info = nodes[0].get_status()['sync_info']
earliest = datetime.datetime.strptime(sync_info['earliest_block_time'][:-4],
                                      '%Y-%m-%dT%H:%M:%S.%f')
latest = datetime.datetime.strptime(sync_info['latest_block_time'][:-4],
                                    '%Y-%m-%dT%H:%M:%S.%f')

min_forwarded_secs = datetime.timedelta(
    0, BLOCKS_TO_FASTFORWARD * MIN_BLOCK_PROD_TIME)
max_forwarded_secs = datetime.timedelta(
    0, BLOCKS_TO_FASTFORWARD * MAX_BLOCK_PROD_TIME)
min_forwarded_time = earliest + min_forwarded_secs
max_forwarded_time = earliest + max_forwarded_secs
Code example #27
            "secs": 3,
            "nanos": 0
        },
        "header_sync_stall_ban_timeout": {
            "secs": 5,
            "nanos": 0
        }
    },
    "tracked_shards": [0]
}
nodes = start_cluster(1, 1, 1, None, [["epoch_length", EPOCH_LENGTH]], {
    0: node0_config,
    1: node1_config
}, Handler)

utils.wait_for_blocks(nodes[0], target=110, poll_interval=2)

should_sync.value = True

logger.info("sync node 1")

start = time.time()

tracker0 = utils.LogTracker(nodes[0])
tracker1 = utils.LogTracker(nodes[1])

while True:
    assert time.time() - start < TIMEOUT

    if should_ban:
        if tracker1.check(BAN_STRING):
Code example #28
File: block_sync.py (Project: near/nearcore)
        }
    }
}
# give more stake to the bootnode so that it can produce the blocks alone
nodes = start_cluster(
    2, 0, 4, None,
    [["epoch_length", 100], ["num_block_producer_seats", 100],
     ["num_block_producer_seats_per_shard", [25, 25, 25, 25]],
     ["validators", 0, "amount", "110000000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "locked",
         "110000000000000000000000000000000"
     ], ["total_supply", "3060000000000000000000000000000000"]], {
         0: consensus_config0,
         1: consensus_config1
     })
time.sleep(3)

utils.wait_for_blocks(nodes[0], target=BLOCKS)

logger.info("kill node 0")
nodes[0].kill()
nodes[0].reset_data()

logger.info("restart node 0")
nodes[0].start(boot_node=nodes[0])
time.sleep(3)

node1_height = nodes[1].get_latest_block().height
utils.wait_for_blocks(nodes[0], target=node1_height)
Code example #29
File: backward_compatible.py (Project: near/nearcore)
def main():
    node_root = utils.get_near_tempdir('backward', clean=True)
    executables = branches.prepare_ab_test()

    # Setup local network.
    subprocess.check_call([
        executables.stable.neard,
        "--home=%s" % node_root,
        # TODO(#4372): testnet subcommand deprecated since 1.24.  Replace with
        # localnet after a couple of releases in 2022.
        "testnet",
        "--v",
        "2",
        "--prefix",
        "test"
    ])

    # Run both binaries at the same time.
    config = executables.stable.node_config()
    stable_node = cluster.spin_up_node(config, executables.stable.root,
                                       str(node_root / 'test0'), 0)
    config = executables.current.node_config()
    current_node = cluster.spin_up_node(config,
                                        executables.current.root,
                                        str(node_root / 'test1'),
                                        1,
                                        boot_node=stable_node)

    # Check it all works.
    BLOCKS = 100
    max_height = -1
    started = time.time()

    # Create account, transfer tokens, deploy contract, invoke function call
    block_hash = stable_node.get_latest_block().hash_bytes

    new_account_id = 'test_account.test0'
    new_signer_key = cluster.Key(new_account_id, stable_node.signer_key.pk,
                                 stable_node.signer_key.sk)
    create_account_tx = sign_create_account_with_full_access_key_and_balance_tx(
        stable_node.signer_key, new_account_id, new_signer_key, 10**24, 1,
        block_hash)
    res = stable_node.send_tx_and_wait(create_account_tx, timeout=20)
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    transfer_tx = sign_payment_tx(stable_node.signer_key, new_account_id,
                                  10**25, 2, block_hash)
    res = stable_node.send_tx_and_wait(transfer_tx, timeout=20)
    assert 'error' not in res, res

    block_height = stable_node.get_latest_block().height
    nonce = block_height * 1_000_000 - 1

    tx = sign_deploy_contract_tx(new_signer_key, utils.load_test_contract(),
                                 nonce, block_hash)
    res = stable_node.send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res

    tx = sign_deploy_contract_tx(stable_node.signer_key,
                                 utils.load_test_contract(), 3, block_hash)
    res = stable_node.send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res

    tx = sign_function_call_tx(new_signer_key, new_account_id,
                               'write_random_value', [], 10**13, 0, nonce + 1,
                               block_hash)
    res = stable_node.send_tx_and_wait(tx, timeout=20)
    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    data = json.dumps([{
        "create": {
            "account_id": "test_account.test0",
            "method_name": "call_promise",
            "arguments": [],
            "amount": "0",
            "gas": 30000000000000,
        },
        "id": 0
    }, {
        "then": {
            "promise_index": 0,
            "account_id": "test0",
            "method_name": "call_promise",
            "arguments": [],
            "amount": "0",
            "gas": 30000000000000,
        },
        "id": 1
    }])

    tx = sign_function_call_tx(stable_node.signer_key,
                               new_account_id, 'call_promise',
                               bytes(data, 'utf-8'), 90000000000000, 0,
                               nonce + 2, block_hash)
    res = stable_node.send_tx_and_wait(tx, timeout=20)

    assert 'error' not in res, res
    assert 'Failure' not in res['result']['status'], res

    utils.wait_for_blocks(current_node, target=BLOCKS)
Code example #30
File: state_sync5.py (Project: near/nearcore)
        "sync_step_period": {
            "secs": 0,
            "nanos": 200000000
        }
    },
    "tracked_shards": [0]
}
nodes = start_cluster(
    1, 1, 1, None,
    [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10],
     ["chunk_producer_kickout_threshold", 10]], {1: node1_config})
time.sleep(2)
nodes[1].kill()
logger.info('node1 is killed')

cur_height, _ = utils.wait_for_blocks(nodes[0], target=60)

genesis_block = nodes[0].json_rpc('block', [0])
genesis_hash = genesis_block['result']['header']['hash']
genesis_hash = base58.b58decode(genesis_hash.encode('ascii'))

nodes[1].start(boot_node=nodes[1])
tracker = utils.LogTracker(nodes[1])
time.sleep(1)

start_time = time.time()
node1_height = 0
nonce = 1
while node1_height <= cur_height:
    if time.time() - start_time > MAX_SYNC_WAIT:
        assert False, "state sync timed out"