Example #1
def doit(seq=[]):
    global nodes, all_stakes, sequence
    sequence = seq

    config = None
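    # start_cluster(num_nodes, num_observers, num_shards, config,
    #               genesis_config_changes, client_config_changes);
    # see the keyword-argument call in Example #6.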
    nodes = start_cluster(
        2, 1, 1, config, [["epoch_length", EPOCH_LENGTH],
                          ["block_producer_kickout_threshold", 40],
                          ["chunk_producer_kickout_threshold", 40]], {
                              0: {
                                  "view_client_throttle_period": {
                                      "secs": 0,
                                      "nanos": 0
                                  },
                                  "consensus": {
                                      "state_sync_timeout": {
                                          "secs": 2,
                                          "nanos": 0
                                      }
                                  }
                              },
                              1: {
                                  "view_client_throttle_period": {
                                      "secs": 0,
                                      "nanos": 0
                                  },
                                  "consensus": {
                                      "state_sync_timeout": {
                                          "secs": 2,
                                          "nanos": 0
                                      }
                                  }
                              },
                              2: {
                                  "tracked_shards": [0],
                                  "view_client_throttle_period": {
                                      "secs": 0,
                                      "nanos": 0
                                  },
                                  "consensus": {
                                      "state_sync_timeout": {
                                          "secs": 2,
                                          "nanos": 0
                                      }
                                  }
                              }
                          })

    started = time.time()
    last_iter = started

    height, hash_ = nodes[2].get_latest_block()
    for i in range(3):
        nodes[i].stop_checking_store()

    logger.info("Initial stakes: %s" % get_stakes())
    all_stakes.append(get_stakes())

    do_moar_stakes(hash_, True)
    last_fake_stakes_height = FAKE_OFFSET
    last_staked_height = REAL_OFFSET

    while True:
        if time.time() - started >= TIMEOUT:
            break

        assert time.time() - last_iter < TIMEOUT_PER_ITER

        height, hash_ = nodes[0].get_latest_block()
        send_fakes = send_reals = False

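        # Fire at most once per epoch: compare which epoch window (shifted by
        # FAKE_OFFSET, resp. REAL_OFFSET) the current height falls into with
        # the window of the last height at which stakes were sent.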
        if (height + EPOCH_LENGTH - FAKE_OFFSET) // EPOCH_LENGTH > (
                last_fake_stakes_height + EPOCH_LENGTH -
                FAKE_OFFSET) // EPOCH_LENGTH:
            last_iter = time.time()

            send_fakes = True

        if (height + EPOCH_LENGTH - REAL_OFFSET) // EPOCH_LENGTH > (
                last_staked_height + EPOCH_LENGTH -
                REAL_OFFSET) // EPOCH_LENGTH:

            send_reals = True

        if send_fakes or send_reals:
            cur_stakes = get_stakes()
            logger.info("Current stakes: %s" % cur_stakes)
            if len(all_stakes) > 1:
                expected_stakes = get_expected_stakes()
                logger.info("Expect  stakes: %s" % expected_stakes)
                for (cur, expected) in zip(cur_stakes, expected_stakes):
                    if cur % 1000000 == 0:
                        assert cur == expected
                    else:
                        assert expected <= cur <= expected * 1.1

            do_moar_stakes(hash_, update_expected=send_reals)

        if send_fakes:
            last_fake_stakes_height += EPOCH_LENGTH

        elif send_reals:
            last_staked_height += EPOCH_LENGTH
Example #2
import base58, base64
import json
import struct
import sys
import threading

import deepdiff

sys.path.append('lib')
from cluster import start_cluster
from key import Key
from utils import load_test_contract
import transaction

nodes = start_cluster(
    4, 0, 1, None,
    [["epoch_length", 1000], ["block_producer_kickout_threshold", 80]], {})


def assert_changes_in_block_response(request, expected_response):
    for node_index, node in enumerate(nodes):
        response = node.get_changes_in_block(request)
        assert 'result' in response, "the request did not succeed: %r" % response
        response = response['result']
        diff = deepdiff.DeepDiff(expected_response, response)
        assert not diff, \
            "query node #%d same changes gives different results %r (expected VS actual):\n%r\n%r" \
            % (node_index, diff, expected_response, response)


def assert_changes_response(request, expected_response, **kwargs):
    for node_index, node in enumerate(nodes):
        response = node.get_changes(request)
        assert 'result' in response, "the request did not succeed: %r" % response
        response = response['result']
        diff = deepdiff.DeepDiff(expected_response, response, **kwargs)
        assert not diff, \
            "query node #%d same changes gives different results %r (expected VS actual):\n%r\n%r" \
            % (node_index, diff, expected_response, response)

Example #3

import time
from multiprocessing import Value

import base58
import ed25519
from cluster import start_cluster
from configured_logger import logger
from proxy import ProxyHandler

# Set from inside the proxy handler once routing-table sync reaches "Done".
success = Value('i', 0)


class Handler(ProxyHandler):
    async def handle(self, msg, fr, to):
        if msg.enum == "RoutingTableSync":
            logger.info("RoutingTableSync")
        if msg.enum == "RoutingTableSyncV2" and msg.RoutingTableSyncV2.enum == "Version2":
            if msg.RoutingTableSyncV2.Version2.routing_state.enum == "Done":
                success.value = 1
            logger.info("ROUTING_STATE %s" %
                        msg.RoutingTableSyncV2.Version2.routing_state.enum)
            logger.info("* known_edges %s" %
                        msg.RoutingTableSyncV2.Version2.known_edges)
            logger.info("* edges %s" %
                        len(msg.RoutingTableSyncV2.Version2.edges))
        return True


nodes = start_cluster(2, 0, 1, None, [], {}, Handler)

time.sleep(10)

signing_key, verifying_key = ed25519.create_keypair()

logger.info(str(signing_key))


def gen():
    return base58.b58encode(
        ed25519.create_keypair()[0].to_bytes()).decode('ascii')


def new_edge(peer_a, peer_b, nonce):
    s1 = base58.b58encode(str(peer_a).zfill(32)).decode("ascii")
Example #4
EPOCH_LENGTH = 100
CONFIG = utils.figure_out_sandbox_binary()
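# Bound block production delay between MIN_ and MAX_BLOCK_PROD_TIME seconds.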
CONFIG.update({
    "consensus": {
        "min_block_production_delay": {
            "secs": MIN_BLOCK_PROD_TIME,
            "nanos": 0,
        },
        "max_block_production_delay": {
            "secs": MAX_BLOCK_PROD_TIME,
            "nanos": 0,
        },
    }
})

nodes = start_cluster(1, 0, 1, CONFIG, [["epoch_length", EPOCH_LENGTH]], {})

# start at block_height = 10
utils.wait_for_blocks(nodes[0], target=10)
# fast forward to about block_height=190 and then test for boundaries
nodes[0].json_rpc('sandbox_fast_forward', {"delta_height": 180}, timeout=60)
for i in range(20):
    utils.wait_for_blocks(nodes[0], target=190 + i)
    block_height = nodes[0].get_latest_block().height
    epoch_height = nodes[0].get_validators()['result']['epoch_height']
    assert epoch_height == (2 if block_height > 200 else 1)

# check that we still have correct epoch heights after consecutive fast forwards:
utils.wait_for_blocks(nodes[0], target=220)
nodes[0].json_rpc('sandbox_fast_forward', {"delta_height": 70}, timeout=60)
for i in range(20):
Example #5
# Unstakes for them, makes sure they stop being a validator

import sys, time, base58, random, datetime

sys.path.append('lib')

from cluster import start_cluster
from configured_logger import logger
from transaction import sign_staking_tx

TIMEOUT = 150

config = None
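# 2 validators plus 1 observer; node 2 (the observer) tracks shard 0.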
nodes = start_cluster(
    2, 1, 1, config,
    [["epoch_length", 10], ["block_producer_kickout_threshold", 40]],
    {2: {
        "tracked_shards": [0]
    }})

started = time.time()


def get_validators():
    return set([x['account_id'] for x in nodes[0].get_status()['validators']])


def get_stakes():
    return [
        int(nodes[2].get_account("test%s" % i)['result']['locked'])
        for i in range(3)
    ]
Example #6
import time

from cluster import start_cluster
from configured_logger import logger
from utils import TxContext
from transaction import sign_payment_tx

TIMEOUT = 240

nodes = start_cluster(
    num_nodes=4,
    num_observers=1,
    num_shards=4,
    config=None,
    genesis_config_changes=[
        ["min_gas_price", 0],
        ["max_inflation_rate", [0, 1]],
        ["epoch_length", 10],
        ["block_producer_kickout_threshold", 70]
    ],
    client_config_changes={
                                  0: {"consensus": {"state_sync_timeout": {"secs": 2, "nanos": 0}}},
                                  1: {"consensus": {"state_sync_timeout": {"secs": 2, "nanos": 0}}},
                                  2: {"consensus": {"state_sync_timeout": {"secs": 2, "nanos": 0}}},
                                  3: {"consensus": {"state_sync_timeout": {"secs": 2, "nanos": 0}}},
                                  4: {"consensus": {"state_sync_timeout": {"secs": 2, "nanos": 0}},
                                      "tracked_shards": [0, 1, 2, 3]}
    }
)

started = time.time()

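# act_to_val: for each account index, the node that serves its transactions;
# here all five accounts go through node 4, the observer.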
act_to_val = [4, 4, 4, 4, 4]

ctx = TxContext(act_to_val, nodes)
Example #7
# and ensures the balances get to the expected state in a timely manner.
# Sets epoch length to 10

import sys, time, base58, random

sys.path.append('lib')

from cluster import start_cluster
from utils import TxContext
from transaction import sign_payment_tx

TIMEOUT = 240

nodes = start_cluster(4, 0, 4, {
    'local': True,
    'near_root': '../target/debug/'
}, [["max_inflation_rate", 0], ["epoch_length", 10],
    ["block_producer_kickout_threshold", 70]], {})

started = time.time()

act_to_val = [3, 2, 0, 3]

ctx = TxContext(act_to_val, nodes)

last_balances = [x for x in ctx.expected_balances]

step = 0
sent_height = -1

while True:
Example #8
}
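# Short header-sync timeouts on node 1 so a stalled sync peer is dropped quickly.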
node1_config = {
    "consensus": {
        "header_sync_initial_timeout": {
            "secs": 3,
            "nanos": 0
        },
        "header_sync_stall_ban_timeout": {
            "secs": 5,
            "nanos": 0
        }
    },
    "tracked_shards": [0]
}
nodes = start_cluster(1, 1, 1, None, [["epoch_length", EPOCH_LENGTH]], {
    0: node0_config,
    1: node1_config
}, Handler)

status = nodes[0].get_status()
cur_height = status['sync_info']['latest_block_height']

while cur_height <= 110:
    status = nodes[0].get_status()
    cur_height = status['sync_info']['latest_block_height']
    time.sleep(2)

should_sync.value = True

logger.info("sync node 1")

start = time.time()
Example #9
HEIGHT_GOAL = 150
TIMEOUT = HEIGHT_GOAL * 3
EPOCH_LENGTH = 20

config = None
nodes = start_cluster(
    2, 2, 1, config,
    [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 40]],
    {
        0: {
            "view_client_throttle_period": {
                "secs": 0,
                "nanos": 0
            }
        },
        1: {
            "view_client_throttle_period": {
                "secs": 0,
                "nanos": 0
            }
        },
        2: {
            "tracked_shards": [0]
        }
    })

started = time.time()


def get_validators():
    return set([x['account_id'] for x in nodes[0].get_status()['validators']])
Example #10
#!/usr/bin/env python3
"""
Spawn a cluster with four nodes. Check that no node tries to
connect to another node that is currently connected.
"""

import sys, time
import pathlib

sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib'))

import cluster
import utils

BLOCKS = 20

nodes = cluster.start_cluster(4, 0, 4, None, [], {})
trackers = [utils.LogTracker(node) for node in nodes]
utils.wait_for_blocks(nodes[0], target=BLOCKS)
assert all(not tracker.check('Dropping handshake (Active Peer).')
           for tracker in trackers)
Example #11
]
nightly_genesis_change = [
    ["minimum_validators_per_shard", 2], ["min_gas_price", 0],
    ["max_inflation_rate", [0, 1]], ["epoch_length", 10],
    ["block_producer_kickout_threshold", 60],
    ["chunk_producer_kickout_threshold", 60],
    ["validators", 0, "amount", "110000000000000000000000000000000"],
    [
        "records", 0, "Account", "account", "locked",
        "110000000000000000000000000000000"
    ], ["total_supply", "4060000000000000000000000000000000"]
]

# give more stake to the bootnode so that it can produce the blocks alone
nodes = start_cluster(2, 1, 8, None,
                      nightly_genesis_change if nightly else genesis_change,
                      {})
time.sleep(3)
nodes[1].kill()

started = time.time()

act_to_val = [0, 0, 0]
ctx = utils.TxContext(act_to_val, nodes)

last_balances = [x for x in ctx.expected_balances]

sent_height = -1
caught_up_times = 0

for height, hash_ in utils.poll_blocks(nodes[0],
Example #12
TIMEOUT = 300
BLOCKS = 30

# Low sync_check_period to sync from a new peer with greater height
client_config_change = {
    "consensus": {
        "sync_check_period": {
            "secs": 0,
            "nanos": 100000000
        }
    }
}

nodes = start_cluster(
    2, 0, 2, None,
    [["epoch_length", 100], ["block_producer_kickout_threshold", 80]],
    {0: client_config_change})
if not doomslug:
    # we expect inconsistency in store in node 0
    # because we're going to turn off doomslug
    # and allow applying blocks without proper validation
    nodes[0].stop_checking_store()

started = time.time()

time.sleep(2)
print("Waiting for %s blocks..." % BLOCKS)

while True:
    assert time.time() - started < TIMEOUT
    status = nodes[0].get_status()
Example #13
                    logging.info(f"Height: {h}")

            with success.get_lock():
                if h >= 10 and success.value == 0:
                    logging.info(
                        f'SUCCESS DROP={self.dropped} TOTAL={self.total}')
                    success.value = 1

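        # Drop a random fraction (DROP_RATIO) of messages, but never handshakes.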
        drop = random.random() < DROP_RATIO and 'Handshake' not in msg.enum

        if drop:
            self.dropped += 1
        self.total += 1

        return not drop


start_cluster(3, 0, 1, None, [["epoch_length", 500]], {}, Handler)

started = time.time()

while True:
    logging.info(f"Time: {time.time() - started:0.2}, Fin: {success.value}")
    assert time.time() - started < TIMEOUT
    time.sleep(1)

    if success.value == 1:
        break

logging.info("Success")
Example #14
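# Small fetch horizons make a node that has fallen far behind give up on
# fetching individual blocks and fall back to state sync.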
consensus_config = {
    "consensus": {
        "block_fetch_horizon": 20,
        "block_header_fetch_horizon": 20
    }
}

nodes = start_cluster(
    4, 0, 1, None,
    [["epoch_length", 10],
     ["validators", 0, "amount", "12500000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "locked",
         "12500000000000000000000000000000"
     ], ["validators", 1, "amount", "12500000000000000000000000000000"],
     [
         "records", 2, "Account", "account", "locked",
         "12500000000000000000000000000000"
     ], ['total_supply', "4925000000000000000000000000000000"],
     ["block_producer_kickout_threshold", 40],
     ["chunk_producer_kickout_threshold", 40], ["num_block_producer_seats", 10],
     ["num_block_producer_seats_per_shard", [10]]], {1: consensus_config})

logger.info('Kill node 1')
nodes[1].kill()

node0_height, _ = utils.wait_for_blocks(nodes[0],
                                        target=TARGET_HEIGHT1,
                                        verbose=True)
Example #15
# process transactions. Makes large-ish number of block producers per shard to minimize
# the chance of the second block producer occupying all the seats in one of the shards

import sys, time, base58, random

sys.path.append('lib')

from cluster import start_cluster
from utils import TxContext
from transaction import sign_payment_tx

TIMEOUT = 240

nodes = start_cluster(
    2, 1, 8, None,
    [["num_block_producer_seats", 199],
     ["num_block_producer_seats_per_shard", [24, 25, 25, 25, 25, 25, 25, 25]],
     ["min_gas_price", 0], ["max_inflation_rate", 0], ["epoch_length", 10],
     ["block_producer_kickout_threshold", 70]], {})
time.sleep(3)
nodes[1].kill()

started = time.time()

act_to_val = [0, 0, 0]
ctx = TxContext(act_to_val, nodes)

last_balances = [x for x in ctx.expected_balances]

max_height = 0
sent_height = -1
caught_up_times = 0
Example #16
import time

from cluster import start_cluster

BLOCK_WAIT = 40
EPOCH_LENGTH = 80

consensus_config = {
    "consensus": {
        "block_fetch_horizon": 10,
        "block_header_fetch_horizon": 10
    }
}
nodes = start_cluster(2, 0, 1, {
    'local': True,
    'near_root': '../target/debug/'
}, [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10],
    ["chunk_producer_kickout_threshold", 10]], {
        0: consensus_config,
        1: consensus_config
    })
time.sleep(2)
nodes[1].kill()

cur_height = 0
print("step 1")
while cur_height < BLOCK_WAIT:
    status = nodes[0].get_status()
    cur_height = status['sync_info']['latest_block_height']
    time.sleep(2)
nodes[1].start(nodes[1].node_key.pk, nodes[1].addr())
time.sleep(2)
Example #17
import sys, time

sys.path.append('lib')

from cluster import start_cluster
from configured_logger import logger
from utils import TxContext
from transaction import sign_payment_tx

TIMEOUT = 180

# give more stake to the bootnode so that it can produce the blocks alone
nodes = start_cluster(
    2, 1, 8, None,
    [["num_block_producer_seats", 199],
     ["num_block_producer_seats_per_shard", [24, 25, 25, 25, 25, 25, 25, 25]],
     ["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10],
     ["block_producer_kickout_threshold", 60],
     ["chunk_producer_kickout_threshold", 60],
     ["validators", 0, "amount", "110000000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "locked",
         "110000000000000000000000000000000"
     ], ["total_supply", "4060000000000000000000000000000000"]], {})
time.sleep(3)
nodes[1].kill()

started = time.time()

act_to_val = [0, 0, 0]
ctx = TxContext(act_to_val, nodes)

last_balances = [x for x in ctx.expected_balances]
Example #18
import sys, time

sys.path.append('lib')

from cluster import start_cluster
from utils import wait_for_blocks_or_timeout

TIMEOUT = 60
EPOCH_LENGTH = 20

config = None
nodes = start_cluster(
    4, 1, 1, config,
    [
        ["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 40],
        ['protocol_version', 35], ['max_inflation_rate', [0, 1]],
        ['protocol_reward_rate', [0, 1]], ['block_producer_kickout_threshold', 70],
        ['chunk_producer_kickout_threshold', 70]
    ],
    {4: {
        "tracked_shards": [0]
    }})

started = time.time()

old_accounts = [
    nodes[0].get_account("test%s" % x)['result'] for x in range(4)
]
near_account_old = nodes[0].get_account("near")

wait_for_blocks_or_timeout(nodes[4], EPOCH_LENGTH * 3 + 2, TIMEOUT)
for i in range(4):
Example #19
# test various ways of submitting transactions (broadcast_tx_async, broadcast_tx_sync, broadcast_tx_commit)

import sys, time, base58, base64

sys.path.append('lib')

from cluster import start_cluster
from configured_logger import logger
from utils import TxContext
from transaction import sign_payment_tx

nodes = start_cluster(
    2, 1, 1,
    None, [["min_gas_price", 0], ['max_inflation_rate', [0, 1]],
           ["epoch_length", 100], ['transaction_validity_period', 200],
           ["block_producer_kickout_threshold", 70]], {})

time.sleep(3)
started = time.time()

old_balances = [
    int(nodes[0].get_account("test%s" % x)['result']['amount'])
    for x in [0, 1]
]
logger.info(f"BALANCES BEFORE {old_balances}")

status = nodes[0].get_status()
hash1 = status['sync_info']['latest_block_hash']

for i in range(3):
    tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100 + i, i + 1,
Example #20
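# gc_blocks_limit caps how many blocks a node garbage-collects per GC step.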
node0_config = {"gc_blocks_limit": 10}

node1_config = {
    "consensus": {
        "block_fetch_horizon": 10,
        "block_header_fetch_horizon": 10,
        "state_fetch_horizon": 0
    },
    "tracked_shards": [0],
    "gc_blocks_limit": 10,
}

nodes = start_cluster(
    1, 1, 1, None,
    [["epoch_length", 10], ["block_producer_kickout_threshold", 80],
     ["chunk_producer_kickout_threshold", 80]], {
         0: node0_config,
         1: node1_config
     })

status1 = nodes[1].get_status()
height = status1['sync_info']['latest_block_height']

start = time.time()
while height < TARGET_HEIGHT1:
    assert time.time() - start < TIME_OUT
    time.sleep(1)
    status1 = nodes[1].get_status()
    height = status1['sync_info']['latest_block_height']

logger.info('Kill node 1')
Example #21
import time

from cluster import start_cluster
from key import Key
from transaction import sign_staking_tx, sign_create_account_with_full_access_key_and_balance_tx

MAX_SYNC_WAIT = 30
EPOCH_LENGTH = 10

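# A 100ns sync_step_period makes node 1 run its sync loop almost continuously.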
node1_config = {
    "consensus": {
        "sync_step_period": {
            "secs": 0,
            "nanos": 100
        }
    },
    "tracked_shards": [0]
}
nodes = start_cluster(
    1, 1, 1, None,
    [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10],
     ["chunk_producer_kickout_threshold", 10]], {1: node1_config})
time.sleep(2)
nodes[1].kill()
print('node1 is killed')

status = nodes[0].get_status()
block_hash = status['sync_info']['latest_block_hash']
cur_height = status['sync_info']['latest_block_height']

num_new_accounts = 10
balance = 50000000000000000000000000000000
account_keys = []
for i in range(num_new_accounts):
    signer_key = Key(f'test_account{i}', nodes[0].signer_key.pk,
                     nodes[0].signer_key.sk)
Example #22
            "nanos": 400000000
        },
        "max_block_wait_delay": {
            "secs": 0,
            "nanos": 400000000
        }
    }
}

nodes = start_cluster(
    3, 0, 1, None,
    [["epoch_length", 10], ["num_block_producer_seats", 5],
     ["num_block_producer_seats_per_shard", [5]],
     ["total_supply", "4210000000000000000000000000000000"],
     ["validators", 0, "amount", "260000000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "locked",
         "260000000000000000000000000000000"
     ]], {
         0: consensus_config,
         1: consensus_config,
         2: consensus_config
     })

logger.info('kill node1 and node2')
nodes[1].kill()
nodes[2].kill()

node0_height, _ = utils.wait_for_blocks(nodes[0], target=TARGET_HEIGHT)

logger.info('Restart node 1')
nodes[1].start(boot_node=nodes[1])
Example #23
client_config_changes = {}
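# On non-local (remote) runs, slow block production down and give the test a
# longer overall timeout.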
if not config['local']:
    client_config_changes = {
        "min_block_production_delay": {
            "secs": 4,
        },
        "max_block_production_delay": {
            "secs": 8,
        },
        "max_block_wait_delay": {
            "secs": 24,
        },
    }
    TIMEOUT = 600
nodes = start_cluster(
    4, 0, 4, None,
    [["epoch_length", 6], ["block_producer_kickout_threshold", 80]],
    client_config_changes)

started = time.time()

hash_to_height = {}
hash_to_epoch = {}
hash_to_next_epoch = {}
height_to_hash = {}
epochs = []

block_producers_map = {}


def get_light_client_block(hash_, last_known_block):
    global block_producers_map
Example #24
# Make sure all nodes can still sync.

import sys, time, base58

sys.path.append('lib')

from cluster import start_cluster
from transaction import sign_staking_tx

EPOCH_LENGTH = 20
tracked_shards = {"tracked_shards": [0, 1, 2, 3]}

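# Nodes 0 and 1 track all four shards, so either one can serve any shard's state.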
nodes = start_cluster(3, 1, 4, {
    'local': True,
    'near_root': '../target/debug/'
}, [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10],
    ["chunk_producer_kickout_threshold", 10]], {
        0: tracked_shards,
        1: tracked_shards
    })

time.sleep(3)

status = nodes[0].get_status()
hash_ = status['sync_info']['latest_block_hash']

for i in range(4):
    stake = 100000000000000000000000000 if i == 3 else 0
    tx = sign_staking_tx(nodes[i].signer_key, nodes[i].validator_key, stake, 1,
                         base58.b58decode(hash_.encode('utf8')))
    nodes[0].send_tx(tx)
    print("test%s stakes %d" % (i, stake))
Example #25
            "nanos": 400000000
        },
        "max_block_wait_delay": {
            "secs": 0,
            "nanos": 400000000
        }
    }
}

nodes = start_cluster(
    2, 0, 1, None,
    [["epoch_length", 10], ["num_block_producer_seats", 5],
     ["num_block_producer_seats_per_shard", [5]],
     ["chunk_producer_kickout_threshold", 80],
     ["validators", 0, "amount", "110000000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "AccountV1", "locked",
         "110000000000000000000000000000000"
     ], ["total_supply", "3060000000000000000000000000000000"]], {
         0: consensus_config,
         1: consensus_config
     })

print('Kill node 1')
nodes[1].kill()

node0_height = 0
while node0_height < TARGET_HEIGHT:
    status = nodes[0].get_status()
    node0_height = status['sync_info']['latest_block_height']
    time.sleep(2)
Example #26
expect receiving HandshakeFailure. Use that information to send valid handshake and
connect to the node.
"""
import asyncio
import socket
import sys
import time

sys.path.append('lib')

import base58
import nacl.signing
from cluster import start_cluster
from peer import ED_PREFIX, connect, create_handshake, sign_handshake

nodes = start_cluster(1, 0, 4, None, [], {})


async def main():
    my_key_pair_nacl = nacl.signing.SigningKey.generate()

    conn = await connect(nodes[0].addr())

    handshake = create_handshake(my_key_pair_nacl, nodes[0].node_key.pk, 12345)

    # First handshake attempt. Should fail with Protocol Version Mismatch
    sign_handshake(my_key_pair_nacl, handshake.HandshakeV2)
    await conn.send(handshake)
    response = await conn.recv()

    assert response.enum == 'HandshakeFailure', response.enum
Example #27
        }
    }
}
consensus_config1 = {
    "consensus": {
        "sync_step_period": {
            "secs": 0,
            "nanos": 1000
        }
    },
    "tracked_shards": [0]
}
nodes = start_cluster(
    1, 1, 1, None,
    [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10],
     ["chunk_producer_kickout_threshold", 10]], {
         0: consensus_config0,
         1: consensus_config1
     })
time.sleep(2)
nodes[1].kill()

print("step 1")

node0_height = 0
while node0_height <= EPOCH_LENGTH + 600:
    status = nodes[0].get_status()
    node0_height = status['sync_info']['latest_block_height']
    time.sleep(5)
nodes[1].start(nodes[1].node_key.pk, nodes[1].addr())
time.sleep(2)
Example #28
# test various ways of submitting transactions (broadcast_tx_async, broadcast_tx_sync, broadcast_tx_commit)

import sys, time, base58, base64

sys.path.append('lib')

from cluster import start_cluster
from utils import TxContext
from transaction import sign_payment_tx

nodes = start_cluster(
    2, 0, 1,
    None, [["min_gas_price", 0], ['max_inflation_rate', [0, 1]],
           ["epoch_length", 10], ["block_producer_kickout_threshold", 70]], {})

time.sleep(3)
started = time.time()

old_balances = [
    int(nodes[0].get_account("test%s" % x)['result']['amount'])
    for x in [0, 1]
]
print("BALANCES BEFORE", old_balances)

status = nodes[0].get_status()
hash_ = status['sync_info']['latest_block_hash']

for i in range(3):
    tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100 + i, i + 1,
                         base58.b58decode(hash_.encode('utf8')))
    if i == 0:
Example #29
async def main():
    # start a cluster with two shards
    nodes = start_cluster(2, 0, 2, None, [], {})

    started = time.time()

    while True:
        if time.time() - started > 10:
            assert False, "Giving up waiting for two blocks"

        status = nodes[0].get_status()
        hash_ = status['sync_info']['latest_block_hash']
        height = status['sync_info']['latest_block_height']

        if height > 2:
            block = nodes[0].get_block(hash_)
            chunk_hashes = [
                base58.b58decode(x['chunk_hash'])
                for x in block['result']['chunks']
            ]

            assert len(chunk_hashes) == 2
            assert all([len(x) == 32 for x in chunk_hashes])

            break

    my_key_pair_nacl = nacl.signing.SigningKey.generate()
    received_responses = [None, None]

    # step = 0: before the node is killed
    # step = 1: after the node is killed
    for step in range(2):

        conn0 = await connect(nodes[0].addr())
        await run_handshake(conn0, nodes[0].node_key.pk, my_key_pair_nacl)
        for shard_ord, chunk_hash in enumerate(chunk_hashes):

            request = PartialEncodedChunkRequestMsg()
            request.chunk_hash = chunk_hash
            request.part_ords = []
            request.tracking_shards = [0, 1]

            routed_msg_body = RoutedMessageBody()
            routed_msg_body.enum = 'PartialEncodedChunkRequest'
            routed_msg_body.PartialEncodedChunkRequest = request

            peer_message = create_and_sign_routed_peer_message(
                routed_msg_body, nodes[0], my_key_pair_nacl)

            await conn0.send(peer_message)

            received_response = False

            def predicate(response):
                return response.enum == 'Routed' and response.Routed.body.enum == 'PartialEncodedChunkResponse'

            try:
                response = await asyncio.wait_for(conn0.recv(predicate), 5)
            except concurrent.futures._base.TimeoutError:
                response = None

            if response is not None:
                logger.info("Received response for shard %s" % shard_ord)
                received_response = True
            else:
                logger.info("Didn't receive response for shard %s" % shard_ord)

            if step == 0:
                received_responses[shard_ord] = received_response
            else:
                assert received_responses[
                    shard_ord] == received_response, "The response doesn't match for the chunk in shard %s. Received response before node killed: %s, after: %s" % (
                        shard_ord, received_responses[shard_ord],
                        received_response)

        # we expect first node to only respond to one of the chunk requests, for the shard assigned to it
        assert received_responses[0] != received_responses[
            1], received_responses

        if step == 0:
            logger.info("Killing and restarting nodes")
            nodes[1].kill()
            nodes[0].kill()
            nodes[0].start(None, None)
            time.sleep(1)
Example #30
STOP_HEIGHT1 = 35
TIMEOUT = 50

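# A 1-second ttl_account_id_router makes node 1 refresh account-to-peer routing
# info frequently.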
config1 = {
    "network": {
        "ttl_account_id_router": {
            "secs": 1,
            "nanos": 0
        },
    }
}
nodes = start_cluster(
    2, 0, 1, None,
    [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 30],
     ["chunk_producer_kickout_threshold", 30], ["num_block_producer_seats", 4],
     ["num_block_producer_seats_per_shard", [4]],
     ["validators", 0, "amount", "150000000000000000000000000000000"],
     [
         "records", 0, "Account", "account", "locked",
         "150000000000000000000000000000000"
     ], ["total_supply", "3100000000000000000000000000000000"]], {1: config1})
time.sleep(2)

status1 = nodes[1].get_status()
height1 = status1['sync_info']['latest_block_height']
block = nodes[1].get_block(height1)
epoch_id = block['result']['header']['epoch_id']

start = time.time()
while True:
    assert time.time() - start < TIMEOUT
    time.sleep(1)