Example #1
def send_random_transactions(node_account, test_accounts, max_tps_per_node):
    logger.info("===========================================")
    logger.info("New iteration of 'send_random_transactions'")
    base_block_hash = mocknet_helpers.get_latest_block_hash()
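    # Send one random transaction per test account, in parallel across accounts.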
    pmap(
        lambda index_and_account: random_transaction(index_and_account[1],
                                                     index_and_account[0],
                                                     node_account,
                                                     max_tps_per_node,
                                                     base_block_hash=base_block_hash),
        enumerate(test_accounts))
Example #2
File: mocknet.py Project: near/nearcore
def chain_measure_bps_and_tps(archival_node,
                              start_time,
                              end_time,
                              duration=None):
    latest_block_hash = archival_node.get_latest_block().hash
    curr_block = archival_node.get_block(latest_block_hash)['result']
    curr_time = get_timestamp(curr_block)

    if end_time is None:
        end_time = curr_time
    if start_time is None:
        start_time = end_time - duration
    logger.info(
        f'Measuring BPS and TPS in the time range {start_time} to {end_time}')

    # One entry per block, equal to the timestamp of that block.
    block_times = []
    # One entry per block, containing the count of transactions in all chunks of the block.
    tx_count = []
    block_counter = 0
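    # Walk the chain backwards from the latest block until we pass start_time.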
    while curr_time > start_time:
        if curr_time < end_time:
            block_times.append(curr_time)
            gas_per_chunk = []
            for chunk in curr_block['chunks']:
                gas_per_chunk.append(chunk['gas_used'] * 1e-12)
            gas_block = sum(gas_per_chunk)
            tx_per_chunk = [None] * len(curr_block['chunks'])
            pmap(
                lambda i: get_chunk_txn(i, curr_block['chunks'], archival_node,
                                        tx_per_chunk),
                range(len(curr_block['chunks'])))
            txs = sum(tx_per_chunk)
            tx_count.append(txs)
            logger.info(
                f'Processed block at time {curr_time} height #{curr_block["header"]["height"]}, # txs in a block: {txs}, per chunk: {tx_per_chunk}, gas in block: {gas_block}, gas per chunk: {gas_per_chunk}'
            )
        prev_hash = curr_block['header']['prev_hash']
        curr_block = archival_node.get_block(prev_hash)['result']
        curr_time = get_timestamp(curr_block)
    block_times.reverse()
    tx_count.reverse()
    assert block_times
    tx_cumulative = data.compute_cumulative(tx_count)
    bps = data.compute_rate(block_times)
    tps_fit = data.linear_regression(block_times, tx_cumulative)
    logger.info(
        f'Num blocks: {len(block_times)}, num transactions: {len(tx_count)}, bps: {bps}, tps_fit: {tps_fit}'
    )
    return {'bps': bps, 'tps': tps_fit['slope']}
Example #3
def atexit_cleanup():
    print('Drain task_queue')
    for _, t in timeout_threads.items():
        t.cancel()
    try:
        while task_queue.get(block=False):
            task_queue.task_done()
    except:
        pass

    print('Shutdown worker threads')
    for _ in range(num_worker_threads):
        task_queue.put(None)

    pmap(delete_machine, machines.keys())
Example #4
File: mocknet.py Project: near/nearcore
def start_load_test_helpers(nodes,
                            script,
                            rpc_nodes,
                            num_nodes,
                            max_tps,
                            get_node_key=False):
    account = get_validator_account(nodes[0])
    pmap(
        lambda node: start_load_test_helper(node,
                                            script,
                                            account.pk,
                                            account.sk,
                                            rpc_nodes,
                                            num_nodes,
                                            max_tps,
                                            lead_account_id=account.account_id,
                                            get_node_key=get_node_key), nodes)
Example #5
File: mocknet.py Project: near/nearcore
def get_nodes(pattern=None):
    machines = gcloud.list(pattern=pattern,
                           project=PROJECT,
                           username=NODE_USERNAME,
                           ssh_key_path=NODE_SSH_KEY_PATH)
    nodes = pmap(
        lambda machine: GCloudNode(machine.name,
                                   username=NODE_USERNAME,
                                   project=PROJECT,
                                   ssh_key_path=NODE_SSH_KEY_PATH), machines)
    return nodes
Example #6
def execute(targets, args):
    execute_argparser = argparse.ArgumentParser(
        'execute a command on target of machines')
    execute_argparser.add_argument('-t',
                                   '--timeout',
                                   help='timeout to execute commands. ',
                                   default='2m')
    execute_argparser.add_argument('command')
    execute_argparser.add_argument('args', nargs=argparse.REMAINDER)
    args = execute_argparser.parse_args(args)
    timeout = timeparse(args.timeout)
    if args.args:
        cmd = convert_list_command_to_str([args.command, *args.args])
    else:
        cmd = args.command

    l = Lock()
    for target in targets:
        print(f'Start executing on {target}')

    def exec(i):
        target = targets[i]
        ret = None
        try:
            log_path = os.path.join(config.logs_dir, str(target))
            log = open(log_path, 'w')
            proc = target.run(cmd, timeout=timeout, stdout=log, stderr=log)
            if proc.returncode == 0:
                output = f'{term.green}SUCCESS{term.off} on {target}'
            else:
                output = f'{term.red}FAIL{term.off} on {target}. Exit code: {proc.returncode}'
            ret = proc
        except RunException as e:
            output = f'{term.red}FAIL{term.off} on {target}. Timeout'
            ret = e
        output += f'. Log: file://{log_path}'
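        # Rewrite this target's status line in place; the lock keeps concurrent threads from interleaving terminal output.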
        with l:
            term.saveCursor()
            term.up(len(targets) - i)
            term.clearLine()
            term.writeLine(output)
            term.restoreCursor()
        return ret

    results = pmap(exec, range(len(targets)))
    if all(
            map(lambda r: isinstance(r, RunResult) and r.returncode == 0,
                results)):
        term.writeLine('All execution succeeded', term.green)
        exit(0)
    else:
        term.writeLine('Some execution failed', term.red)
        exit(1)
Example #7
File: mocknet.py Project: near/nearcore
def create_and_upload_genesis(validator_nodes,
                              chain_id,
                              rpc_nodes=None,
                              epoch_length=20000,
                              node_pks=None,
                              increasing_stakes=0.0,
                              num_seats=100,
                              sharding=True,
                              all_node_pks=None,
                              node_ips=None):
    logger.info(
        f'create_and_upload_genesis: validator_nodes: {validator_nodes}')
    assert chain_id
    logger.info('Uploading genesis and config files')
    with tempfile.TemporaryDirectory() as tmp_dir:
        logger.info(
            'Assuming that genesis_updater.py is available on the instances.')
        validator_node_names = [node.instance_name for node in validator_nodes]
        rpc_node_names = [node.instance_name for node in rpc_nodes]
        assert '-spoon' in chain_id, f'Expecting chain_id like "testnet-spoon" or "mainnet-spoon", got {chain_id}'
        chain_id_in = chain_id.split('-spoon')[0]
        genesis_filename_in = f'/home/ubuntu/.near/{chain_id_in}-genesis/genesis.json'
        records_filename_in = f'/home/ubuntu/.near/{chain_id_in}-genesis/records.json'
        config_filename_in = f'/home/ubuntu/.near/{chain_id_in}-genesis/config.json'
        stamp = time.strftime('%Y%m%d-%H%M%S', time.gmtime())
        done_filename = f'/home/ubuntu/genesis_update_done_{stamp}.txt'
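        # Launch the genesis updater on every validator and RPC node in parallel, then wait for each to report completion.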
        pmap(
            lambda node: start_genesis_updater(
                node, 'genesis_updater.py', genesis_filename_in,
                '/home/ubuntu/.near/genesis.json', records_filename_in,
                '/home/ubuntu/.near/records.json', config_filename_in,
                '/home/ubuntu/.near/config.json', chain_id,
                validator_node_names, rpc_node_names, done_filename,
                epoch_length, node_pks, increasing_stakes, num_seats, sharding,
                all_node_pks, node_ips), validator_nodes + rpc_nodes)
        pmap(lambda node: wait_genesis_updater_done(node, done_filename),
             validator_nodes + rpc_nodes)
Example #8
def parse_config(config_file, sub_machines=None):
    config = yaml.safe_load(open(config_file))
    default = config.get('default', {})
    machine_spec = config.get('machines', None)
    if machine_spec is None:
        raise ValueError('Machines cannot be empty')
    machines = pmap(lambda ms: machine_from_spec(ms, default), machine_spec)
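    # Index machines by name and check that every name is unique.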
    group_machines = {}
    for m in machines:
        group_machines[str(m)] = m
    assert len(group_machines) == len(machines)
    if sub_machines:
        targets = []
        for n in sub_machines:
            matches = list(
                filter(lambda name: n in name, group_machines.keys()))
            assert len(matches) == 1
            targets += matches
        assert len(targets) > 0
    else:
        targets = group_machines.keys()
    targets = sorted(targets)
    p('machine targets: ', ', '.join(targets))
    return list(map(lambda n: group_machines[n], targets))
Example #9
File: mocknet.py Project: near/nearcore
def wait_all_nodes_up(all_nodes):
    pmap(lambda node: wait_node_up(node), all_nodes)
Example #10
def send_transfers(i0):
    pmap(
        lambda account_and_index: send_transfer(account_and_index[0],
                                                account_and_index[1], i0),
        test_accounts)
Example #11
def send_random_transactions(i0):
    pmap(lambda x: random_transaction(x, i0), test_accounts)
Example #12
File: mocknet.py Project: near/nearcore
def get_tx_events(nodes, tx_filename):
    run('mkdir ./logs/')
    run('rm -rf ./logs/*_txs')
    all_events = pmap(
        lambda node: get_tx_events_single_node(node, tx_filename), nodes)
    return sorted(data.flatten(all_events))
Example #13
#!/usr/bin/env python

# When a script exits with a traceback, the remote nodes are not deleted. This
# script deletes those remote machines so the test can be rerun.
# DANGER: make sure not to delete production nodes!

from rc import gcloud, pmap
from distutils.util import strtobool
import sys
sys.path.append('lib')
from utils import user_name

machines = gcloud.list()
to_delete_prefix = sys.argv[1] if len(
    sys.argv) >= 2 else f"pytest-node-{user_name()}-"
to_delete = list(
    filter(lambda m: m.name.startswith(to_delete_prefix), machines))

if to_delete:
    a = input(
        f"going to delete {list(map(lambda m: m.name, to_delete))}\ny/n: ")
    if strtobool(a):

        def delete_machine(m):
            print(f'deleting {m.name}')
            m.delete()
            print(f'{m.name} deleted')

        pmap(delete_machine, to_delete)
Example #14
def get_tx_events(nodes):
    run('mkdir ./logs/')
    run('rm -rf ./logs/*_txs')
    all_events = pmap(get_tx_events_single_node, nodes)
    return sorted(data.flatten(all_events))
Example #15
def get_nodes(prefix=''):
    return pmap(lambda i: get_node(i, prefix=prefix), range(NUM_NODES))
Example #16
def start_load_test_helpers(nodes, script):
    account = get_validator_account(get_node(0))
    pmap(
        lambda node: start_load_test_helper(node, script, account.pk,
                                            account.sk), nodes)
Example #17
File: mocknet.py Project: near/nearcore
def compress_and_upload(nodes, src_filename, dst_filename):
    res = run(f'gzip {src_filename}')
    assert res.returncode == 0
    pmap(lambda node: upload_and_extract(node, src_filename, dst_filename),
         nodes)
Example #18

def print_chain_data(block, file=sys.stdout):
    all_height = list(map(lambda b: b['height_included'], block['chunks']))
    all_catch_up = False
    if all(map(lambda h: h == block['header']['height'], all_height)):
        all_catch_up = True
    print(block['header']['hash'],
          block['header']['height'],
          block['header']['approvals'],
          all_catch_up,
          all_height,
          file=file)


subprocess.run('mkdir -p /tmp/100_node/', shell=True)

f = []
for node in range(100):
    f.append(open(f'/tmp/100_node/pytest-node-{node}.txt', 'w'))


def query_node(i):
    node = GCloudNode(f'pytest-node-{i}')
    chain_query(node, lambda b: print_chain_data(b, f[i]), max_blocks=20)


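# Query chain data from all 100 nodes in parallel, one output file per node.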
pmap(query_node, range(100))

# node = RpcNode('localhost', 3030)
# chain_query(node, print_chain_data, max_blocks=20)
Example #19
File: mocknet.py Project: near/nearcore
def start_nodes(nodes, upgrade_schedule=None):
    pmap(lambda node: start_node(node, upgrade_schedule=upgrade_schedule),
         nodes)
Example #20
File: mocknet.py Project: near/nearcore
def stop_nodes(nodes):
    pmap(stop_node, nodes)
Example #21
if len(sys.argv) >= 2:
    node_prefix = sys.argv[1]
else:
    node_prefix = f'pytest-node-{user_name()}'
nodes = [
    machine for machine in gcloud.list()
    if machine.name.startswith(node_prefix)
]

if len(sys.argv) >= 3:
    log_file = sys.argv[2]
else:
    log_file = pathlib.Path(tempfile.gettempdir()) / 'python-rc.log'

collected_place = (
    pathlib.Path(tempfile.gettempdir()) / 'near' /
    f'collected_logs_{datetime.datetime.strftime(datetime.datetime.now(),"%Y%m%d")}'
)
collected_place.mkdir(parents=True, exist_ok=True)


def collect_file(node):
    logger.info(f'Download file from {node.name}')
    node.download(str(log_file), str(collected_place / f'{node.name}.txt'))
    logger.info(f'Download file from {node.name} finished')


pmap(collect_file, nodes)
logger.info(f'All download finish, log collected at {collected_place}')
Example #22
def send_transfers(base_block_hash):
    pmap(
        lambda account_and_index: send_transfer(
            account_and_index[0], (account_and_index[1] + 1) % NUM_ACCOUNTS,
            base_block_hash),
        test_accounts)
Example #23
def atexit_cleanup_remote():
    with remote_nodes_lock:
        if remote_nodes:
            rc.pmap(atexit_cleanup, remote_nodes)
Example #24
def send_random_transactions(base_block_hash):
    pmap(lambda x: random_transaction(x, base_block_hash), test_accounts)
Example #25
def get_logs(nodes):
    pmap(get_log, nodes)
Example #26

def create_machine(i):
    m = gcloud.create(name=machine_name_prefix + str(i),
                      machine_type='n1-standard-2',
                      disk_size='200G',
                      image_project='near-core',
                      image=image_name,
                      zone=zones[i % len(zones)],
                      min_cpu_platform='Intel Skylake',
                      reserve_ip=reserve_ip)
    pbar.update(1)
    return m


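# Create all machines in parallel; each call advances the progress bar.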
machines = pmap(create_machine, range(num_machines))
pbar.close()
# machines = pmap(lambda name: gcloud.get(name), [
#                 f'{machine_name_prefix}{i}' for i in range(num_machines)])

tempdir = pathlib.Path(tempfile.gettempdir()) / 'near'


def get_node_dir(i):
    node_dir = tempdir / f'node{i}'
    node_dir.mkdir(parents=True, exist_ok=True)
    return node_dir


for i in range(num_machines):
    node_dir = get_node_dir(i)
Example #27
def accounts_from_nodes(nodes):
    return pmap(get_validator_account, nodes)
Example #28
        rpc_nodes, validator_nodes, args.progressive_upgrade,
        args.increasing_stakes, args.num_seats)
    logger.info(f'upgrade_schedule: %s' % str(upgrade_schedule))

    if not args.skip_setup:
        logger.info('Setting remote python environments')
        mocknet.setup_python_environments(all_nodes,
                                          'add_and_delete_state.wasm')
        logger.info('Setting remote python environments -- done')

    if not args.skip_restart:
        logger.info(f'Restarting')
        # Make sure nodes are running by restarting them.
        mocknet.stop_nodes(all_nodes)
        time.sleep(10)
        node_pks = pmap(lambda node: mocknet.get_node_keys(node)[0],
                        validator_nodes)
        all_node_pks = pmap(lambda node: mocknet.get_node_keys(node)[0],
                            all_nodes)
        node_ips = [node.machine.ip for node in all_nodes]
        mocknet.create_and_upload_genesis(
            validator_nodes,
            chain_id,
            rpc_nodes=rpc_nodes,
            epoch_length=epoch_length,
            node_pks=node_pks,
            increasing_stakes=args.increasing_stakes,
            num_seats=args.num_seats,
            sharding=not args.no_sharding,
            all_node_pks=all_node_pks,
            node_ips=node_ips)
        mocknet.start_nodes(all_nodes, upgrade_schedule)
Example #29
def setup_python_environments(nodes, wasm_contract):
    pmap(lambda n: setup_python_environment(n, wasm_contract), nodes)
Example #30
def send_skyward_transactions(node_account, test_accounts, max_tps_per_node):
    pmap(
        lambda index_and_account: skyward_transaction(index_and_account[1],
                                                      index_and_account[0],
                                                      node_account,
                                                      max_tps_per_node),
        enumerate(test_accounts))