def cleanup(self):
    self.kill()
    # Move the node dir to avoid weird interactions with multiple serial
    # test invocations.
    target_path = self.node_dir + '_finished'
    if os.path.exists(target_path) and os.path.isdir(target_path):
        shutil.rmtree(target_path)
    os.rename(self.node_dir, target_path)

    # Get the log, then delete the machine.
    rc.run('mkdir -p /tmp/pytest_remote_log')
    self.machine.download(
        '/tmp/python-rc.log',
        f'/tmp/pytest_remote_log/{self.machine.name}.log')
    self.destroy_machine()
def compile_rust_contract(content):
    empty_contract_rs = os.path.join(os.path.dirname(__file__),
                                     '../empty-contract-rs')
    run('mkdir -p /tmp/near')
    # Use mkdtemp rather than TemporaryDirectory(...).name: the latter deletes
    # the directory as soon as the TemporaryDirectory object is garbage
    # collected, which can race with the copy and build below.
    tmp_contract = tempfile.mkdtemp(dir='/tmp/near')
    p = run(f'cp -r {empty_contract_rs}/. {tmp_contract}')
    if p.returncode != 0:
        raise Exception(p.stderr)

    # Append the test-specific contract code to the template's lib.rs.
    with open(f'{tmp_contract}/src/lib.rs', 'a') as f:
        f.write(content)

    p = run('bash', input=f'''
cd {tmp_contract}
./build.sh
''')
    if p.returncode != 0:
        raise Exception(p.stderr)
    return f'{tmp_contract}/target/release/empty_contract_rs.wasm'
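# Example usage (a sketch; the Rust snippet below is hypothetical, not taken
# from the original tests):
#
#     wasm_path = compile_rust_contract('''
#     #[no_mangle]
#     pub extern "C" fn noop() {}
#     ''')
#     # wasm_path now points at the built .wasm artifact under /tmp/near.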
def create_target_dir(machine):
    base_target_dir = os.environ.get(KEY_TARGET_ENV_VAR, DEFAULT_KEY_TARGET)
    target_dir = f'{base_target_dir}/{machine.name}'
    run(f'mkdir -p {target_dir}')
    return target_dir
def get_tx_events(nodes):
    run('mkdir ./logs/')
    run('rm -rf ./logs/*_txs')
    all_events = pmap(get_tx_events_single_node, nodes)
    return sorted(data.flatten(all_events))
def resume_network(self):
    rc.run(f'gcloud compute firewall-rules delete {self.machine.name}-stop',
           input='yes\n')
def stop_network(self):
    rc.run(f'gcloud compute firewall-rules create {self.machine.name}-stop '
           f'--direction=EGRESS --priority=1000 --network=default '
           f'--action=DENY --rules=all --target-tags={self.machine.name}')
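# A minimal sketch of how these two methods pair up (assuming `node` is an
# instance of the class above and `time` is imported; the 60-second window is
# an arbitrary example):
#
#     node.stop_network()    # DENY-all egress rule cuts the machine off
#     time.sleep(60)         # keep the node partitioned
#     node.resume_network()  # delete the rule to restore connectivity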
def build(sha, thread_n, outdir, build_before, hostname, remote, release):
    # If the checkout in this thread's directory is already at the requested
    # sha, skip rebuilding.
    already_exists = bash(f'''
cd {thread_n}
git rev-parse HEAD
''')
    print(already_exists)
    sys.stdout.flush()
    if (already_exists.returncode == 0 and
            already_exists.stdout.strip() == sha and build_before):
        print('Woohoo! Skipping the build.')
        sys.stdout.flush()
        return 0

    if not enough_space():
        print("Not enough space.")
        bld = bash(f'''rm -rf {thread_n}''')

    with open(str(outdir) + '/build_out', 'w') as fl_o:
        with open(str(outdir) + '/build_err', 'w') as fl_e:
            kwargs = {"stdout": fl_o, "stderr": fl_e}

            bash('''docker build . -f pytest-runtime.Dockerfile -t pytest-runtime''')

            print("Checkout")
            bld = bash(f'''
cd {thread_n}
git fetch
git checkout {sha}
''', **kwargs, login=True)
            print(bld)
            if bld.returncode != 0:
                # The checkout failed; start over from a fresh clone.
                print("Clone")
                bld = bash(f'''
rm -rf {thread_n}
git clone https://github.com/nearprotocol/nearcore {thread_n}
cd {thread_n}
git checkout {sha}
''', **kwargs, login=True)
                print(bld)
                if bld.returncode != 0:
                    return build_fail_cleanup(bld, thread_n)

            if "mocknet" in hostname:
                print("Skipping the build for mocknet tests")
                return 0

            if remote:
                print("Build for remote.")
                bld = bash(f'''
cd {thread_n}
cargo build -j2 -p neard --features adversarial
''', **kwargs, login=True)
                if bld.returncode != 0:
                    return build_fail_cleanup(bld, thread_n)
                return 0

            print("Build")
            bld = bash(f'''
cd {thread_n}
cargo build -j2 -p neard --features adversarial {release}
cargo build -j2 -p genesis-populate {release}
cargo build -j2 -p restaked {release}
''', **kwargs, login=True)
            print(bld)
            if bld.returncode != 0:
                return build_fail_cleanup(bld, thread_n)

            bld = run(
                f'cd {thread_n} && cargo test -j2 --workspace --no-run '
                f'--all-features --target-dir target_expensive {release}',
                **kwargs)
            if bld.returncode != 0:
                return build_fail_cleanup(bld, thread_n)

            bld = run(
                f'cd {thread_n} && cargo build -j2 -p neard '
                f'--target-dir normal_target {release}', **kwargs)
            if bld.returncode != 0:
                return build_fail_cleanup(bld, thread_n)
            return 0
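# Hypothetical invocation (a sketch; every argument value below is an
# assumption, not taken from the original harness):
#
#     status = build(sha='0123abcd', thread_n='builds/thread0',
#                    outdir=pathlib.Path('/tmp/out'), build_before=True,
#                    hostname='builder-1', remote=False, release='--release')
#     assert status == 0  # non-zero means build_fail_cleanup was triggered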
from concurrent.futures import ThreadPoolExecutor, as_completed
import datetime
import pathlib
import time
import tempfile
from tqdm import tqdm
import shutil
import sys

sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib'))
from cluster import apply_config_changes, apply_genesis_changes
from rc import run
from utils import user_name

# Compute the username up front: both fallbacks below need it.
username = user_name()

try:
    image_name = sys.argv[1]
except IndexError:
    branch = run(
        'git rev-parse --symbolic-full-name --abbrev-ref HEAD').stdout.strip()
    image_name = (
        f'near-{branch}-'
        f'{datetime.datetime.strftime(datetime.datetime.now(), "%Y%m%d")}'
        f'-{username}')

try:
    machine_name_prefix = sys.argv[2]
except IndexError:
    machine_name_prefix = f'pytest-node-{username}-'

genesis_time = (datetime.datetime.utcnow() -
                datetime.timedelta(hours=2)).isoformat() + 'Z'
# Binary-search this value to observe whether the network forks; default is 1.
block_production_time = 1

client_config_changes = {
import os
import datetime
from rc import gcloud, run
import sys

sys.path.append('lib')
from utils import user_name

additional_flags = ''

try:
    image_name = sys.argv[1]
    branch = sys.argv[2]
except IndexError:
    branch = run(
        'git rev-parse --symbolic-full-name --abbrev-ref HEAD').stdout.strip()
    username = user_name()
    image_name = (
        f'near-{branch}-'
        f'{datetime.datetime.strftime(datetime.datetime.now(), "%Y%m%d")}'
        f'-{username}')

machine_name = f'{image_name}-image-builder'
print("Creating machine:", machine_name)
m = gcloud.create(name=machine_name,
                  machine_type='n1-standard-64',
                  disk_size='50G',
                  image_project='ubuntu-os-cloud',
                  image_family='ubuntu-1804-lts',
                  zone='us-west2-c',
                  firewall_allows=['tcp:3030', 'tcp:24567'],
                  min_cpu_platform='Intel Skylake')
def test_run_sh_syntax():
    assert run(['ls', '~']).returncode == 0
    assert run('ls ~').returncode == 0
    assert run(['cat', '"~"']) == run('cat "~"')
def compress_and_upload(nodes, src_filename, dst_filename):
    res = run(f'gzip {src_filename}')
    assert res.returncode == 0
    pmap(lambda node: upload_and_extract(node, src_filename, dst_filename),
         nodes)
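# Usage sketch (the paths are hypothetical): gzip the file once locally, then
# fan it out to every node in parallel; upload_and_extract is assumed to undo
# the compression on the remote side.
#
#     compress_and_upload(nodes, '/tmp/near/genesis.json',
#                         '/home/ubuntu/.near/genesis.json')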
def get_tx_events(nodes, tx_filename):
    run('mkdir ./logs/')
    run('rm -rf ./logs/*_txs')
    all_events = pmap(
        lambda node: get_tx_events_single_node(node, tx_filename), nodes)
    return sorted(data.flatten(all_events))
from rc import gcloud, pmap, run
from distutils.util import strtobool
import sys
import datetime

sys.path.append('lib')
from utils import user_name

machines = gcloud.list()
node_prefix = sys.argv[1] if len(
    sys.argv) >= 2 else f"pytest-node-{user_name()}"
nodes = list(filter(lambda m: m.name.startswith(node_prefix), machines))
log_file = sys.argv[2] if len(sys.argv) >= 3 else "produce_record.txt"
collected_place = (
    '/tmp/near/collected_logs_'
    f'{datetime.datetime.strftime(datetime.datetime.now(), "%Y%m%d")}')
run(['mkdir', '-p', collected_place])


def collect_file(node):
    print(f'Download file from {node.name}')
    node.download('/tmp/python-rc.log', f'{collected_place}/{node.name}.txt')
    print(f'Download file from {node.name} finished')


pmap(collect_file, nodes)
print(f'All downloads finished; logs collected at {collected_place}')
def ls(_args):
    p(run(f'ls {config.groups_dir}').stdout)
        (Key(load_testing_account_id(i), pk, sk), i)
        for i in range(node_index * NUM_ACCOUNTS, (node_index + 1) *
                       NUM_ACCOUNTS)
    ]
    base_block_hash = get_latest_block_hash()
    rpc_info = (LOCAL_ADDR, RPC_PORT)
    return [(Account(key, get_nonce_for_pk(key.account_id, key.pk),
                     base_block_hash, rpc_info), i)
            for (key, i) in test_account_keys]


if __name__ == '__main__':
    test_accounts = get_test_accounts_from_args()
    run(f'rm -rf {TX_OUT_FILE}')

    i0 = test_accounts[0][1]
    start_time = time.time()

    # Begin with only transfers for TPS measurement.
    total_tx_sent = 0
    elapsed_time = 0
    while time.time() - start_time < TRANSFER_ONLY_TIMEOUT:
        (total_tx_sent,
         elapsed_time) = throttle_txns(send_transfers, total_tx_sent,
                                       elapsed_time, MAX_TPS_PER_NODE, i0)

    write_tx_events(test_accounts)

    # Ensure load testing contract is deployed to all accounts before