def stop_clients(clients=None, impls=None, boot=0):
    """Stop clients by running the ``client-stop.yml`` playbook.

    :param clients: list of client names; defaults to every client in the
        inventory.
    :param impls: list of client implementations to target
        (default ``['go']``).
    :param boot: unused here; kept for signature parity with
        ``start_clients`` so both can be dispatched interchangeably.
    """
    # Use None sentinels instead of mutable default arguments ([] / ['go'])
    # which would be shared across calls.
    if impls is None:
        impls = ['go']
    # create group in inventory
    inventory = Inventory()
    clients = clients or list(inventory.clients)
    inventory.inventory['client_stop_group'] = dict(children=clients, hosts=[])
    assert inventory.es
    assert inventory.boot0
    for client in clients:
        assert client
        # Wrap the host list in a group dict so per-client vars can be set.
        d = dict(hosts=inventory.inventory[client], vars=dict())
        inventory.inventory[client] = d
        d['vars']['target_client_impl'] = impls
    # print json.dumps(inventory.inventory, indent=2)
    exec_playbook(inventory.inventory, playbook='client-stop.yml', impls=impls)
def start_clients(clients=None, req_num_peers=7, impls=None, boot=0,
                  enable_mining=True):
    """Start all clients with a custom config (nodeid).

    Builds per-client docker run/tee argument strings from
    ``clients_config`` and runs the ``client-start.yml`` playbook.

    :param clients: list of client names; defaults to every client in the
        inventory.
    :param req_num_peers: requested number of peers per client.
    :param impls: list of client implementations to target
        (default ``['go']``).
    :param boot: index of the bootstrap node, resolved via
        ``get_boot_ip_pk``.
    :param enable_mining: toggles each implementation's mining flags.
    """
    # Use None sentinels instead of mutable default arguments ([] / ['go'])
    # which would be shared across calls.
    if impls is None:
        impls = ['go']
    inventory = Inventory()
    clients = clients or list(inventory.clients)
    inventory.inventory['client_start_group'] = dict(children=clients, hosts=[])
    # print clients
    # quit()
    bt = get_boot_ip_pk(inventory, boot)
    assert inventory.es
    assert inventory.boot0
    for client in clients:
        assert client
        d = dict(hosts=inventory.inventory[client], vars=dict())
        # Per-implementation docker run arguments for this client.
        dra = {}
        dra['go'] = docker_run_args['go'].format(
            bootstrap_public_key=bt['pk'],
            bootstrap_ip=bt['ip'],
            req_num_peers=req_num_peers,
            privkey=clients_config[client]['go']['privkey'],
            mining_state=enable_mining)
        dra['cpp'] = docker_run_args['cpp'].format(
            bootstrap_ip=bt['ip'],
            client_ip=inventory.instances[client],
            req_num_peers=req_num_peers,
            mining_state='--force-mining --mining on' if enable_mining else '')
        dra['python'] = docker_run_args['python'].format(
            bootstrap_ip=bt['ip'],
            req_num_peers=req_num_peers,
            coinbase=clients_config[client]['python']['coinbase'],
            mining_state=mining_cpu_percentage if enable_mining else '0')
        d['vars']['target_client_impl'] = impls
        d['vars']['docker_run_args'] = {}
        d['vars']['docker_tee_args'] = {}
        for impl in ['go', 'cpp', 'python']:
            d['vars']['docker_run_args'][impl] = dra[impl]
            d['vars']['docker_tee_args'][impl] = teees_args.format(
                elarch_ip=inventory.es,
                pubkey_hex=clients_config[client][impl]['pubkey'])
        inventory.inventory[client] = d
    # print json.dumps(inventory.inventory, indent=2)
    exec_playbook(inventory.inventory, playbook='client-start.yml', impls=impls)
def run(run_clients):
    """Run the clients.

    Because of ``autouse=True`` this method is executed before everything
    else in this module. The `run_clients` fixture is defined in
    ``conftest.py``. It is true by default but false if the --norun command
    line flag is set.
    """
    log_event('started')
    if not run_clients:
        return
    inventory = Inventory()
    clients = inventory.clients
    # create schedule: one list of start/stop events per scheduled client,
    # merged and ordered by their 'time' key.
    events = []
    for c in list(clients)[:num_scheduled_clients]:
        events.extend(mkschedule(c))
    events = sorted(events, key=lambda x: x['time'])
    assert len(events)
    print '\n'.join(repr(e) for e in events)
    # reset client storage
    # use client-reset.yml playbook
    # start-up all clients
    log_event('start_all_clients')
    start_clients(clients=clients, impls=impls, boot=boot)
    log_event('start_all_clients.done')
    # run events: replay the churn schedule in wall-clock time
    log_event('run_churn_schedule')
    elapsed = 0
    while events:
        e = events.pop(0)
        # sleep until time has come
        # NOTE(review): events are sorted ascending by 'time', so
        # elapsed <= e['time'] always holds here.
        if elapsed < e['time']:
            time.sleep(e['time'] - elapsed)
            elapsed = e['time']
        # dispatch on the event's desired state: 'running' -> start_clients,
        # 'stopped' -> stop_clients
        cmd = dict(running=start_clients, stopped=stop_clients)[e['state']]
        client = e['client']
        print elapsed, cmd.__name__, client
        cmd(clients=[client], impls=impls, boot=boot)
    log_event('run_churn_schedule.done')
    # start all clients
    log_event('start_all_clients_again')
    start_clients(clients=clients, impls=impls, boot=boot)
    log_event('start_all_clients_again.done')
    # let them agree on a block
    log_event('wait_for_consensus')
    time.sleep(max_time_to_reach_consensus)
    log_event('wait_for_consensus.done')
def clients():
    """py.test passes this fixture to every test function expecting an
    argument called ``clients``.
    """
    return Inventory().clients
def run(run_clients):
    """Run the clients.

    Because of ``autouse=True`` this method is executed before everything
    else in this module. The `run_clients` fixture is defined in
    ``conftest.py``. It is true by default but false if the --norun command
    line flag is set.
    """
    log_event('started')
    if not run_clients:
        return
    inventory = Inventory()
    clients = list(inventory.clients)
    # stop all clients
    # log_event('stopping_clients')
    # stop_clients(clients=clients,impl=impl)
    # log_event('stopping_clients.done')
    # Start a single client first so it can mine the initial blocks alone.
    # NOTE(review): `impl` here is a module-level global (other call sites
    # use `impls`) — confirm it is defined in this module.
    log_event('starting_one_client')
    start_clients(clients=clients[:1], impls=impl)
    log_event('starting_one_client.done')
    print 'mine a bit'
    blocktime = 12
    # intitial difficulty is very high, takes around 2 minutes for initial
    # mined block
    delay = blocktime * 14
    log_event('waiting', delay=delay)
    time.sleep(delay)
    # start other clients
    log_event('starting_other_clients')
    start_clients(clients=clients[1:], impls=impl)
    log_event('starting_other_clients.done')
    # create tx from the first client's coinbase to the second client's
    # derived coinbase address, via the first client's JSON-RPC endpoint
    sender = clients[0]
    recipient = clients[1]
    rpc_host = inventory.inventory[sender][0]
    rpc_port = 20000  # hard coded FIXME if we get multiple clients per ec
    endpoint = 'http://%s:%d' % (rpc_host, rpc_port)
    sending_address = coinbase(endpoint)
    receiving_address = Ox(nodeid_tool.coinbase(str(recipient)))
    print 'sending addr %s, receiving addr %s' % (sending_address,
                                                  receiving_address)
    value = 100
    # print balance(endpoint, sending_address)
    # this fails randomly, why ?
    # sender must have mined enough to cover the transfer
    assert value < balance(endpoint, sending_address)
    log_event('sending_transaction', sender=sending_address,
              to=receiving_address, value=value)
    tx = transact(endpoint, sender=sending_address, to=receiving_address,
                  value=value)
    log_event('sending_transaction.done', result=tx)
    # give the network time to propagate and agree on the tx block
    log_event('waiting', delay=max_time_to_reach_consensus)
    time.sleep(max_time_to_reach_consensus)
    log_event('waiting.done')
#!/usr/bin/env python
"""Open the guided dashboard hosted on the inventory's ``es`` host in the
default web browser."""
import webbrowser

from base import Inventory

webbrowser.open(
    'http://%s/index.html#/dashboard/file/guided.json' % Inventory().es)
def start_clients(clients=None, req_num_peers=7, impls=None, boot=0,
                  enable_mining=True):
    """Start all clients with a custom config (nodeid).

    Derives each client's per-implementation pubkey/privkey/coinbase from
    its name via ``nodeid_tool``, builds docker run/tee argument strings,
    and runs the ``client-start.yml`` playbook.

    :param clients: list of client names; defaults to every client in the
        inventory.
    :param req_num_peers: requested number of peers per client.
    :param impls: list of client implementations to target
        (default ``['go']``).
    :param boot: index of the bootstrap node, resolved via
        ``get_boot_ip_pk``.
    :param enable_mining: toggles each implementation's mining flags.
    """
    # Use None sentinels instead of mutable default arguments ([] / ['go'])
    # which would be shared across calls.
    if impls is None:
        impls = ['go']
    inventory = Inventory()
    clients = clients or list(inventory.clients)
    inventory.inventory['client_start_group'] = dict(children=clients, hosts=[])
    # print clients
    # quit()
    bt = get_boot_ip_pk(inventory, boot)
    assert inventory.es
    assert inventory.boot0
    for client in clients:
        assert client
        # Derive deterministic keys/coinbase per implementation from the
        # client name.
        pubkey = {}
        privkey = {}
        coinbase = {}
        for impl in ['go', 'cpp', 'python']:
            ext_id = str(client) + impl
            # print ext_id
            pubkey[impl] = nodeid_tool.topub(ext_id)
            privkey[impl] = nodeid_tool.topriv(ext_id)
            coinbase[impl] = nodeid_tool.coinbase(ext_id)
        d = dict(hosts=inventory.inventory[client], vars=dict())
        # Per-implementation docker run arguments for this client.
        dra = {}
        dra['go'] = docker_run_args['go'].format(
            bootstrap_public_key=bt['pk'],
            bootstrap_ip=bt['ip'],
            req_num_peers=req_num_peers,
            privkey=privkey['go'],
            mining_state=enable_mining)
        dra['cpp'] = docker_run_args['cpp'].format(
            bootstrap_ip=bt['ip'],
            client_ip=inventory.instances[client],
            req_num_peers=req_num_peers,
            mining_state='--force-mining --mining on' if enable_mining else '')
        dra['python'] = docker_run_args['python'].format(
            bootstrap_ip=bt['ip'],
            req_num_peers=req_num_peers,
            coinbase=coinbase['python'],
            mining_state=mining_cpu_percentage if enable_mining else '0')
        d['vars']['target_client_impl'] = impls
        d['vars']['docker_run_args'] = {}
        d['vars']['docker_tee_args'] = {}
        for impl in ['go', 'cpp', 'python']:
            d['vars']['docker_run_args'][impl] = dra[impl]
            d['vars']['docker_tee_args'][impl] = teees_args.format(
                elarch_ip=inventory.es,
                pubkey_hex=pubkey[impl])
        inventory.inventory[client] = d
    # print json.dumps(inventory.inventory, indent=2)
    exec_playbook(inventory.inventory, playbook='client-start.yml', impls=impls)