def setup(self, urls, numkeys): self.localState = {} self.transactions = [] self.clients = [] self.state = IntegerKeyState(urls[0]) with Progress("Creating clients") as p: for u in urls: key = generate_private_key() self.clients.append(IntegerKeyClient(u, keystring=key)) p.step() print "Checking for pre-existing state" self.state.fetch() keys = self.state.State.keys() for k, v in self.state.State.iteritems(): self.localState[k] = v with Progress("Populating initial key values") as p: txncount = 0 starttime = time.clock() for n in range(1, numkeys + 1): n = str(n) if n not in keys: c = self._get_client() v = random.randint(5, 1000) self.localState[n] = v txnid = c.set(n, v) if txnid is None: raise Exception("Failed to set {} to {}".format(n, v)) self.transactions.append(txnid) txncount += 1 self.txnrate(starttime, txncount) self._wait_for_transaction_commits()
def shutdown(self):
    """Stop all managed validators, escalating to a forced kill."""
    if not self._validators:
        # nothing to shut down
        return
    # Politely ask every running validator to stop.
    with Progress("Sending interrupt signal to validators: ") as p:
        for vldtr in self._validators:
            if vldtr.is_running():
                vldtr.shutdown()
            p.step()
    # Briefly poll for the processes to exit on their own.
    still_running = 0
    deadline = TimeOut(1)
    with Progress("Giving validators time to shutdown: ") as p:
        while True:
            still_running = sum(
                1 for vldtr in self._validators if vldtr.is_running())
            if deadline.is_timed_out() or still_running == 0:
                break
            else:
                time.sleep(1)
            p.step()
    # Anything still alive gets force-killed.
    if still_running != 0:
        with Progress("Killing {} intransigent validators: "
                      .format(still_running)) as p:
            for vldtr in self._validators:
                if vldtr.is_running():
                    vldtr.shutdown(True)
                p.step()
def run(self, numkeys, rounds=1, txintv=0):
    """Run rounds of updates: inc every key by 2, then dec it by 1.

    numkeys: reported key count (used only in the banner message).
    rounds: number of inc/dec passes to perform.
    txintv: seconds to sleep between individual transactions.
    """
    if len(self.clients) == 0:
        return
    self.state.fetch()
    keys = self.state.State.keys()
    print "Running {0} rounds for {1} keys " \
          "with {2} second inter-transaction time" \
          .format(rounds, numkeys, txintv)
    for r in range(0, rounds):
        # Refresh every client's view of state before the round.
        with Progress("Updating clients state") as p:
            for c in self.clients:
                c.fetch_state()
                p.step()
        cnt = 0
        starttime = time.time()
        with Progress("Round {}".format(r)) as p:
            # Pass 1: increment each key by 2, chaining on the key's
            # last transaction id (None on the first round).
            for k in keys:
                k = str(k)
                c = self._get_client()
                self.localState[k] += 2
                txn_dep = self.last_key_txn.get(k, None)
                txn_id = c.inc(k, 2, txn_dep)
                if txn_id is None:
                    raise Exception(
                        "Failed to inc key:{} value:{} by 2".format(
                            k, self.localState[k]))
                self.transactions.append(txn_id)
                self.last_key_txn[k] = txn_id
                cnt += 1
                # Step the progress bar every 10 transactions.
                if cnt % 10 == 0:
                    p.step()
                time.sleep(txintv)
            # Pass 2: decrement each key by 1, depending on the inc above.
            for k in keys:
                k = str(k)
                c = self._get_client()
                self.localState[k] -= 1
                txn_dep = self.last_key_txn[k]
                txn_id = c.dec(k, 1, txn_dep)
                if txn_id is None:
                    raise Exception(
                        "failed to dec key:{} value:{} by 1".format(
                            k, self.localState[k]))
                self.transactions.append(txn_id)
                self.last_key_txn[k] = txn_id
                cnt += 1
                if cnt % 10 == 0:
                    p.step()
                time.sleep(txintv)
        # NOTE(review): len(self.transactions) is cumulative across
        # rounds, so per-round rates include earlier rounds — confirm
        # whether that is intended.
        txn_count = len(self.transactions)
        self.txnrate(starttime, txn_count, "submitted")
        self._wait_for_transaction_commits()
        self.txnrate(starttime, txn_count, "committed")
def extend_genesis_util(self, overrides): print vnm = None try: vnm = get_default_vnm(2, overrides=overrides) # Test genesis util cfg = vnm.get_configuration(0) ledger_type = cfg['LedgerType'] gblock_file = genesis_info_file_name(cfg['DataDirectory']) self.assertFalse(os.path.exists(gblock_file)) vnm.do_genesis() self.assertTrue(os.path.exists(gblock_file)) genesis_dat = None with open(gblock_file, 'r') as f: genesis_dat = json.load(f) self.assertTrue('GenesisId' in genesis_dat.keys()) head = genesis_dat['GenesisId'] # Verify genesis tool efficacy on a minimal network vnm.launch() # ...verify validator is extending tgt_block to = TimeOut(64) blk_lists = None prog_str = 'testing root extension (expect root: %s)' % head with Progress(prog_str) as p: print while not to.is_timed_out() and blk_lists is None: try: blk_lists = get_blocklists(['http://localhost:8800']) print 'block_lists: %s' % blk_lists if len(blk_lists) < 1 or len(blk_lists[0]) < 2: blk_lists = None except MessageException as e: pass time.sleep(2) p.step() self.assertIsNotNone(blk_lists) root = blk_lists[0][0] self.assertEqual(head, root) # ...verify general convergence to = TimeOut(32) with Progress('testing root convergence') as p: print while (is_convergent(vnm.urls(), tolerance=1, standard=1) is False and not to.is_timed_out()): time.sleep(2) p.step() # ...verify convergence on the genesis block blk_lists = get_blocklists(['http://localhost:8800']) root = blk_lists[0][0] self.assertEqual(head, root) print 'network converged on root: %s' % root finally: if vnm is not None: archive_name = 'Test%sGenesisResults' % ledger_type.upper() vnm.shutdown(archive_name=archive_name)
def run(self, rounds=1):
    """Run `rounds` passes of +2 then -1 updates over every known key."""
    if not self.clients:
        return
    self.state.fetch()
    keys = self.state.State.keys()
    for round_num in range(1, rounds + 1):
        # Bring every client's cached state up to date first.
        with Progress("Updating clients state") as p:
            for client in self.clients:
                client.fetch_state()
                p.step()
        submitted = 0
        with Progress("Round {}".format(round_num)) as p:
            # Pass 1: increment each key by 2, chaining on its last txn.
            for key in keys:
                client = self._get_client()
                self.localState[key] += 2
                dep = self.last_key_txn.get(key, None)
                txn_id = client.inc(key, 2, dep)
                if txn_id is None:
                    raise Exception(
                        "Failed to inc key:{} value:{} by 2".format(
                            key, self.localState[key]))
                self.transactions.append(txn_id)
                self.last_key_txn[key] = txn_id
                submitted += 1
                if submitted % 10 == 0:
                    p.step()
            # Pass 2: decrement each key by 1, depending on the inc.
            for key in keys:
                client = self._get_client()
                self.localState[key] -= 1
                dep = self.last_key_txn[key]
                txn_id = client.dec(key, 1, dep)
                if txn_id is None:
                    raise Exception(
                        "Failed to dec key:{} value:{} by 1".format(
                            key, self.localState[key]))
                self.transactions.append(txn_id)
                self.last_key_txn[key] = txn_id
                submitted += 1
                if submitted % 10 == 0:
                    p.step()
        self._wait_for_transaction_commits()
def test_initial_connectivity_n_minus_1(self):
    """Launch a genesis node plus 4 peers where node i requires
    InitialConnectivity=i, then verify block-list equality and orderly
    transactions across the network."""
    try:
        # "**none**" makes the first node start without a ledger peer.
        self.vnm.validator_config['LedgerURL'] = "**none**"
        validator = self.vnm.launch_node(genesis=True)
        validators = [validator]
        with Progress("Launching validator network") as p:
            self.vnm.validator_config['LedgerURL'] = validator.url
            node_identifiers = [validator.Address]
            for i in range(1, 5):
                # Each new node demands one more initial connection
                # than the last.
                self.vnm.validator_config['InitialConnectivity'] = i
                v = self.vnm.launch_node(genesis=False, daemon=False)
                validators.append(v)
                node_identifiers.append(v.Address)
                p.step()
        self.vnm.wait_for_registration(validators, validator)
        validator_urls = self.vnm.urls()
        clients = [SawtoothClient(base_url=u) for u in validator_urls]
        integer_key_clients = [
            IntegerKeyClient(baseurl=u, keystring=generate_private_key())
            for u in validator_urls
        ]
        # Submit one transaction through every node so each participates.
        for int_key_client in integer_key_clients:
            int_key_client.set(key=str(1), value=20)
        self._verify_equality_of_block_lists(clients)
        self._verify_orderly_transactions(clients, node_identifiers)
    finally:
        # Always tear down and archive results, pass or fail.
        self.vnm.shutdown()
        self.vnm.create_result_archive(
            'TestOrderlyInitialConnectivity.tar.gz')
def expand_network(self, validators, count=1):
    """
    Expand an existing network.

    Args:
        validators: running validators against which to launch new nodes
        count: new validators to launch against each running validator

    Returns:
        The list of newly launched validators. Registration is verified
        against validators[0], which also provides the ledger URL.
    """
    ledger_validator = validators[0]
    new_validators = []
    with Progress("Extending validator network") as p:
        cfg = {
            'LedgerURL': ledger_validator.url
        }
        # Launch `count` nodes per existing validator; all of them join
        # via the first validator's ledger URL.
        for _ in validators:
            for _ in range(0, count):
                v = self.launch_node(overrides=cfg)
                new_validators.append(v)
                p.step()
    self.wait_for_registration(new_validators,
                               ledger_validator,
                               max_time=240)
    return new_validators
def _do_teardown(self): print 'destroying', str(self.__class__.__name__) if hasattr(self, '_node_ctrl') and self._node_ctrl is not None: # Shut down the network with Progress("terminating network") as p: for node_name in self._node_ctrl.get_node_names(): self._node_ctrl.stop(node_name) to = TimeOut(16) while len(self._node_ctrl.get_node_names()) > 0: if to.is_timed_out(): break time.sleep(1) p.step() # force kill anything left over for node_name in self._node_ctrl.get_node_names(): try: print "%s still 'up'; sending kill..." % node_name self._node_ctrl.kill(node_name) except Exception as e: print e.message # Clean temp dir print 'deleting temp directory' tmp_dir = hasattr(self, '_currency_home') tmp_dir = None if tmp_dir is False else self._currency_home if tmp_dir is not None and os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir) # Restore environmental vars if hasattr(self, '_old_currency_home'): print 'restoring environmental variables' if self._old_currency_home is None: os.unsetenv('CURRENCYHOME') else: os.environ['CURRENCYHOME'] = self._old_currency_home
def wait_for_registration(self, validators, validator, max_time=None):
    """
    Wait for newly launched validators to register.
    validators: list of validators on which to wait
    validator: running validator against which to verify registration
    max_time: seconds before giving up (default 120)
    Raises ExitError on timeout or if any validator reports an error.
    Returns True once every validator is registered.
    """
    max_time = 120 if max_time is None else max_time
    unregistered_count = len(validators)
    with Progress("{0} waiting for registration of {1} validators".format(
        validator.name,
        unregistered_count,
    )) as p:
        url = validator.url
        to = TimeOut(max_time)
        while unregistered_count > 0:
            if to():
                raise ExitError(
                    "{} extended validators failed to register "
                    "within {}S.".format(unregistered_count, to.WaitTime))
            p.step()
            time.sleep(1)
            # Recount the stragglers on every pass.
            unregistered_count = 0
            for v in validators:
                if not v.is_registered(url):
                    unregistered_count += 1
                    # Surface a crashed validator immediately instead of
                    # waiting for the timeout.
                    try:
                        v.check_error()
                    except ValidatorManagerException as vme:
                        v.dump_log()
                        v.dump_stderr()
                        raise ExitError(str(vme))
    return True
def setup(self, urls, numkeys):
    """Create clients, mirror any pre-existing state, and seed the
    remaining keys ("1".."numkeys") with random values."""
    self.localState = {}
    self.transactions = []
    self.last_key_txn = {}
    self.clients = []
    self.state = IntegerKeyState(urls[0])
    with Progress("Creating clients") as p:
        for u in urls:
            try:
                key = generate_private_key()
                self.clients.append(IntegerKeyClient(u, keystring=key))
                p.step()
            except MessageException:
                # Skip unreachable validators rather than aborting setup.
                logger.warn("Unable to connect to Url: %s ", u)
    if len(self.clients) == 0:
        return
    # add check for if a state already exists
    with Progress("Checking for pre-existing state") as p:
        self.state.fetch()
        for k, v in self.state.State.iteritems():
            self.localState[k] = v
            p.step()
    keys = self.state.State.keys()
    with Progress("Populating initial key values") as p:
        txncount = 0
        starttime = time.time()
        for n in range(1, numkeys + 1):
            n = str(n)
            # Only create keys not already committed on-chain.
            if n not in keys:
                c = self._get_client()
                v = random.randint(5, 1000)
                self.localState[n] = v
                txnid = c.set(n, v)
                if txnid is None:
                    raise Exception("Failed to set {} to {}".format(n, v))
                self.transactions.append(txnid)
                txncount += 1
                self.last_key_txn[n] = txnid
                p.step()
    print
    self.txnrate(starttime, txncount, "submitted")
    self._wait_for_transaction_commits()
    self.txnrate(starttime, txncount, "committed")
def launch_network(self, count=1):
    """Launch a genesis validator plus count-1 peers and wait (up to
    120s) for all of them to register."""
    with Progress("Launching initial validator") as p:
        # "**none**" means no ledger peer: this node bootstraps genesis.
        self.ValidatorConfig['LedgerURL'] = "**none**"
        self.ValidatorConfig['GenesisLedger'] = True
        if(self.blockChainArchive is not None):
            self.ValidatorConfig['Restore'] = True
        validator = self.launch_node()
        while not validator.is_registered():
            if validator.has_error():
                validator.dump_log()
                validator.dump_stderr()
                raise ExitError("Initial validator crashed.")
            p.step()
            time.sleep(1)
    with Progress("Launching validator network") as p:
        # Remaining nodes join via the genesis validator's URL.
        self.ValidatorConfig['LedgerURL'] = validator.Url
        self.ValidatorConfig['GenesisLedger'] = False
        self.ValidatorConfig['Restore'] = False
        for i in range(1, count):
            self.launch_node()
            p.step()
    with Progress("Waiting for validator registration") as p:
        unregCount = len(self.Validators)
        url = validator.Url
        to = TimeOut(120)
        while unregCount > 0:
            if to():
                raise ExitError(
                    "{} validators failed to register within {}S.".format(
                        unregCount, to.WaitTime))
            p.step()
            time.sleep(1)
            # Recount unregistered validators on every pass, surfacing
            # crashes immediately.
            unregCount = 0
            for v in self.Validators:
                if not v.is_registered(url):
                    unregCount += 1
                    if v.has_error():
                        v.dump_log()
                        v.dump_stderr()
                        raise ExitError(
                            "{} crashed during initialization.".format(
                                v.Name))
def test_join_after_delay_start(self):
    """Start a 5-node network, launch a delayed validator, start it via
    its /command endpoint, and verify it joins and block lists match."""
    delayed_validator = None
    validator_urls = []
    try:
        self.vnm.launch_network(5)
        validator_urls = self.vnm.urls()
        delayed_validator = self.vnm.launch_node(delay=True)
        time.sleep(5)
        # Kick the delayed validator into action over HTTP.
        command_url = delayed_validator.url + '/command'
        request = urllib2.Request(
            url=command_url,
            headers={'Content-Type': 'application/json'})
        response = urllib2.urlopen(request, data='{"action": "start"}')
        response.close()
        self.assertEqual(response.code, 200,
                         "Successful post to delayed validator")
        validator_urls.append(delayed_validator.url)
        ledger_web_clients = [
            LedgerWebClient(url=u) for u in validator_urls
        ]
        with Progress("Waiting for registration of 1 validator") as p:
            url = validator_urls[0]
            to = TimeOut(60)
            while not delayed_validator.is_registered(url):
                if to():
                    raise ExitError(
                        "{} delayed validator failed to register "
                        "within {}S.".format(1, to.WaitTime))
                p.step()
                time.sleep(1)
                # Surface a crash immediately rather than waiting out
                # the timeout.
                try:
                    delayed_validator.check_error()
                except ValidatorManagerException as vme:
                    delayed_validator.dump_log()
                    delayed_validator.dump_stderr()
                    raise ExitError(str(vme))
        integer_key_clients = [
            IntegerKeyClient(baseurl=u, keystring=generate_private_key())
            for u in validator_urls
        ]
        for int_key_client in integer_key_clients:
            int_key_client.set(key=str(1), value=20)
        self._verify_equality_of_block_lists(ledger_web_clients)
    finally:
        self.vnm.shutdown()
        # BUG FIX: the guard read "validator_urls is not []" — an
        # identity comparison against a fresh list literal, which is
        # always True. Use != for the intended emptiness check.
        if delayed_validator is not None and \
                validator_urls != [] and \
                delayed_validator.url not in validator_urls:
            # The delayed validator never joined the managed url list,
            # so the network shutdown above did not cover it.
            delayed_validator.shutdown()
        self.vnm.create_result_archive("TestDelayedStart.tar.gz")
def launch_network(self, count=1, max_time=None, others_daemon=False):
    """Launch a genesis validator plus count-1 peers, wait for
    registration, and return the list of validators."""
    validators = []
    with Progress("Launching initial validator") as p:
        cfg = {
            'LedgerURL': "**none**",
            'Restore': self.block_chain_archive,
        }
        validator = self.launch_node(overrides=cfg,
                                     genesis=True,
                                     daemon=False)
        validators.append(validator)
        # Quorum ledgers have no registration step; probe for "started"
        # instead of "registered".
        probe_func = validator.is_registered
        if self.validator_config.get('LedgerType', '') == 'quorum':
            probe_func = validator.is_started
        while not probe_func():
            try:
                validator.check_error()
            except ValidatorManagerException as vme:
                validator.dump_log()
                validator.dump_stderr()
                raise ExitError(str(vme))
            p.step()
            time.sleep(1)
    if count > 1:
        with Progress("Launching validator network") as p:
            # Remaining nodes join via the first validator's URL.
            cfg = {
                'LedgerURL': validator.url,
                'Restore': self.block_chain_archive,
            }
            for _ in range(1, count):
                v = self.launch_node(overrides=cfg,
                                     genesis=False,
                                     daemon=others_daemon)
                validators.append(v)
                p.step()
    self.wait_for_registration(validators, validator, max_time=max_time)
    return validators
def validator_shutdown(self, idx, sig, timeout, force):
    '''
    Dispose of validator subprocesses by index
    Args:
        idx (int): which validator (in self.hdls)
        sig (str): valid values: SIG{TERM,INT,KILL}.
        timeout (int): time to wait in seconds
        force (bool): whether to try SIGKILL if another method has not
            worked within timeout seconds.
    Returns:
        None
    '''
    assert isinstance(self.hdls[idx], ValidatorManager)
    cfg = self.net_config.get_node_cfg(idx)
    v_name = cfg['NodeName']
    v = self.hdls[idx]
    print('sending %s to %s' % (sig, v_name))
    if v.is_running():
        if sig == 'SIGTERM':
            v.shutdown(term=True)
        elif sig == 'SIGINT':
            v.shutdown()
        elif sig == 'SIGKILL':
            v.shutdown(force=True)
        else:
            # BUG FIX: Exception('...%s', sig) never formatted the
            # message (logging-style args); use % interpolation.
            raise Exception('unrecognized argument for sig: %s' % sig)
    # Would be ideal to move the waiting here into threads in self.commit.
    # Then we could shut down several in parallel, and (besides archive
    # collection, which really should be moved to a provider) self.shutdown
    # would basically just be a numpy.zeros update (re-using this code)!
    to = TimeOut(timeout)
    success = False
    ini = time.time()
    with Progress("giving %s %ss to shutdown: " % (v_name, timeout)) as p:
        while success is False:
            if not v.is_running():
                success = True
            elif to.is_timed_out():
                break
            else:
                time.sleep(1)
            p.step()
    dur = time.time() - ini
    if success is False:
        fail_msg = "%s is still running %.2f seconds after %s"
        # BUG FIX: the message interpolated `force` (a bool) where the
        # signal name belongs; use `sig`, matching the success message.
        fail_msg = fail_msg % (v_name, dur, sig)
        if force is False or sig == 'SIGKILL':
            raise ValidatorManagerException(fail_msg)
        else:
            # Escalate to SIGKILL with at least a 4 second grace period.
            timeout = max(4, timeout)
            print('{}; trying SIGKILL, timeout {}...'.format(
                fail_msg, timeout))
            self.validator_shutdown(idx, 'SIGKILL', timeout, force)
    if success is True:
        print("%s shut down %.2f seconds after %s" % (v_name, dur, sig))
    self.hdls[idx] = None
def shutdown(self, archive_name=None):
    """Stop all live validators (escalating to a kill) and optionally
    archive the data directory as <archive_name>.tar.gz."""
    vals = [v for v in self.hdls if v is not None]
    if len(vals) > 0:
        with Progress("Sending interrupt signal to validators: ") as p:
            for v in vals:
                if v.is_running():
                    v.shutdown()
                p.step()
        running_count = 0
        to = TimeOut(5)
        with Progress("Giving validators time to shutdown: ") as p:
            while True:
                running_count = 0
                for v in vals:
                    if v.is_running():
                        running_count += 1
                if to.is_timed_out() or running_count == 0:
                    break
                else:
                    time.sleep(1)
                p.step()
        if running_count != 0:
            with Progress("Killing {} intransigent validators: ".format(
                    running_count)) as p:
                for v in vals:
                    if v.is_running():
                        v.shutdown(True)
                    p.step()
    # Archive the data directory, but only if there is something in it.
    if (archive_name is not None and self.data_dir is not None
            and os.path.exists(self.data_dir)
            and len(os.listdir(self.data_dir)) > 0):
        tar = tarfile.open('%s.tar.gz' % archive_name, "w|gz")
        # BUG FIX: the tar stream was leaked if tar.add() raised;
        # always close it.
        try:
            base_name = self.get_archive_base_name(archive_name)
            for (dir_path, _, filenames) in walk(self.data_dir):
                for f in filenames:
                    fp = os.path.join(dir_path, f)
                    tar.add(fp, os.path.join(base_name, f))
        finally:
            tar.close()
def setup(self):
    """Create marketplace actors (one key file each), register one asset
    per actor, then register holdings of every asset for every actor."""
    self.state = mktplace_state.MarketPlaceState(self.urls[0])
    with Progress("Creating participants") as p:
        for i in range(0, self.count):
            name = "actor-{}".format(i)
            keyfile = os.path.join(self.testDir, "{}.wif".format(name))
            # Reuse a key file from a previous run when available.
            if os.path.exists(keyfile):
                key = read_key_file(keyfile)
            else:
                key = signing.encode_privkey(signing.generate_privkey(),
                                             'wif')
                write_key_file(keyfile, key)
            # Each actor talks to a randomly chosen validator.
            url = self.urls[random.randint(0, len(self.urls) - 1)]
            a = MktActor(name, url, key)
            self.Actors.append(a)
            p.step()
    with Progress("Registering actors assets") as p:
        for a in self.Actors:
            # create assets
            a.register_asset(a.Name + "-asset")
            p.step()
    self.wait_for_transaction_commits()
    with Progress("Registering holdings") as p:
        for a in self.Actors:
            a.update()
            a.offers = []
            # Register a holding of every other actor's asset with count
            # 0, and of the actor's own asset with a funded count.
            for a2 in self.Actors:
                count = 0
                if a is a2:
                    # for each iteration we need 1 to pay with and 1 to
                    # give
                    count = 2 * self.count * self.iterations
                for ast in a2.assets.keys():
                    a.register_holding(ast, count)
            p.step()
    self.wait_for_transaction_commits()
def _poll_for_convergence(urls):
    """Block until the validators at `urls` reach minimal convergence.

    Raises:
        CliException: if the validators stay unreachable past 256s.
    """
    to = TimeOut(256)
    convergent = False
    task_str = 'checking for minimal convergence on: {}'.format(urls)
    with Progress(task_str) as p:
        while convergent is False:
            try:
                convergent = is_convergent(urls, standard=2, tolerance=0)
            except MessageException:
                # Validators may simply not be reachable yet; only this
                # unreachable case is subject to the timeout.
                if to.is_timed_out():
                    raise CliException('timed out {}'.format(task_str))
            # BUG FIX: the step/sleep originally ran only on the
            # MessageException path, so a reachable-but-unconverged
            # network was polled in a tight busy loop.
            if not convergent:
                p.step()
                time.sleep(4)
def _wait_for_transaction_commits(self): to = TimeOut(240) txnCnt = len(self.transactions) with Progress("Waiting for transactions to commit") as p: while not to() and txnCnt > 0: p.step() time.sleep(1) txnCnt = self._update_uncommitted_transactions() if txnCnt != 0: if len(self.transactions) != 0: print "Uncommitted transactions: ", self.transactions raise Exception("{} transactions failed to commit in {}s".format( txnCnt, to.WaitTime))
def setup(self, urls, numKeys):
    """Create one client per url and seed numKeys keys with random values."""
    self.localState = {}
    self.transactions = []
    self.clients = []
    self.state = IntegerKeyState(urls[0])
    with Progress("Creating clients") as p:
        for url in urls:
            signing_key = generate_private_key()
            self.clients.append(IntegerKeyClient(url,
                                                 keystring=signing_key))
            p.step()
    with Progress("Creating initial key values") as p:
        for idx in range(1, numKeys + 1):
            name = str(idx)
            client = self._get_client()
            value = random.randint(5, 1000)
            self.localState[name] = value
            txnid = client.set(name, value)
            if txnid is None:
                raise Exception(
                    "Failed to set {} to {}".format(name, value))
            self.transactions.append(txnid)
    self._wait_for_transaction_commits()
def shutdown(self):
    """Ask validators to stop, wait up to 10s, then force-kill stragglers."""
    if not self.Validators:
        # no validators to shutdown
        return
    with Progress("Sending shutdown message to validators: ") as p:
        for validator in self.Validators:
            if validator.is_running():
                validator.post_shutdown()
            p.step()
    alive = 0
    deadline = TimeOut(10)
    with Progress("Giving validators time to shutdown: ") as p:
        while True:
            alive = sum(
                1 for validator in self.Validators
                if validator.is_running())
            if deadline.is_timed_out() or alive == 0:
                break
            else:
                time.sleep(1)
            p.step()
    if alive != 0:
        with Progress("Killing {} intransigent validators: ".format(
                alive)) as p:
            for validator in self.Validators:
                if validator.is_running():
                    validator.shutdown(True)
                p.step()
    # wait for windows to learn that the subprocess are dead.
    if os.name == "nt":
        time.sleep(5)
def probe_validator(self, validator, max_time=30):
    """Poll validator.is_started() until it succeeds or max_time expires."""
    with Progress("probing status of {0}".format(validator.name)) as p:
        deadline = TimeOut(max_time)
        started = False
        while started is False:
            if deadline():
                raise ExitError(
                    "{} failed to initialize within {}S.".format(
                        validator.name, deadline.WaitTime))
            try:
                started = validator.is_started()
            except Exception as e:
                # Transient probe failures are expected during boot.
                print(e.message)
            p.step()
            time.sleep(1)
def shutdown(self): if len(self.Validators) == 0: # no validators to shutdown return with Progress("Sending shutdown message to validators: ") as p: for v in self.Validators: if v.is_running(): v.post_shutdown() p.step() print "Giving validators time to shutdown" time.sleep(10) with Progress("Shutting down validators: ") as p: for v in self.Validators: if v.is_running(): v.shutdown(True) p.step() # wait for windows to learn that the subprocess are dead. if os.name == "nt": time.sleep(5)
def _poll_for_convergence(self, timeout=256, tolerance=2, standard=5):
    """Wait (asserting against the timeout) until the network converges,
    then print a situation report and return the convergence flag."""
    has_converged = False
    with Progress('awaiting convergence') as p:
        deadline = TimeOut(timeout)
        while has_converged is False:
            # Fail the test rather than loop forever.
            self.assertFalse(deadline.is_timed_out(),
                             'timed out awaiting convergence')
            p.step()
            time.sleep(4)
            try:
                has_converged = is_convergent(self.urls,
                                              standard=standard,
                                              tolerance=tolerance)
            except MessageException:
                # Node unreachable; keep polling.
                pass
    sit_rep(self.urls, verbosity=1)
    return has_converged
def _wait_for_no_transaction_commits(self):
    """Verify that none of the submitted transactions commit in 120s."""
    # for the case where no transactions are expected to commit
    timer = TimeOut(120)
    starting_txn_count = len(self.transactions)
    remaining_txn_count = len(self.transactions)
    with Progress("Waiting for no transactions to commit") as p:
        while not timer() and remaining_txn_count > 0:
            p.step()
            time.sleep(1)
            self._has_uncommitted_transactions()
            remaining_txn_count = len(self.transactions)
    if starting_txn_count != remaining_txn_count:
        raise Exception(
            "{} unexpected transaction commits after {}s".format(
                starting_txn_count - remaining_txn_count,
                timer.WaitTime))
def setup(self, urls): self.global_store = {} self.running_url_list = urls self.global_keys = [] self.transactions = [] self.lastKeyTxn = {} self.clients = [] self.state = IntegerKeyState(urls[0]) with Progress("Creating clients") as p: print "Creating clients" for u in self.running_url_list: try: key = generate_private_key() self.clients.append(IntegerKeyClient(u, keystring=key)) p.step() except MessageException: print "Unable to connect to Url: {}".format(u)
def wait_for_transaction_commits(self): to = TimeOut(120) txn_cnt = 1 with Progress("Waiting for transactions to commit") as p: while not to() and txn_cnt > 0: p.step() time.sleep(1) txn_cnt = 0 for a in self.Actors: txn_cnt += a.has_uncommitted_transactions() if txn_cnt != 0: for a in self.Actors: if len(a.transactions) != 0: print "Uncommitted transactions: ", a.Name, a.transactions raise Exception("{} transactions failed to commit in {}s".format( txn_cnt, to.WaitTime))
def _wait_for_no_transaction_commits(self):
    """Verify that transactions with missing dependencies do NOT commit
    within the 120s window; raises if any of them do."""
    # for the case where no transactions are expected to commit
    to = TimeOut(120)
    starting_txn_count = len(self.transactions)
    remaining_txn_cnt = len(self.transactions)
    with Progress("Waiting for transactions to NOT commit") as p:
        while not to() and remaining_txn_cnt > 0:
            p.step()
            time.sleep(1)
            remaining_txn_cnt = self._update_uncommitted_transactions()
    # Any reduction in the uncommitted count means something committed.
    if remaining_txn_cnt != starting_txn_count:
        committedtxncount = starting_txn_count - remaining_txn_cnt
        raise Exception("{} transactions with missing dependencies "
                        "were committed in {}s".format(
                            committedtxncount, to.WaitTime))
    else:
        print "No transactions with missing dependencies " \
              "were committed in {0}s".format(to.WaitTime)
def _do_teardown(self):
    """Stop every node (force-killing stragglers) and clean up the
    node controller's resources."""
    print 'destroying', str(self.__class__.__name__)
    if hasattr(self, '_node_ctrl') and self._node_ctrl is not None:
        # Shut down the network
        with Progress("terminating network") as p:
            for node_name in self._node_ctrl.get_node_names():
                self._node_ctrl.stop(node_name)
            # Give the nodes up to 16s to disappear from the controller.
            to = TimeOut(16)
            while len(self._node_ctrl.get_node_names()) > 0:
                if to.is_timed_out():
                    break
                time.sleep(1)
                p.step()
        # force kill anything left over
        for node_name in self._node_ctrl.get_node_names():
            try:
                print "%s still 'up'; sending kill..." % node_name
                self._node_ctrl.kill(node_name)
            except Exception as e:
                # Best-effort kill: report and continue with other nodes.
                print e.message
        self._node_ctrl.clean()
def extend_genesis_util(self, ledger_type, pre_overrides, post_overrides):
    """Exercise the '<ledger_type>-genesis' admin CLI: create a genesis
    block on a simulated 2-node network, then verify the network extends
    and converges on that root."""
    print
    top = None
    try:
        # Get configs and resources for a ValidatorManager compliant nodes
        top = get_default_sim_controller(2, ledger_type=ledger_type)
        # Set up validator-0
        cfg = top.get_configuration(0)
        cfg.update(pre_overrides)
        top.set_configuration(0, cfg)
        config_file = top.write_configuration(0)
        # Test genesis tool
        print 'testing genesis util...'
        gblock_file = genesis_info_file_name(cfg['DataDirectory'])
        self.assertFalse(os.path.exists(gblock_file))
        cli_args = 'admin %s-genesis --config %s' % (ledger_type,
                                                     config_file)
        sawtooth_cli_intercept(cli_args)
        # Get genesis block id
        self.assertTrue(os.path.exists(gblock_file))
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        self.assertTrue('GenesisId' in genesis_dat.keys())
        head = genesis_dat['GenesisId']
        # Verify genesis tool efficacy on a minimal network
        print 'testing efficacy...'
        # ...apply validator-related overrides to validator-0
        cfg = top.get_configuration(0)
        cfg.update(post_overrides)
        top.set_configuration(0, cfg)
        # ...launch entire network
        top.launch(probe_seconds=0, reg_seconds=0)
        # ...verify validator is extending tgt_block
        to = TimeOut(64)
        blk_lists = None
        prog_str = 'testing root extension (expect root: %s)' % head
        with Progress(prog_str) as p:
            print
            while not to.is_timed_out() and blk_lists is None:
                try:
                    blk_lists = get_blocklists(['http://localhost:8800'])
                    print 'block_lists: %s' % blk_lists
                    # Require at least one list with two blocks, i.e.
                    # the chain has grown past genesis.
                    if len(blk_lists) < 1 or len(blk_lists[0]) < 2:
                        blk_lists = None
                except MessageException as e:
                    # Validator not reachable yet; keep polling.
                    pass
                time.sleep(2)
                p.step()
        self.assertIsNotNone(blk_lists)
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        # ...verify general convergence
        to = TimeOut(32)
        with Progress('testing root convergence') as p:
            print
            while (is_convergent(top.urls(), tolerance=1, standard=1)
                   is False and not to.is_timed_out()):
                time.sleep(2)
                p.step()
        # ...verify convergence on the genesis block
        blk_lists = get_blocklists(['http://localhost:8800'])
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        print 'network converged on root: %s' % root
    finally:
        # Always archive results and shut the simulated network down.
        if top is not None:
            archive_name = 'Test%sGenesisResults' % ledger_type.upper()
            top.shutdown(archive_name=archive_name)
def main():
    """CLI entry point: configure, launch a validator network in batches
    of 8 (waiting for convergence between batches), run stats, and tear
    everything down on exit."""
    node_ctrl = None
    try:
        opts = configure(sys.argv[1:])
    except Exception as e:
        print(str(e), file=sys.stderr)
        sys.exit(1)
    try:
        count = opts['count']
        # log_config = NEED
        currency_home = opts['data_dir']
        http_port = int(opts['http_port'])
        gossip_port = int(opts['port'])
        try:
            ledger_type = opts["validator_config"]["LedgerType"]
        except KeyError:
            # None defaults to poet1
            ledger_type = None
        node_ctrl = WrappedNodeController(SubprocessNodeController(),
                                          data_dir=currency_home)
        # Build per-node arguments with sequential http/gossip ports.
        nodes = []
        for idx in range(count):
            node = NodeArguments("validator-{:0>3}".format(idx),
                                 http_port=http_port + idx,
                                 gossip_port=gossip_port + idx,
                                 ledger_type=ledger_type)
            nodes.append(node)
        currency_home = node_ctrl.get_data_dir()
        # Persist shared log/validator configs for every node to pick up.
        if opts['log_config_dict']:
            file_name = 'launcher_cli_global_log_config.js'
            full_name = '{}/etc/{}'.format(currency_home, file_name)
            with open(full_name, 'w') as f:
                f.write(json.dumps(opts['log_config_dict'], indent=4))
            opts['validator_config']['LogConfigFile'] = full_name
        if opts['validator_config']:
            file_name = 'launcher_cli_global_validator_config.js'
            with open('{}/etc/{}'.format(currency_home, file_name),
                      'w') as f:
                f.write(json.dumps(opts['validator_config'], indent=4))
            for nd in nodes:
                nd.config_files.append(file_name)
        # set up our urls (external interface)
        urls = ['http://localhost:%s' % x.http_port for x in nodes]
        # Make genesis block
        print('creating genesis block...')
        nodes[0].genesis = True
        node_ctrl.create_genesis_block(nodes[0])
        # Launch network (node zero will trigger bootstrapping)
        batch_size = 8
        print('staged-launching network (batch_size: {})...'
              .format(batch_size))
        # Start nodes batch_size at a time, waiting for each batch to
        # converge before launching the next.
        lower_bound = 0
        while lower_bound < count:
            upper_bound = lower_bound + min(count - lower_bound,
                                            batch_size)
            for idx in range(lower_bound, upper_bound):
                print("launching {}".format(nodes[idx].node_name))
                node_ctrl.start(nodes[idx])
            _poll_for_convergence(urls[lower_bound:upper_bound])
            lower_bound = upper_bound
        run_stats(urls[0])
    except KeyboardInterrupt:
        print("\nExiting")
    except ExitError as e:
        # this is an expected error/exit, don't print stack trace -
        # the code raising this exception is expected to have printed the error
        # details
        print("\nFailed!\nExiting: {}".format(e))
    except:
        traceback.print_exc()
        print("\nFailed!\nExiting: {}".format(sys.exc_info()[0]))
    finally:
        # Best-effort teardown: stop, wait up to 16s, then kill, archive
        # the run, and clean up controller resources.
        if node_ctrl is not None:
            # stop all nodes
            for node_name in node_ctrl.get_node_names():
                node_ctrl.stop(node_name)
            with Progress("terminating network") as p:
                to = TimeOut(16)
                while len(node_ctrl.get_node_names()) > 0:
                    if to.is_timed_out():
                        break
                    time.sleep(1)
                    p.step()
            # force kill anything left over
            for node_name in node_ctrl.get_node_names():
                print("%s still 'up'; sending kill..." % node_name)
                node_ctrl.kill(node_name)
            node_ctrl.archive('launcher')
            node_ctrl.clean()