def shutdown(self):
    """Stop all managed validators: interrupt, wait briefly, then force-kill.

    Three phases: send a polite shutdown to every running validator,
    give them up to 1 second to exit, then SIGKILL any stragglers.
    """
    if not self._validators:
        # no validators to shutdown
        return
    with Progress("Sending interrupt signal to validators: ") as p:
        for val in self._validators:
            if val.is_running():
                val.shutdown()
            p.step()
    still_running = 0
    to = TimeOut(1)
    with Progress("Giving validators time to shutdown: ") as p:
        while True:
            still_running = sum(
                1 for val in self._validators if val.is_running())
            if to.is_timed_out() or still_running == 0:
                break
            time.sleep(1)
            p.step()
    if still_running != 0:
        with Progress("Killing {} intransigent validators: "
                      .format(still_running)) as p:
            for val in self._validators:
                if val.is_running():
                    # True requests a forced (kill) shutdown
                    val.shutdown(True)
                p.step()
def _do_teardown(self):
    """Tear down the node network, delete the temp dir, and restore env vars.

    Safe to call even if setup only partially completed: every phase is
    guarded by hasattr/getattr checks.
    """
    # python 3 print function (was a python 2 print statement)
    print('destroying', str(self.__class__.__name__))
    if getattr(self, '_node_ctrl', None) is not None:
        # Shut down the network
        with Progress("terminating network") as p:
            for node_name in self._node_ctrl.get_node_names():
                self._node_ctrl.stop(node_name)
            to = TimeOut(16)
            while len(self._node_ctrl.get_node_names()) > 0:
                if to.is_timed_out():
                    break
                time.sleep(1)
                p.step()
        # force kill anything left over
        for node_name in self._node_ctrl.get_node_names():
            try:
                print("%s still 'up'; sending kill..." % node_name)
                self._node_ctrl.kill(node_name)
            except Exception as e:
                # str(e) is portable; Exception.message was removed in py3
                print(str(e))
    # Clean temp dir
    print('deleting temp directory')
    # getattr with a default replaces the original two-step
    # hasattr/reassign dance
    tmp_dir = getattr(self, '_currency_home', None)
    if tmp_dir is not None and os.path.isdir(tmp_dir):
        shutil.rmtree(tmp_dir)
    # Restore environmental vars
    if hasattr(self, '_old_currency_home'):
        print('restoring environmental variables')
        if self._old_currency_home is None:
            # os.unsetenv does not update os.environ; pop does both
            os.environ.pop('CURRENCYHOME', None)
        else:
            os.environ['CURRENCYHOME'] = self._old_currency_home
def validator_shutdown(self, idx, sig, timeout, force):
    '''
    Dispose of validator subprocesses by index
    Args:
        idx (int): which validator (in self.hdls)
        sig (str): valid values: SIG{TERM,INT,KILL}.
        timeout (int): time to wait in seconds
        force (bool): whether to try SIGKILL if another method has not
            worked within timeout seconds.
    Returns:
        None
    '''
    assert isinstance(self.hdls[idx], ValidatorManager)
    cfg = self.net_config.get_node_cfg(idx)
    v_name = cfg['NodeName']
    v = self.hdls[idx]
    print('sending %s to %s' % (sig, v_name))
    if v.is_running():
        if sig == 'SIGTERM':
            v.shutdown(term=True)
        elif sig == 'SIGINT':
            v.shutdown()
        elif sig == 'SIGKILL':
            v.shutdown(force=True)
        else:
            # '%' formatting (original passed the value as a second
            # Exception arg, leaving '%s' unfilled in the message)
            raise Exception('unrecognized argument for sig: %s' % sig)
    # Would be ideal to move the waiting here into threads in self.commit.
    # Then we could shut down several in parallel, and (besides archive
    # collection, which really should be moved to a provider) self.shutdown
    # would basically just be a numpy.zeros update (re-using this code)!
    to = TimeOut(timeout)
    success = False
    ini = time.time()
    with Progress("giving %s %ss to shutdown: " % (v_name, timeout)) as p:
        while success is False:
            if not v.is_running():
                success = True
            elif to.is_timed_out():
                break
            else:
                time.sleep(1)
                p.step()
    dur = time.time() - ini
    if success is False:
        fail_msg = "%s is still running %.2f seconds after %s"
        # report the signal that was sent, not the boolean 'force' flag
        fail_msg = fail_msg % (v_name, dur, sig)
        if force is False or sig == 'SIGKILL':
            raise ValidatorManagerException(fail_msg)
        else:
            timeout = max(4, timeout)
            print('{}; trying SIGKILL, timeout {}...'
                  .format(fail_msg, timeout))
            self.validator_shutdown(idx, 'SIGKILL', timeout, force)
    if success is True:
        print("%s shut down %.2f seconds after %s" % (v_name, dur, sig))
        self.hdls[idx] = None
def validator_shutdown(self, idx, sig, timeout, force):
    '''
    Dispose of validator subprocesses by index
    Args:
        idx (int): which validator (in self.hdls)
        sig (str): valid values: SIG{TERM,INT,KILL}.
        timeout (int): time to wait in seconds
        force (bool): whether to try SIGKILL if another method has not
            worked within timeout seconds.
    Returns:
        None
    '''
    assert isinstance(self.hdls[idx], ValidatorManager)
    cfg = self.net_config.get_node_cfg(idx)
    v_name = cfg['NodeName']
    v = self.hdls[idx]
    print('sending %s to %s' % (sig, v_name))
    if v.is_running():
        if sig == 'SIGTERM':
            v.shutdown(term=True)
        elif sig == 'SIGINT':
            v.shutdown()
        elif sig == 'SIGKILL':
            v.shutdown(force=True)
        else:
            # format into the message (original passed sig as a spare
            # Exception argument, leaving '%s' unfilled)
            raise Exception('unrecognized argument for sig: %s' % sig)
    # Would be ideal to move the waiting here into threads in self.commit.
    # Then we could shut down several in parallel, and (besides archive
    # collection, which really should be moved to a provider) self.shutdown
    # would basically just be a numpy.zeros update (re-using this code)!
    to = TimeOut(timeout)
    success = False
    ini = time.time()
    with Progress("giving %s %ss to shutdown: " % (v_name, timeout)) as p:
        while success is False:
            if not v.is_running():
                success = True
            elif to.is_timed_out():
                break
            else:
                time.sleep(1)
                p.step()
    dur = time.time() - ini
    if success is False:
        fail_msg = "%s is still running %.2f seconds after %s"
        # interpolate the signal, not the boolean 'force' flag
        fail_msg = fail_msg % (v_name, dur, sig)
        if force is False or sig == 'SIGKILL':
            raise ValidatorManagerException(fail_msg)
        else:
            timeout = max(4, timeout)
            print('{}; trying SIGKILL, timeout {}...'.format(
                fail_msg, timeout))
            self.validator_shutdown(idx, 'SIGKILL', timeout, force)
    if success is True:
        print("%s shut down %.2f seconds after %s" % (v_name, dur, sig))
        self.hdls[idx] = None
def _poll_for_convergence(urls):
    """Poll urls until minimally convergent or a 256s deadline passes.

    Args:
        urls (list): validator urls to probe via is_convergent.
    Raises:
        CliException: if convergence is not reached before the timeout.
    """
    to = TimeOut(256)
    convergent = False
    task_str = 'checking for minimal convergence on: {}'.format(urls)
    with Progress(task_str) as p:
        while convergent is False:
            # Check the deadline on every pass. The original only checked
            # it when is_convergent raised, so a reachable-but-divergent
            # network (is_convergent returning False) busy-looped forever
            # with no sleep between probes.
            if to.is_timed_out():
                raise CliException('timed out {}'.format(task_str))
            try:
                convergent = is_convergent(urls, standard=2, tolerance=0)
            except MessageException:
                # validators not responsive yet; keep polling
                pass
            if not convergent:
                p.step()
                time.sleep(4)
def _poll_for_convergence(self, timeout=256, tolerance=2, standard=5):
    """Wait for the network at self.urls to converge, then report status.

    Fails the enclosing test (assertFalse) if the timeout elapses first.
    Returns the final convergence flag.
    """
    convergent = False
    with Progress('awaiting convergence') as p:
        to = TimeOut(timeout)
        while not convergent:
            self.assertFalse(to.is_timed_out(),
                             'timed out awaiting convergence')
            p.step()
            time.sleep(4)
            try:
                convergent = is_convergent(self.urls,
                                           standard=standard,
                                           tolerance=tolerance)
            except MessageException:
                # transient communication failure; try again next pass
                pass
    sit_rep(self.urls, verbosity=1)
    return convergent
def wait_for_registration(self, validators, validator, max_time=None):
    """
    Wait for newly launched validators to register.
    validators: list of validators on which to wait
    validator: running validator against which to verify registration
    """
    if max_time is None:
        max_time = 120
    unregistered_count = len(validators)
    with Progress("{0} waiting for registration of {1} validators".format(
            validator.name, unregistered_count)) as p:
        url = validator.url
        to = TimeOut(max_time)
        while unregistered_count > 0:
            if to():
                raise ExitError(
                    "{} extended validators failed to register "
                    "within {}S.".format(unregistered_count, to.WaitTime))
            p.step()
            time.sleep(1)
            unregistered_count = 0
            for v in validators:
                if not v.is_registered(url):
                    unregistered_count += 1
                    # an unregistered validator may have crashed; surface
                    # its logs before bailing out
                    try:
                        v.check_error()
                    except ValidatorManagerException as vme:
                        v.dump_log()
                        v.dump_stderr()
                        raise ExitError(str(vme))
    return True
def test_join_after_delay_start(self):
    """Verify a delay-started validator can be told to start via its
    /command endpoint and then joins an existing 5-node network."""
    delayed_validator = None
    validator_urls = []
    try:
        self.vnm.launch_network(5)
        validator_urls = self.vnm.urls()
        delayed_validator = self.vnm.launch_node(delay=True)
        time.sleep(5)
        command_url = delayed_validator.url + '/command'
        request = urllib2.Request(
            url=command_url,
            headers={'Content-Type': 'application/json'})
        response = urllib2.urlopen(request, data='{"action": "start"}')
        response.close()
        self.assertEqual(response.code, 200,
                         "Successful post to delayed validator")
        validator_urls.append(delayed_validator.url)
        ledger_web_clients = [
            LedgerWebClient(url=u) for u in validator_urls
        ]
        with Progress("Waiting for registration of 1 validator") as p:
            url = validator_urls[0]
            to = TimeOut(60)
            while not delayed_validator.is_registered(url):
                if to():
                    raise ExitError(
                        "{} delayed validator failed to register "
                        "within {}S.".format(1, to.WaitTime))
                p.step()
                time.sleep(1)
                try:
                    delayed_validator.check_error()
                except ValidatorManagerException as vme:
                    delayed_validator.dump_log()
                    delayed_validator.dump_stderr()
                    raise ExitError(str(vme))
        integer_key_clients = [
            IntegerKeyClient(baseurl=u, keystring=generate_private_key())
            for u in validator_urls
        ]
        for int_key_client in integer_key_clients:
            int_key_client.set(key=str(1), value=20)
        self._verify_equality_of_block_lists(ledger_web_clients)
    finally:
        self.vnm.shutdown()
        # Original used 'validator_urls is not []', which is an identity
        # comparison with a fresh list and therefore always True; test
        # emptiness by truthiness instead.
        if delayed_validator is not None and \
                validator_urls and \
                delayed_validator.url not in validator_urls:
            delayed_validator.shutdown()
        self.vnm.create_result_archive("TestDelayedStart.tar.gz")
def shutdown(self, archive_name=None):
    """Stop all live validator handles, then optionally tar up data_dir.

    archive_name: if given (and data_dir exists and is non-empty),
    write '<archive_name>.tar.gz' containing the data directory files.
    """
    vals = [v for v in self.hdls if v is not None]
    if vals:
        with Progress("Sending interrupt signal to validators: ") as p:
            for v in vals:
                if v.is_running():
                    v.shutdown()
                p.step()
        running_count = 0
        to = TimeOut(5)
        with Progress("Giving validators time to shutdown: ") as p:
            while True:
                running_count = sum(1 for v in vals if v.is_running())
                if to.is_timed_out() or running_count == 0:
                    break
                time.sleep(1)
                p.step()
        if running_count != 0:
            with Progress("Killing {} intransigent validators: "
                          .format(running_count)) as p:
                for v in vals:
                    if v.is_running():
                        v.shutdown(True)
                    p.step()
    if (archive_name is not None and self.data_dir is not None
            and os.path.exists(self.data_dir)
            and len(os.listdir(self.data_dir)) > 0):
        tar = tarfile.open('%s.tar.gz' % archive_name, "w|gz")
        base_name = self.get_archive_base_name(archive_name)
        for (dir_path, _, filenames) in walk(self.data_dir):
            for f in filenames:
                fp = os.path.join(dir_path, f)
                tar.add(fp, os.path.join(base_name, f))
        tar.close()
def shutdown(self, archive_name=None):
    """Shut down every live validator handle; optionally archive data_dir.

    When archive_name is provided and the data directory has content,
    its files are collected into '<archive_name>.tar.gz'.
    """
    live = [h for h in self.hdls if h is not None]
    if len(live) > 0:
        with Progress("Sending interrupt signal to validators: ") as p:
            for handle in live:
                if handle.is_running():
                    handle.shutdown()
                p.step()
        still_running = 0
        to = TimeOut(5)
        with Progress("Giving validators time to shutdown: ") as p:
            while True:
                still_running = 0
                for handle in live:
                    if handle.is_running():
                        still_running += 1
                if to.is_timed_out() or still_running == 0:
                    break
                time.sleep(1)
                p.step()
        if still_running != 0:
            with Progress("Killing {} intransigent validators: ".format(
                    still_running)) as p:
                for handle in live:
                    if handle.is_running():
                        handle.shutdown(True)
                    p.step()
    archivable = (archive_name is not None
                  and self.data_dir is not None
                  and os.path.exists(self.data_dir)
                  and len(os.listdir(self.data_dir)) > 0)
    if archivable:
        tar = tarfile.open('%s.tar.gz' % archive_name, "w|gz")
        base_name = self.get_archive_base_name(archive_name)
        for (dir_path, _, filenames) in walk(self.data_dir):
            for fname in filenames:
                tar.add(os.path.join(dir_path, fname),
                        os.path.join(base_name, fname))
        tar.close()
def _do_teardown(self):
    """Stop every node in the network, force-kill stragglers, and clean up
    the node controller's resources."""
    # python 3 print function (was a python 2 print statement)
    print('destroying', str(self.__class__.__name__))
    if hasattr(self, '_node_ctrl') and self._node_ctrl is not None:
        # Shut down the network
        with Progress("terminating network") as p:
            for node_name in self._node_ctrl.get_node_names():
                self._node_ctrl.stop(node_name)
            to = TimeOut(16)
            while len(self._node_ctrl.get_node_names()) > 0:
                if to.is_timed_out():
                    break
                time.sleep(1)
                p.step()
        # force kill anything left over
        for node_name in self._node_ctrl.get_node_names():
            try:
                print("%s still 'up'; sending kill..." % node_name)
                self._node_ctrl.kill(node_name)
            except Exception as e:
                # str(e) is portable; Exception.message was removed in py3
                print(str(e))
        self._node_ctrl.clean()
def _do_teardown(self):
    """Stop all nodes, force-kill stragglers, archive results, and clean up."""
    print('destroying', str(self.__class__.__name__))
    if hasattr(self, '_node_ctrl') and self._node_ctrl is not None:
        # Shut down the network
        with Progress("terminating network") as p:
            for node_name in self._node_ctrl.get_node_names():
                self._node_ctrl.stop(node_name)
            to = TimeOut(16)
            while len(self._node_ctrl.get_node_names()) > 0:
                if to.is_timed_out():
                    break
                time.sleep(1)
                p.step()
        # force kill anything left over
        for node_name in self._node_ctrl.get_node_names():
            try:
                print("%s still 'up'; sending kill..." % node_name)
                self._node_ctrl.kill(node_name)
            except Exception as e:
                # Exception.message does not exist in python 3; the
                # original would raise AttributeError here
                print(str(e))
        self._node_ctrl.archive(self.__class__.__name__)
        self._node_ctrl.clean()
def _wait_for_transaction_commits(self):
    """Block until all pending transactions commit or 240 seconds elapse.

    Raises:
        Exception: if any transactions remain uncommitted at timeout.
    """
    to = TimeOut(240)
    txn_cnt = len(self.transactions)
    with Progress("Waiting for transactions to commit") as p:
        while not to() and txn_cnt > 0:
            p.step()
            time.sleep(1)
            txn_cnt = self._update_uncommitted_transactions()
    if txn_cnt != 0:
        if len(self.transactions) != 0:
            # python 3 print function (was a python 2 print statement)
            print("Uncommitted transactions: ", self.transactions)
        raise Exception("{} transactions failed to commit in {}s".format(
            txn_cnt, to.WaitTime))
def shutdown(self):
    """Politely stop every validator, then kill any that remain running.

    On Windows, pauses afterwards so the OS notices the dead subprocesses.
    """
    if not self.Validators:
        # no validators to shutdown
        return
    with Progress("Sending shutdown message to validators: ") as p:
        for val in self.Validators:
            if val.is_running():
                val.post_shutdown()
            p.step()
    still_running = 0
    to = TimeOut(10)
    with Progress("Giving validators time to shutdown: ") as p:
        while True:
            still_running = sum(
                1 for val in self.Validators if val.is_running())
            if to.is_timed_out() or still_running == 0:
                break
            time.sleep(1)
            p.step()
    if still_running != 0:
        with Progress("Killing {} intransigent validators: ".format(
                still_running)) as p:
            for val in self.Validators:
                if val.is_running():
                    val.shutdown(True)
                p.step()
    # wait for windows to learn that the subprocess are dead.
    if os.name == "nt":
        time.sleep(5)
def shutdown(self):
    """Request shutdown from all validators, wait up to 10s, then force-kill
    survivors; pause on Windows so subprocess exits are observed."""
    if len(self.Validators) == 0:
        # nothing to do
        return
    with Progress("Sending shutdown message to validators: ") as p:
        for v in self.Validators:
            if v.is_running():
                v.post_shutdown()
            p.step()
    running_count = 0
    to = TimeOut(10)
    with Progress("Giving validators time to shutdown: ") as p:
        while True:
            running_count = 0
            for v in self.Validators:
                if v.is_running():
                    running_count += 1
            if to.is_timed_out() or running_count == 0:
                break
            time.sleep(1)
            p.step()
    if running_count != 0:
        with Progress("Killing {} intransigent validators: "
                      .format(running_count)) as p:
            for v in self.Validators:
                if v.is_running():
                    v.shutdown(True)
                p.step()
    # wait for windows to learn that the subprocess are dead.
    if os.name == "nt":
        time.sleep(5)
def probe_validator(self, validator, max_time=30):
    """Poll a validator until it reports started.

    Args:
        validator: the validator manager to probe (is_started()).
        max_time (int): seconds to wait before giving up.
    Raises:
        ExitError: if the validator does not start within max_time.
    """
    with Progress("probing status of {0}".format(validator.name)) as p:
        to = TimeOut(max_time)
        success = False
        while success is False:
            if to():
                raise ExitError(
                    "{} failed to initialize within {}S.".format(
                        validator.name, to.WaitTime))
            try:
                success = validator.is_started()
            except Exception as e:
                # str(e) is portable; Exception.message was removed in py3
                print(str(e))
            p.step()
            time.sleep(1)
def extend_genesis_util(self, overrides):
    """Run the genesis utility on a 2-node network and verify the network
    extends, and converges on, the generated genesis block."""
    # python 3 print function (originals were python 2 print statements)
    print()
    vnm = None
    # bind before the try so the finally clause can never hit a NameError
    ledger_type = None
    try:
        vnm = get_default_vnm(2, overrides=overrides)
        # Test genesis util
        cfg = vnm.get_configuration(0)
        ledger_type = cfg['LedgerType']
        gblock_file = genesis_info_file_name(cfg['DataDirectory'])
        self.assertFalse(os.path.exists(gblock_file))
        vnm.do_genesis()
        self.assertTrue(os.path.exists(gblock_file))
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        self.assertTrue('GenesisId' in genesis_dat.keys())
        head = genesis_dat['GenesisId']
        # Verify genesis tool efficacy on a minimal network
        vnm.launch()
        # ...verify validator is extending tgt_block
        to = TimeOut(64)
        blk_lists = None
        prog_str = 'testing root extension (expect root: %s)' % head
        with Progress(prog_str) as p:
            print()
            while not to.is_timed_out() and blk_lists is None:
                try:
                    blk_lists = get_blocklists(['http://localhost:8800'])
                    print('block_lists: %s' % blk_lists)
                    if len(blk_lists) < 1 or len(blk_lists[0]) < 2:
                        blk_lists = None
                except MessageException:
                    # validator not responsive yet; retry until timeout
                    pass
                time.sleep(2)
                p.step()
        self.assertIsNotNone(blk_lists)
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        # ...verify general convergence
        to = TimeOut(32)
        with Progress('testing root convergence') as p:
            print()
            while (is_convergent(vnm.urls(), tolerance=1, standard=1)
                    is False and not to.is_timed_out()):
                time.sleep(2)
                p.step()
        # ...verify convergence on the genesis block
        blk_lists = get_blocklists(['http://localhost:8800'])
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        print('network converged on root: %s' % root)
    finally:
        if vnm is not None:
            # guard against ledger_type never being assigned if
            # get_configuration raised
            archive_name = 'Test%sGenesisResults' % (
                ledger_type or '').upper()
            vnm.shutdown(archive_name=archive_name)
def _wait_for_no_transaction_commits(self):
    """Verify that none of the pending transactions commit within 120s."""
    # for the case where no transactions are expected to commit
    to = TimeOut(120)
    starting_txn_cnt = len(self.transactions)
    remaining_txn_cnt = starting_txn_cnt
    with Progress("Waiting for no transactions to commit") as p:
        while not to() and remaining_txn_cnt > 0:
            p.step()
            time.sleep(1)
            # refreshes self.transactions as a side effect
            self._has_uncommitted_transactions()
            remaining_txn_cnt = len(self.transactions)
    if starting_txn_cnt != remaining_txn_cnt:
        raise Exception(
            "{} unexpected transaction commits after {}s".format(
                starting_txn_cnt - remaining_txn_cnt, to.WaitTime))
def launch_network(self, count=1):
    """Boot a genesis validator, launch count-1 peers against it, and wait
    for every validator to register."""
    with Progress("Launching initial validator") as p:
        self.ValidatorConfig['LedgerURL'] = "**none**"
        self.ValidatorConfig['GenesisLedger'] = True
        if self.blockChainArchive is not None:
            self.ValidatorConfig['Restore'] = True
        validator = self.launch_node()
        while not validator.is_registered():
            if validator.has_error():
                validator.dump_log()
                validator.dump_stderr()
                raise ExitError("Initial validator crashed.")
            p.step()
            time.sleep(1)
    with Progress("Launching validator network") as p:
        self.ValidatorConfig['LedgerURL'] = validator.Url
        self.ValidatorConfig['GenesisLedger'] = False
        self.ValidatorConfig['Restore'] = False
        for _ in range(1, count):
            self.launch_node()
            p.step()
    with Progress("Waiting for validator registration") as p:
        unreg_count = len(self.Validators)
        url = validator.Url
        to = TimeOut(120)
        while unreg_count > 0:
            if to():
                raise ExitError(
                    "{} validators failed to register within {}S.".format(
                        unreg_count, to.WaitTime))
            p.step()
            time.sleep(1)
            unreg_count = 0
            for v in self.Validators:
                if not v.is_registered(url):
                    unreg_count += 1
                    # an unregistered validator may simply have crashed
                    if v.has_error():
                        v.dump_log()
                        v.dump_stderr()
                        raise ExitError(
                            "{} crashed during initialization.".format(
                                v.Name))
def wait_for_transaction_commits(self):
    """Wait up to 120s for every actor's transactions to commit.

    Raises:
        Exception: if uncommitted transactions remain at timeout.
    """
    to = TimeOut(120)
    txn_cnt = 1
    with Progress("Waiting for transactions to commit") as p:
        while not to() and txn_cnt > 0:
            p.step()
            time.sleep(1)
            txn_cnt = 0
            for a in self.Actors:
                txn_cnt += a.has_uncommitted_transactions()
    if txn_cnt != 0:
        for a in self.Actors:
            if len(a.transactions) != 0:
                # python 3 print function (was a python 2 print statement)
                print("Uncommitted transactions: ", a.Name, a.transactions)
        raise Exception("{} transactions failed to commit in {}s".format(
            txn_cnt, to.WaitTime))
def _wait_for_no_transaction_commits(self):
    """Verify that transactions with missing dependencies do NOT commit
    within 120 seconds.

    Raises:
        Exception: if any such transaction was committed.
    """
    # for the case where no transactions are expected to commit
    to = TimeOut(120)
    starting_txn_count = len(self.transactions)
    remaining_txn_cnt = len(self.transactions)
    with Progress("Waiting for transactions to NOT commit") as p:
        while not to() and remaining_txn_cnt > 0:
            p.step()
            time.sleep(1)
            remaining_txn_cnt = self._update_uncommitted_transactions()
    if remaining_txn_cnt != starting_txn_count:
        committed_txn_count = starting_txn_count - remaining_txn_cnt
        raise Exception("{} transactions with missing dependencies "
                        "were committed in {}s".format(
                            committed_txn_count, to.WaitTime))
    else:
        # python 3 print function (was a python 2 print statement)
        print("No transactions with missing dependencies "
              "were committed in {0}s".format(to.WaitTime))
def test_validator_shutdown_sigkill_restart_ext(self):
    """SIGKILL validator 4, keep transacting on the rest, shut the network
    down, relaunch validator 4, and verify it restores its chain from the
    local database."""
    try:
        keys = 10
        rounds = 2
        txn_intv = 0
        timeout = 5
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = self.urls
        self.assertEqual(5, len(urls))
        test.setup(self.urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        to = TimeOut(timeout)
        convergent = False
        with Progress("Checking for validators convergence") as p:
            # 'and', not 'or': with 'or' the loop continued whenever the
            # network was not yet convergent, so the timeout could never
            # end it and a divergent network hung the test forever
            while convergent is False and not to.is_timed_out():
                time.sleep(timeout)
                p.step()
                convergent = is_convergent(self.urls,
                                           tolerance=2,
                                           standard=5)
        self.assertTrue(convergent, "All validators are "
                                    "not on the same chain.")
        print("all validators are on the same chain")
        report_before_shutdown = sit_rep(self.urls, verbosity=1)
        validator_report = report_before_shutdown[4]
        valid_dict_value = validator_report['Status']
        validator_blocks_shutdown = valid_dict_value['Blocks']
        print("validator_blocks", validator_blocks_shutdown)
        print("shutdown validator 4 w/ SIGKILL")
        node_names = self.node_controller.get_node_names()
        node_names.sort()
        self.node_controller.kill(node_names[4])
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 4:
            if to.is_timed_out():
                self.fail("Timed Out")
        print('check state of validators:')
        sit_rep(self.urls[:-1], verbosity=2)
        print("sending more txns after SIGKILL")
        urls = self.urls[:-1]
        self.assertEqual(4, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("turn off entire validator network")
        for node in node_names:
            self.node_controller.stop(node)
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 0:
            if to.is_timed_out():
                self.fail("Timed Out")
        print("relaunch validator 4")
        self.node_controller.start(self.nodes[4])
        report_after_relaunch = None
        to = TimeOut(120)
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep([self.urls[4]], verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        validator_report = report_after_relaunch[0]
        valid_dict_value = validator_report['Status']
        validator_blocks_relaunch = valid_dict_value['Blocks']
        print("validator_blocks_relaunch", validator_blocks_relaunch)
        if len(validator_blocks_relaunch) == \
                len(validator_blocks_shutdown):
            if validator_blocks_shutdown == validator_blocks_relaunch:
                print("relaunched validator restored from local db")
            else:
                for i in range(0, len(validator_blocks_shutdown)):
                    self.assertEqual(
                        validator_blocks_relaunch[i],
                        validator_blocks_shutdown[i],
                        "relaunched validator didn't"
                        " restore fr local db")
                    # only the first mismatch candidate is checked
                    break
            print("relaunched validator restored from local database")
    finally:
        print("restart validators ")
        for node in self.nodes:
            self.node_controller.start(node)
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) < 5:
            if to.is_timed_out():
                self.fail("Timed Out")
        report_after_relaunch = None
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep(self.urls, verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        print("No validators")
def test_validator_shutdown_sigkill_restart_ext(self):
    """SIGKILL validator 4, gracefully stop the rest, relaunch validator 4
    with zero initial connectivity, and verify it restores its chain from
    the local database."""
    # python 3 print function (originals were python 2 print statements)
    print()
    # bind before the try so the finally clause cannot raise NameError
    # when get_default_vnm itself fails
    vnm = None
    try:
        print("launching a validator network of 5")
        vnm = get_default_vnm(5)
        vnm.do_genesis()
        vnm.launch()
        keys = 10
        rounds = 2
        txn_intv = 0
        timeout = 5
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = vnm.urls()
        self.assertEqual(5, len(urls))
        test.setup(vnm.urls(), keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        to = TimeOut(timeout)
        convergent = False
        with Progress("Checking for validators convergence") as p:
            # 'and', not 'or': otherwise the timeout can never end the
            # loop and a divergent network hangs the test forever
            while convergent is False and not to.is_timed_out():
                time.sleep(timeout)
                p.step()
                convergent = is_convergent(vnm.urls(),
                                           tolerance=2,
                                           standard=5)
        self.assertTrue(convergent, "All validators are "
                                    "not on the same chain.")
        print("all validators are on the same chain")
        sit_rep(vnm.urls(), verbosity=1)
        report_before_shutdown = sit_rep(vnm.urls(), verbosity=1)
        validator_report = report_before_shutdown[4]
        valid_dict_value = validator_report['Status']
        validator_blocks_shutdown = valid_dict_value['Blocks']
        print("validator_blocks", validator_blocks_shutdown)
        print("shutdown validator 4 w/ SIGKILL")
        vnm.deactivate_node(4, sig='SIGKILL', timeout=8)
        print('check state of validators:')
        sit_rep(vnm.urls(), verbosity=2)
        print("sending more txns after SIGKILL")
        urls = vnm.urls()
        self.assertEqual(4, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("turn off entire validator network")
        for i in range(0, 4):
            vnm.deactivate_node(i, sig='SIGINT', timeout=8, force=False)
        print('check state of validators after graceful shutdown:')
        sit_rep(vnm.urls(), verbosity=2)
        # set InitialConnectivity of individual
        # node to zero before relaunching
        cfg = vnm.get_configuration(4)
        cfg['InitialConnectivity'] = 0
        vnm.set_configuration(4, cfg)
        print("relaunch validator 4")
        vnm.activate_node(4)
        report_after_relaunch = sit_rep(vnm.urls(), verbosity=1)
        validator_report = report_after_relaunch[0]
        valid_dict_value = validator_report['Status']
        validator_blocks_relaunch = valid_dict_value['Blocks']
        print("validator_blocks_relaunch", validator_blocks_relaunch)
        if len(validator_blocks_relaunch) == \
                len(validator_blocks_shutdown):
            if validator_blocks_shutdown == validator_blocks_relaunch:
                print("relaunched validator restored from local db")
            else:
                for i in range(0, len(validator_blocks_shutdown)):
                    self.assertEqual(validator_blocks_relaunch[i],
                                     validator_blocks_shutdown[i],
                                     "relaunched validator didn't"
                                     " restore fr local db")
                    break
            print("relaunched validator restored from local database")
    finally:
        if vnm is not None:
            # shutting down validator network
            vnm.shutdown(archive_name='TestValidatorShutdownRestore')
def extend_genesis_util(self, ledger_type, pre_overrides, post_overrides):
    """Run the '<ledger_type>-genesis' admin CLI against node 0 of a 2-node
    simulated network and verify the network extends, and converges on,
    the generated genesis block."""
    # python 3 print function (originals were python 2 print statements)
    print()
    top = None
    try:
        # Get configs and resources for a ValidatorManager compliant nodes
        top = get_default_sim_controller(2, ledger_type=ledger_type)
        # Set up validator-0
        cfg = top.get_configuration(0)
        cfg.update(pre_overrides)
        top.set_configuration(0, cfg)
        config_file = top.write_configuration(0)
        # Test genesis tool
        print('testing genesis util...')
        gblock_file = genesis_info_file_name(cfg['DataDirectory'])
        self.assertFalse(os.path.exists(gblock_file))
        cli_args = 'admin %s-genesis --config %s' % (ledger_type,
                                                     config_file)
        sawtooth_cli_intercept(cli_args)
        # Get genesis block id
        self.assertTrue(os.path.exists(gblock_file))
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        self.assertTrue('GenesisId' in genesis_dat.keys())
        head = genesis_dat['GenesisId']
        # Verify genesis tool efficacy on a minimal network
        print('testing efficacy...')
        # ...apply validator-related overrides to validator-0
        cfg = top.get_configuration(0)
        cfg.update(post_overrides)
        top.set_configuration(0, cfg)
        # ...launch entire network
        top.launch(probe_seconds=0, reg_seconds=0)
        # ...verify validator is extending tgt_block
        to = TimeOut(64)
        blk_lists = None
        prog_str = 'testing root extension (expect root: %s)' % head
        with Progress(prog_str) as p:
            print()
            while not to.is_timed_out() and blk_lists is None:
                try:
                    blk_lists = get_blocklists(['http://localhost:8800'])
                    print('block_lists: %s' % blk_lists)
                    if len(blk_lists) < 1 or len(blk_lists[0]) < 2:
                        blk_lists = None
                except MessageException:
                    # validator not responsive yet; retry until timeout
                    pass
                time.sleep(2)
                p.step()
        self.assertIsNotNone(blk_lists)
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        # ...verify general convergence
        to = TimeOut(32)
        with Progress('testing root convergence') as p:
            print()
            while (is_convergent(top.urls(), tolerance=1, standard=1)
                    is False and not to.is_timed_out()):
                time.sleep(2)
                p.step()
        # ...verify convergence on the genesis block
        blk_lists = get_blocklists(['http://localhost:8800'])
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        print('network converged on root: %s' % root)
    finally:
        if top is not None:
            archive_name = 'Test%sGenesisResults' % ledger_type.upper()
            top.shutdown(archive_name=archive_name)
def extend_genesis_util(self, overrides):
    """Create a genesis block via the node controller, launch a 2-node
    network, and verify extension of / convergence on the genesis root;
    always archives node output and cleans up."""
    print()
    try:
        self._node_ctrl = None
        print('creating', str(self.__class__.__name__))
        # set up our nodes (suite-internal interface)
        self._node_ctrl = WrappedNodeController(
            SubprocessLegacyNodeController())
        cfg = overrides
        temp_dir = self._node_ctrl.get_data_dir()
        file_name = os.path.join(temp_dir, "config.js")
        with open(file_name, 'w') as config:
            config.write(json.dumps(cfg))
        data_dir = os.path.join(temp_dir, "data")
        gblock_file = genesis_info_file_name(data_dir)
        self._nodes = [
            NodeArguments('v%s' % i, 8800 + i, 9000 + i,
                          config_files=[file_name],
                          ledger_type=overrides["LedgerType"])
            for i in range(2)]
        # set up our urls (external interface)
        self.urls = [
            'http://localhost:%s' % x.http_port for x in self._nodes]
        # Make genesis block
        print('creating genesis block...')
        self.assertFalse(os.path.exists(gblock_file))
        self._nodes[0].genesis = True
        self._node_ctrl.create_genesis_block(self._nodes[0])
        # Test genesis util
        self.assertTrue(os.path.exists(gblock_file))
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        self.assertTrue('GenesisId' in genesis_dat.keys())
        head = genesis_dat['GenesisId']
        # Verify genesis tool efficacy on a minimal network
        # Launch network (node zero will trigger bootstrapping)
        print('launching network...')
        for x in self._nodes:
            self._node_ctrl.start(x)
        # ...verify validator is extending tgt_block
        to = TimeOut(64)
        blk_lists = None
        prog_str = 'testing root extension (expect root: %s)' % head
        with Progress(prog_str) as p:
            print()
            while not to.is_timed_out() and blk_lists is None:
                try:
                    blk_lists = get_blocklists(['http://localhost:8800'])
                    print('block_lists: %s' % blk_lists)
                    if len(blk_lists) < 1 or len(blk_lists[0]) < 2:
                        blk_lists = None
                except MessageException:
                    # network not responsive yet; retry until timeout
                    pass
                time.sleep(2)
                p.step()
        self.assertIsNotNone(blk_lists)
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        # ...verify general convergence
        to = TimeOut(32)
        with Progress('testing root convergence') as p:
            print()
            while (is_convergent(self.urls, tolerance=1, standard=1)
                    is False and not to.is_timed_out()):
                time.sleep(2)
                p.step()
        # ...verify convergence on the genesis block
        blk_lists = get_blocklists(['http://localhost:8800'])
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        print('network converged on root: %s' % root)
    finally:
        print('destroying', str(self.__class__.__name__))
        if hasattr(self, '_node_ctrl') and self._node_ctrl is not None:
            # Shut down the network
            with Progress("terminating network") as p:
                for node_name in self._node_ctrl.get_node_names():
                    self._node_ctrl.stop(node_name)
                to = TimeOut(16)
                while len(self._node_ctrl.get_node_names()) > 0:
                    if to.is_timed_out():
                        break
                    time.sleep(1)
                    p.step()
            # force kill anything left over
            for node_name in self._node_ctrl.get_node_names():
                try:
                    print("%s still 'up'; sending kill..." % node_name)
                    self._node_ctrl.kill(node_name)
                except Exception as e:
                    # Exception.message does not exist in python 3;
                    # the original would raise AttributeError here
                    print(str(e))
            self._node_ctrl.archive(self.__class__.__name__)
            self._node_ctrl.clean()
def test_validator_shutdown_sigkill_restart_ext(self):
    """SIGKILL validator 4, keep transacting on the rest, shut the network
    down, relaunch validator 4, and verify it restores its chain from the
    local database."""
    # python 3 print function (originals were python 2 print statements)
    try:
        keys = 10
        rounds = 2
        txn_intv = 0
        timeout = 5
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = self.urls
        self.assertEqual(5, len(urls))
        test.setup(self.urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        to = TimeOut(timeout)
        convergent = False
        with Progress("Checking for validators convergence") as p:
            # 'and', not 'or': otherwise the timeout can never end the
            # loop and a divergent network hangs the test forever
            while convergent is False and not to.is_timed_out():
                time.sleep(timeout)
                p.step()
                convergent = is_convergent(self.urls,
                                           tolerance=2,
                                           standard=5)
        self.assertTrue(convergent, "All validators are "
                                    "not on the same chain.")
        print("all validators are on the same chain")
        report_before_shutdown = sit_rep(self.urls, verbosity=1)
        validator_report = report_before_shutdown[4]
        valid_dict_value = validator_report['Status']
        validator_blocks_shutdown = valid_dict_value['Blocks']
        print("validator_blocks", validator_blocks_shutdown)
        print("shutdown validator 4 w/ SIGKILL")
        node_names = self.node_controller.get_node_names()
        node_names.sort()
        self.node_controller.kill(node_names[4])
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 4:
            if to.is_timed_out():
                self.fail("Timed Out")
        print('check state of validators:')
        sit_rep(self.urls[:-1], verbosity=2)
        print("sending more txns after SIGKILL")
        urls = self.urls[:-1]
        self.assertEqual(4, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("turn off entire validator network")
        for node in node_names:
            self.node_controller.stop(node)
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 0:
            if to.is_timed_out():
                self.fail("Timed Out")
        print("relaunch validator 4")
        self.node_controller.start(self.nodes[4])
        report_after_relaunch = None
        to = TimeOut(120)
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep([self.urls[4]], verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        validator_report = report_after_relaunch[0]
        valid_dict_value = validator_report['Status']
        validator_blocks_relaunch = valid_dict_value['Blocks']
        print("validator_blocks_relaunch", validator_blocks_relaunch)
        if len(validator_blocks_relaunch) == \
                len(validator_blocks_shutdown):
            if validator_blocks_shutdown == validator_blocks_relaunch:
                print("relaunched validator restored from local db")
            else:
                for i in range(0, len(validator_blocks_shutdown)):
                    self.assertEqual(validator_blocks_relaunch[i],
                                     validator_blocks_shutdown[i],
                                     "relaunched validator didn't"
                                     " restore fr local db")
                    break
            print("relaunched validator restored from local database")
    finally:
        print("restart validators ")
        for node in self.nodes:
            self.node_controller.start(node)
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) < 5:
            if to.is_timed_out():
                self.fail("Timed Out")
        report_after_relaunch = None
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep(self.urls, verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        print("No validators")
def test_validator_shutdown_restart_ext(self):
    """Stop validator 4 with SIGTERM, keep loading transactions on the
    remaining four validators, then relaunch validator 4 and verify the
    network accepts load again with all five validators.
    """
    try:
        keys = 10
        rounds = 2
        txn_intv = 0
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = self.urls
        self.assertEqual(5, len(urls))
        test.setup(self.urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("test validator shutdown w/ SIGTERM")
        node_names = self.node_controller.get_node_names()
        node_names.sort()
        self.node_controller.stop(node_names[4])
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 4:
            if to.is_timed_out():
                self.fail("Timed Out")
            time.sleep(1)  # avoid a busy-wait while polling
        print('check state of validators:')
        sit_rep(self.urls[:-1], verbosity=2)
        print("sending more txns after SIGTERM")
        urls = self.urls[:-1]
        self.assertEqual(4, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print(("relaunching removed_validator", 4))
        self.node_controller.start(self.nodes[4])
        to = TimeOut(120)
        # BUG FIX: the original waited for '< 1' node names, which was
        # already False here (four validators were still running), so
        # the loop never actually waited for the relaunched node.
        while len(self.node_controller.get_node_names()) < 5:
            if to.is_timed_out():
                self.fail("Timed Out")
            time.sleep(1)  # avoid a busy-wait while polling
        report_after_relaunch = None
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep([self.urls[4]], verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        print('check state of validators:')
        sit_rep(self.urls, verbosity=2)
        if is_convergent(self.urls, tolerance=2, standard=5) is True:
            print("all validators are on the same chain")
        else:
            print("all validators are not on the same chain")
        print("sending more txns after relaunching validator 4")
        urls = self.urls
        self.assertEqual(5, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
    finally:
        print("No validators")
def main():
    """CLI entry point: configure, create a genesis block, stage-launch a
    subprocess validator network in batches, then run the stats console.

    Always tears the network down, archives logs, and cleans the data
    directory on exit (including Ctrl-C and unexpected errors).
    """
    node_ctrl = None
    try:
        opts = configure(sys.argv[1:])
    except Exception as e:
        print(str(e), file=sys.stderr)
        sys.exit(1)
    try:
        count = opts['count']
        # log_config = NEED
        currency_home = opts['data_dir']
        http_port = int(opts['http_port'])
        gossip_port = int(opts['port'])
        try:
            ledger_type = opts["validator_config"]["LedgerType"]
        except KeyError:
            # None defaults to poet1
            ledger_type = None
        node_ctrl = WrappedNodeController(SubprocessNodeController(),
                                          data_dir=currency_home)
        nodes = []
        for idx in range(count):
            node = NodeArguments("validator-{:0>3}".format(idx),
                                 http_port=http_port + idx,
                                 gossip_port=gossip_port + idx,
                                 ledger_type=ledger_type)
            nodes.append(node)
        currency_home = node_ctrl.get_data_dir()
        # Write a shared log config and point every validator at it.
        if opts['log_config_dict']:
            file_name = 'launcher_cli_global_log_config.js'
            full_name = '{}/etc/{}'.format(currency_home, file_name)
            with open(full_name, 'w') as f:
                f.write(json.dumps(opts['log_config_dict'], indent=4))
            opts['validator_config']['LogConfigFile'] = full_name
        # Write a shared validator config consumed by every node.
        if opts['validator_config']:
            file_name = 'launcher_cli_global_validator_config.js'
            with open('{}/etc/{}'.format(currency_home, file_name),
                      'w') as f:
                f.write(json.dumps(opts['validator_config'], indent=4))
            for nd in nodes:
                nd.config_files.append(file_name)
        # set up our urls (external interface)
        urls = ['http://localhost:%s' % x.http_port for x in nodes]
        # Make genesis block
        print('creating genesis block...')
        nodes[0].genesis = True
        node_ctrl.create_genesis_block(nodes[0])
        # Launch network (node zero will trigger bootstrapping); start
        # validators in batches and wait for each batch to converge
        # before launching the next.
        batch_size = 8
        print('staged-launching network (batch_size: {})...'
              .format(batch_size))
        lower_bound = 0
        while lower_bound < count:
            upper_bound = lower_bound + min(count - lower_bound,
                                            batch_size)
            for idx in range(lower_bound, upper_bound):
                print("launching {}".format(nodes[idx].node_name))
                node_ctrl.start(nodes[idx])
            _poll_for_convergence(urls[lower_bound:upper_bound])
            lower_bound = upper_bound
        run_stats(urls[0])
    except KeyboardInterrupt:
        print("\nExiting")
    except ExitError as e:
        # this is an expected error/exit, don't print stack trace -
        # the code raising this exception is expected to have printed
        # the error details
        print("\nFailed!\nExiting: {}".format(e))
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed
        # SystemExit and GeneratorExit.
        traceback.print_exc()
        print("\nFailed!\nExiting: {}".format(sys.exc_info()[0]))
    finally:
        if node_ctrl is not None:
            # stop all nodes
            for node_name in node_ctrl.get_node_names():
                node_ctrl.stop(node_name)
            with Progress("terminating network") as p:
                to = TimeOut(16)
                while len(node_ctrl.get_node_names()) > 0:
                    if to.is_timed_out():
                        break
                    time.sleep(1)
                    p.step()
            # Force kill anything left over. BUG FIX: a failed kill no
            # longer aborts the cleanup, so archive() and clean() always
            # run (matches _do_teardown's behavior).
            for node_name in node_ctrl.get_node_names():
                try:
                    print("%s still 'up'; sending kill..." % node_name)
                    node_ctrl.kill(node_name)
                except Exception as e:
                    print(e)
            node_ctrl.archive('launcher')
            node_ctrl.clean()
def run(self):
    """Drive each actor through `self.iterations` rounds of posting and
    accepting exchange offers, polling global state between rounds.

    Loops until every actor has completed its iterations and no offers
    or unvalidated transactions remain, or the 180s TimeOut expires
    (note: `to()` is called, so TimeOut is used as a callable here).
    Raises Exception on timeout; any other exception is logged and
    re-raised. On success, blocks until all transactions commit.
    """
    # pylint: disable=too-many-nested-blocks
    try:
        # Count participating actors and reset their round counters.
        rem = 0
        for a in self.Actors:
            a.iteration = 0
            rem += 1
        self.state.fetch()
        to = TimeOut(180)
        while rem != 0 and not to():
            # Phase 1: each actor with no pending transactions posts a
            # new round of offers: one offer to every other actor for
            # every (their asset, my asset) pair.
            for a in self.Actors:
                a.update()
                if (a.iteration < self.iterations
                        and not a.has_uncommitted_transactions()):
                    a.iteration += 1
                    for a2 in self.Actors:
                        if a is not a2:
                            for ast in a.assets.keys():  # my assets (paying with)
                                for ast2 in a2.assets.keys():  # Their assets (purchasing)
                                    txn_id = a.register_exchange_offer(
                                        ast2, 1, ast, 1)
                                    # Track the offer on the counterparty
                                    # so it can accept it in phase 2.
                                    a2.offers.append(txn_id)
                                    print "{} Offering {} for {} txn: " \
                                        "{}".format(a.Name, ast2, ast,
                                                    txn_id)
            # Phase 2: each idle actor accepts any committed offer that
            # targets one of its holdings and that it was offered.
            for a in self.Actors:
                # Find any exchange offers for one of my assets.
                if not a.has_uncommitted_transactions():
                    for ast, astId in a.assets.iteritems():
                        # Holdings backed by this asset...
                        by_type = mktplace_state.Filters.matchtype(
                            'Holding')
                        by_asset = mktplace_state.Filters.matchvalue(
                            'asset', astId)
                        holding_ids = self.state.lambdafilter(
                            by_type, by_asset)
                        # ...and offers whose input is one of them.
                        filters = [
                            mktplace_state.Filters.offers(),
                            mktplace_state.Filters.references(
                                'input', holding_ids)
                        ]
                        offerids = a.state.lambdafilter(*filters)
                        for o in offerids:
                            # Accept only offers that are committed to
                            # state AND were addressed to this actor.
                            if o in a.state.State and \
                                    o in a.offers:
                                txn = a.exchange(o)
                                a.offers.remove(o)
                                print "{} accepting offer: {} with txn: " \
                                    "{}".format(a.Name, o, txn)
            # Refresh global state and recompute the amount of work
            # outstanding: unfinished actors + open offers + uncommitted
            # transactions. Loop exits only when all three reach zero.
            self.state.fetch()
            for a in self.Actors:
                a.update()
            rem = 0
            transactions = []
            for a in self.Actors:
                if a.iteration != self.iterations:
                    rem += 1
                transactions += a.transactions
            filters = [mktplace_state.Filters.offers()]
            offer_ids = self.state.lambdafilter(*filters)
            print "Agents remaining: {}, offers remaining: {}, " \
                "unvalidated transactions: {}" \
                .format(rem, len(offer_ids), transactions)
            rem += len(offer_ids) + len(transactions)
            if rem:
                time.sleep(1)
        # Timed out: report which actors still hold uncommitted work.
        if to():
            for a in self.Actors:
                if len(a.transactions) != 0:
                    print "Uncommitted transactions: ", a.Name, \
                        a.transactions
            raise Exception(
                "Failed to create all exchangeoffers and accept all "
                "exchanges in {}s ".format(to.WaitTime))
    except Exception as e:
        print "Exception: ", e
        raise
    self.wait_for_transaction_commits()
def test_validator_shutdown_restart_restore_ext(self):
    """Stop the entire network, relaunch only validator 0, and verify it
    restores its pre-shutdown blocks from its local database.

    The finally clause restarts the full 5-node network regardless of
    test outcome.
    """
    try:
        keys = 10
        rounds = 2
        txn_intv = 0
        timeout = 20
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = self.urls
        self.assertEqual(5, len(urls))
        test.setup(self.urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        to = TimeOut(timeout)
        convergent = False
        with Progress("Checking for validators convergence") as p:
            # BUG FIX: the original condition used 'or', which kept
            # polling for the full timeout even after convergence.
            while convergent is False and not to.is_timed_out():
                time.sleep(5)
                p.step()
                convergent = is_convergent(self.urls, tolerance=2,
                                           standard=5)
        self.assertTrue(convergent, "All validators are "
                                    "not on the same chain.")
        print("all validators are on the same chain")
        sit_rep(self.urls, verbosity=1)
        # Remember validator 2's block list for the post-relaunch check.
        report_before_shutdown = sit_rep(self.urls, verbosity=1)
        validator_report = report_before_shutdown[2]
        valid_dict_value = validator_report['Status']
        validator_blocks_shutdown = valid_dict_value['Blocks']
        print("validator_blocks", validator_blocks_shutdown)
        print("turn off entire validator network")
        nodes_names = self.node_controller.get_node_names()
        for node in nodes_names:
            self.node_controller.stop(node)
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 0:
            if to.is_timed_out():
                self.fail("Timed Out")
            time.sleep(1)  # avoid a busy-wait while polling
        print("relaunch validator 0")
        self.node_controller.start(self.nodes[0])
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) < 1:
            if to.is_timed_out():
                self.fail("Timed Out")
            time.sleep(1)  # avoid a busy-wait while polling
        report_after_relaunch = None
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep([self.urls[0]], verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        report_after_relaunch = sit_rep([self.urls[0]], verbosity=1)
        validator_report = report_after_relaunch[0]
        valid_dict_value = validator_report['Status']
        validator_blocks_relaunch = valid_dict_value['Blocks']
        print("validator_blocks_relaunch", validator_blocks_relaunch)
        # the length of post-shutdown validator blocks might be bigger
        # than the length of pre-shutdown validator blocks
        for i in range(0, len(validator_blocks_shutdown)):
            self.assertEqual(validator_blocks_relaunch[i],
                             validator_blocks_shutdown[i],
                             "mismatch in post-shutdown validator blocks. "
                             "Validator didn't restore fr local db")
            break
        print("relaunched validator restored from local database")
    finally:
        print("restart validators ")
        for node in self.nodes:
            self.node_controller.start(node)
        to = TimeOut(120)
        # BUG FIX: the original spun on 'pass' with no timeout check,
        # risking an infinite busy-loop if a node never came back.
        while len(self.node_controller.get_node_names()) < 5:
            if to.is_timed_out():
                self.fail("Timed Out")
            time.sleep(1)
        report_after_relaunch = None
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep(self.urls, verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
def test_validator_shutdown_restart_ext(self): try: keys = 10 rounds = 2 txn_intv = 0 print "Testing transaction load." test = IntKeyLoadTest() urls = self.urls self.assertEqual(5, len(urls)) test.setup(self.urls, keys) test.run(keys, rounds, txn_intv) test.validate() print "test validator shutdown w/ SIGTERM" node_names = self.node_controller.get_node_names() node_names.sort() self.node_controller.stop(node_names[4]) to = TimeOut(120) while len(self.node_controller.get_node_names()) > 4: if to.is_timed_out(): self.fail("Timed Out") print 'check state of validators:' sit_rep(self.urls[:-1], verbosity=2) print "sending more txns after SIGTERM" urls = self.urls[:-1] self.assertEqual(4, len(urls)) test.setup(urls, keys) test.run(keys, rounds, txn_intv) test.validate() print ("relaunching removed_validator", 4) self.node_controller.start(self.nodes[4]) to = TimeOut(120) while len(self.node_controller.get_node_names()) < 1: if to.is_timed_out(): self.fail("Timed Out") report_after_relaunch = None while report_after_relaunch is None: try: report_after_relaunch = \ sit_rep([self.urls[4]], verbosity=1) except MessageException: if to.is_timed_out(): self.fail("Timed Out") time.sleep(4) print 'check state of validators:' sit_rep(self.urls, verbosity=2) if is_convergent(self.urls, tolerance=2, standard=5) is True: print "all validators are on the same chain" else: print "all validators are not on the same chain" print "sending more txns after relaunching validator 4" urls = self.urls self.assertEqual(5, len(urls)) test.setup(urls, keys) test.run(keys, rounds, txn_intv) test.validate() finally: print "No validators"
def test_validator_shutdown_restart_restore_ext(self): try: keys = 10 rounds = 2 txn_intv = 0 timeout = 20 print "Testing transaction load." test = IntKeyLoadTest() urls = self.urls self.assertEqual(5, len(urls)) test.setup(self.urls, keys) test.run(keys, rounds, txn_intv) test.validate() to = TimeOut(timeout) convergent = False with Progress("Checking for validators convergence") as p: while convergent is False or not to.is_timed_out(): time.sleep(5) p.step() convergent = is_convergent(self.urls, tolerance=2, standard=5) self.assertTrue(convergent, "All validators are " "not on the same chain.") print "all validators are on the same chain" sit_rep(self.urls, verbosity=1) report_before_shutdown = sit_rep(self.urls, verbosity=1) validator_report = report_before_shutdown[2] valid_dict_value = validator_report['Status'] validator_blocks_shutdown = valid_dict_value['Blocks'] print "validator_blocks", validator_blocks_shutdown print "turn off entire validator network" nodes_names = self.node_controller.get_node_names() for node in nodes_names: self.node_controller.stop(node) to = TimeOut(120) while len(self.node_controller.get_node_names()) > 0: if to.is_timed_out(): self.fail("Timed Out") print "relaunch validator 0" self.node_controller.start(self.nodes[0]) to = TimeOut(120) while len(self.node_controller.get_node_names()) < 1: if to.is_timed_out(): self.fail("Timed Out") report_after_relaunch = None while report_after_relaunch is None: try: report_after_relaunch = \ sit_rep([self.urls[0]], verbosity=1) except MessageException: if to.is_timed_out(): self.fail("Timed Out") time.sleep(4) report_after_relaunch = sit_rep([self.urls[0]], verbosity=1) validator_report = report_after_relaunch[0] valid_dict_value = validator_report['Status'] validator_blocks_relaunch = valid_dict_value['Blocks'] print "validator_blocks_relaunch", validator_blocks_relaunch # the length of post-shutdown validator blocks might be bigger # than the length of pre-shutdown validator blocks for i in 
range(0, len(validator_blocks_shutdown)): self.assertEqual(validator_blocks_relaunch[i], validator_blocks_shutdown[i], "mismatch in post-shutdown validator blocks. " "Validator didn't restore fr local db") break print "relaunched validator restored from local database" finally: print "restart validators " for node in self.nodes: self.node_controller.start(node) to = TimeOut(120) while len(self.node_controller.get_node_names()) < 5: pass report_after_relaunch = None while report_after_relaunch is None: try: report_after_relaunch = \ sit_rep(self.urls, verbosity=1) except MessageException: if to.is_timed_out(): self.fail("Timed Out") time.sleep(4)
def test_genesis_util(self): print old_home = os.getenv('CURRENCYHOME') tmp_home = tempfile.mkdtemp() vcc = None try: # Set up env and config v_file = find_txn_validator() os.environ['CURRENCYHOME'] = tmp_home cfg = get_validator_configuration([], {}) # ...rewire for ValidatorManager compatibility cfg['KeyDirectory'] = tmp_home cfg['DataDirectory'] = tmp_home cfg['LogDirectory'] = tmp_home # En route, test keygen client via main key_name = cfg['NodeName'] key_dir = cfg['KeyDirectory'] cmd = 'keygen %s --key-dir %s' % (key_name, key_dir) entry_point(args=cmd.split(), with_loggers=False) base_name = key_dir + os.path.sep + key_name self.assertTrue(os.path.exists('%s.wif' % base_name)) self.assertTrue(os.path.exists('%s.addr' % base_name)) cfg['KeyFile'] = '%s.wif' % base_name # Test admin poet0-genesis tool fname = get_genesis_block_id_file_name(cfg['DataDirectory']) self.assertFalse(os.path.exists(fname)) config_file = tmp_home + os.path.sep + 'cfg.json' with open(config_file, 'w') as f: f.write(json.dumps(cfg, indent=4) + '\n') cmd = 'admin poet0-genesis --config %s' % config_file entry_point(args=cmd.split(), with_loggers=False) self.assertTrue(os.path.exists(fname)) dat = None with open(fname, 'r') as f: dat = json.load(f) self.assertTrue('GenesisId' in dat.keys()) tgt_block = dat['GenesisId'] # Verify genesis tool (also tests blockchain restoration) # ...initial connectivity must be zero for the initial validator cfg['InitialConnectivity'] = 0 # ...launch validator net_cfg = NetworkConfig.from_config_list([cfg]) vcc = ValidatorCollectionController(net_cfg, data_dir=tmp_home, txnvalidator=v_file) vcc.activate(0, probe_seconds=120) # ...verify validator is extending tgt_block to = TimeOut(64) blk_lists = None prog_str = 'TEST ROOT RESTORATION (expect %s)' % tgt_block with Progress(prog_str) as p: print while not to.is_timed_out() and blk_lists is None: try: blk_lists = get_blocklists(['http://localhost:8800']) print 'block_lists: %s' % blk_lists if len(blk_lists) 
< 1 or len(blk_lists[0]) < 2: blk_lists = None except MessageException as e: pass time.sleep(1) p.step() self.assertIsNotNone(blk_lists) root = blk_lists[0][0] self.assertEqual(tgt_block, root) finally: # Shut down validator if vcc is not None: vcc.shutdown() # Restore environmental vars if old_home is None: os.unsetenv('CURRENCYHOME') else: os.environ['CURRENCYHOME'] = old_home # Delete temp dir if os.path.exists(tmp_home): shutil.rmtree(tmp_home)
def test_validator_shutdown_restart_restore_ext(self): print try: print "launching a validator network of 5" vnm = get_default_vnm(5) vnm.do_genesis() vnm.launch() keys = 10 rounds = 2 txn_intv = 0 timeout = 5 print "Testing transaction load." test = IntKeyLoadTest() urls = vnm.urls() self.assertEqual(5, len(urls)) test.setup(vnm.urls(), keys) test.run(keys, rounds, txn_intv) test.validate() to = TimeOut(timeout) convergent = False with Progress("Checking for validators convergence") as p: while convergent is False or not to.is_timed_out(): time.sleep(timeout) p.step() convergent = is_convergent(vnm.urls(), tolerance=2, standard=5) self.assertTrue(convergent, "All validators are " "not on the same chain.") print "all validators are on the same chain" sit_rep(vnm.urls(), verbosity=1) report_before_shutdown = sit_rep(vnm.urls(), verbosity=1) validator_report = report_before_shutdown[2] valid_dict_value = validator_report['Status'] validator_blocks_shutdown = valid_dict_value['Blocks'] print "validator_blocks", validator_blocks_shutdown print "turn off entire validator network" vnm.update(node_mat=numpy.zeros(shape=(5, 5)), timeout=8) # set InitialConnectivity of individual # node to zero before relaunching cfg = vnm.get_configuration(2) cfg['InitialConnectivity'] = 0 vnm.set_configuration(2, cfg) print "relaunch validator 2" vnm.activate_node(2) report_after_relaunch = sit_rep(vnm.urls(), verbosity=1) validator_report = report_after_relaunch[0] valid_dict_value = validator_report['Status'] validator_blocks_relaunch = valid_dict_value['Blocks'] print "validator_blocks_relaunch", validator_blocks_relaunch # the length of post-shutdown validator blocks might be bigger # than the length of pre-shutdown validator blocks for i in range(0, len(validator_blocks_shutdown)): self.assertEqual( validator_blocks_relaunch[i], validator_blocks_shutdown[i], "mismatch in post-shutdown validator blocks. 
" "Validator didn't restore fr local db") break print "relaunched validator restored from local database" finally: if vnm is not None: # Validator network shutting down vnm.shutdown(archive_name='TestValidatorShutdownRestore')
def extend_genesis_util(self, overrides):
    """Create a genesis block with the given config overrides, launch a
    minimal 2-node network, and verify both that the network extends the
    recorded genesis root and that it converges on it.

    The finally clause stops (and if necessary kills) all nodes, then
    archives and cleans the controller's data directory.
    """
    print()
    try:
        self._node_ctrl = None
        print('creating', str(self.__class__.__name__))
        # set up our nodes (suite-internal interface)
        self._node_ctrl = WrappedNodeController(SubprocessNodeController())
        cfg = overrides
        temp_dir = self._node_ctrl.get_data_dir()
        file_name = os.path.join(temp_dir, "config.js")
        with open(file_name, 'w') as config:
            config.write(json.dumps(cfg))
        data_dir = os.path.join(temp_dir, "data")
        gblock_file = genesis_info_file_name(data_dir)
        self._nodes = [
            NodeArguments('v%s' % i, 8800 + i, 9000 + i,
                          config_files=[file_name],
                          ledger_type=overrides["LedgerType"])
            for i in range(2)]
        # set up our urls (external interface)
        self.urls = [
            'http://localhost:%s' % x.http_port for x in self._nodes]
        # Make genesis block
        print('creating genesis block...')
        self.assertFalse(os.path.exists(gblock_file))
        self._nodes[0].genesis = True
        self._node_ctrl.create_genesis_block(self._nodes[0])
        # Test genesis util
        self.assertTrue(os.path.exists(gblock_file))
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        self.assertTrue('GenesisId' in genesis_dat.keys())
        head = genesis_dat['GenesisId']
        # Verify genesis tool efficacy on a minimal network
        # Launch network (node zero will trigger bootstrapping)
        print('launching network...')
        for x in self._nodes:
            self._node_ctrl.start(x)
        # ...verify validator is extending tgt_block
        to = TimeOut(64)
        blk_lists = None
        prog_str = 'testing root extension (expect root: %s)' % head
        with Progress(prog_str) as p:
            print()
            while not to.is_timed_out() and blk_lists is None:
                try:
                    blk_lists = get_blocklists(['http://localhost:8800'])
                    print('block_lists: %s' % blk_lists)
                    # Need the root plus at least one extending block.
                    if len(blk_lists) < 1 or len(blk_lists[0]) < 2:
                        blk_lists = None
                except MessageException:
                    # validator not reachable yet; keep polling
                    pass
                time.sleep(2)
                p.step()
        self.assertIsNotNone(blk_lists)
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        # ...verify general convergence
        to = TimeOut(32)
        with Progress('testing root convergence') as p:
            print()
            while (is_convergent(self.urls, tolerance=1, standard=1)
                    is False and not to.is_timed_out()):
                time.sleep(2)
                p.step()
        # ...verify convergence on the genesis block
        blk_lists = get_blocklists(['http://localhost:8800'])
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        print('network converged on root: %s' % root)
    finally:
        print('destroying', str(self.__class__.__name__))
        if hasattr(self, '_node_ctrl') and self._node_ctrl is not None:
            # Shut down the network
            with Progress("terminating network") as p:
                for node_name in self._node_ctrl.get_node_names():
                    self._node_ctrl.stop(node_name)
                to = TimeOut(16)
                while len(self._node_ctrl.get_node_names()) > 0:
                    if to.is_timed_out():
                        break
                    time.sleep(1)
                    p.step()
            # force kill anything left over
            for node_name in self._node_ctrl.get_node_names():
                try:
                    print("%s still 'up'; sending kill..." % node_name)
                    self._node_ctrl.kill(node_name)
                except Exception as e:
                    # BUG FIX: Python 3 exceptions have no .message
                    # attribute; printing it raised AttributeError.
                    print(e)
            self._node_ctrl.archive(self.__class__.__name__)
            self._node_ctrl.clean()