def _run_int_load(self, config, num_nodes, archive_name):
    """Run an integer-key transaction load against a validator network.

    Launches a fresh network unless TEST_VALIDATOR_URLS points at one
    that is already running, then drives load and asserts convergence.

    Args:
        config (dict): Default config for each node
        num_nodes (int): Total number of nodes in network simulation
        archive_name (str): Name for tarball summary of test results
    """
    vnm = None
    try:
        test = IntKeyLoadTest()
        if "TEST_VALIDATOR_URLS" not in os.environ:
            # Python 3 compatible print calls (the rest of this file
            # already uses the print function).
            print("Launching validator network.")
            vnm_config = config
            vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100,
                                          cfg=vnm_config)
            vnm.launch_network(num_nodes)
            urls = vnm.urls()
        else:
            print("Fetching Urls of Running Validators")
            # TEST_VALIDATOR_URLS is a comma-separated list of validator
            # urls, e.g. 'http://localhost:8800,http://localhost:8801'
            urls = str(os.environ["TEST_VALIDATOR_URLS"]).split(",")
        print("Testing transaction load.")
        test.setup(urls, 100)
        test.run(2)
        test.validate()
        self.assertTrue(is_convergent(urls, tolerance=2, standard=5))
    finally:
        # Only a network we launched ourselves has logs to archive.
        if vnm is not None:
            vnm.shutdown()
            vnm.create_result_archive("%s.tar.gz" % archive_name)
        else:
            print("No Validator data and logs to preserve")
def _run_int_load(self, config, num_nodes, archive_name):
    """Run an integer-key transaction load against a validator network.

    Launches a fresh network unless TEST_VALIDATOR_URLS points at one
    that is already running, then drives load and asserts convergence.

    Args:
        config (dict): Default config for each node
        num_nodes (int): Total number of nodes in network simulation
        archive_name (str): Name for tarball summary of test results
    """
    vnm = None
    try:
        test = IntKeyLoadTest()
        if "TEST_VALIDATOR_URLS" not in os.environ:
            # Python 3 compatible print calls (the rest of this file
            # already uses the print function).
            print("Launching validator network.")
            vnm_config = config
            vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100,
                                          cfg=vnm_config)
            vnm.launch_network(num_nodes)
            urls = vnm.urls()
        else:
            print("Fetching Urls of Running Validators")
            # TEST_VALIDATOR_URLS is a comma-separated list of validator
            # urls, e.g. 'http://localhost:8800,http://localhost:8801'
            urls = str(os.environ["TEST_VALIDATOR_URLS"]).split(",")
        print("Testing transaction load.")
        test.setup(urls, 100)
        test.run(2)
        test.validate()
        self.assertTrue(is_convergent(urls, tolerance=2, standard=5))
    finally:
        # Only a network we launched ourselves has logs to archive.
        if vnm is not None:
            vnm.shutdown()
            vnm.create_result_archive("%s.tar.gz" % archive_name)
        else:
            print("No Validator data and logs to preserve")
def extend_genesis_util(self, overrides):
    """Exercise the genesis utility, then verify a minimal network
    extends and converges on the genesis block.

    Args:
        overrides (dict): config overrides applied to the test validators
    """
    print()
    vnm = None
    # Bound before the try: the finally clause builds the archive name
    # from ledger_type, which would NameError if get_default_vnm or
    # get_configuration raised before the assignment below.
    ledger_type = 'unknown'
    try:
        vnm = get_default_vnm(2, overrides=overrides)
        # Test genesis util
        cfg = vnm.get_configuration(0)
        ledger_type = cfg['LedgerType']
        gblock_file = genesis_info_file_name(cfg['DataDirectory'])
        self.assertFalse(os.path.exists(gblock_file))
        vnm.do_genesis()
        self.assertTrue(os.path.exists(gblock_file))
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        self.assertTrue('GenesisId' in genesis_dat.keys())
        head = genesis_dat['GenesisId']
        # Verify genesis tool efficacy on a minimal network
        vnm.launch()
        # ...verify validator is extending tgt_block
        to = TimeOut(64)
        blk_lists = None
        prog_str = 'testing root extension (expect root: %s)' % head
        with Progress(prog_str) as p:
            print()
            while not to.is_timed_out() and blk_lists is None:
                try:
                    blk_lists = get_blocklists(['http://localhost:8800'])
                    print('block_lists: %s' % blk_lists)
                    if len(blk_lists) < 1 or len(blk_lists[0]) < 2:
                        blk_lists = None
                except MessageException:
                    # validator may not be serving yet; keep polling
                    pass
                time.sleep(2)
                p.step()
        self.assertIsNotNone(blk_lists)
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        # ...verify general convergence
        to = TimeOut(32)
        with Progress('testing root convergence') as p:
            print()
            while (is_convergent(vnm.urls(), tolerance=1, standard=1)
                    is False and not to.is_timed_out()):
                time.sleep(2)
                p.step()
        # ...verify convergence on the genesis block
        blk_lists = get_blocklists(['http://localhost:8800'])
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        print('network converged on root: %s' % root)
    finally:
        if vnm is not None:
            archive_name = 'Test%sGenesisResults' % ledger_type.upper()
            vnm.shutdown(archive_name=archive_name)
def _poll_for_convergence(urls):
    """Block until the validators at urls reach minimal convergence.

    Args:
        urls (list): validator HTTP endpoints to poll

    Raises:
        CliException: if convergence is not reached within the timeout.
    """
    to = TimeOut(256)
    convergent = False
    task_str = 'checking for minimal convergence on: {}'.format(urls)
    with Progress(task_str) as p:
        while convergent is False:
            # Check the deadline on every pass. The original only
            # checked it inside the MessageException handler, so a
            # responsive but non-convergent network would busy-loop
            # forever without sleeping.
            if to.is_timed_out():
                raise CliException('timed out {}'.format(task_str))
            try:
                convergent = is_convergent(urls, standard=2, tolerance=0)
            except MessageException:
                # validators not reachable yet; keep polling
                pass
            if not convergent:
                p.step()
                time.sleep(4)
def _poll_for_convergence(self, timeout=256, tolerance=2, standard=5):
    """Poll this suite's validators until they converge.

    Fails the test (via assertFalse) if the deadline passes first, so
    a normal return always means convergence was observed.

    Args:
        timeout (int): seconds to wait before failing
        tolerance (int): permissible fork length passed to is_convergent
        standard (int): minimum converged-block requirement

    Returns:
        bool: True once is_convergent reports convergence.
    """
    deadline = TimeOut(timeout)
    converged = False
    with Progress('awaiting convergence') as progress:
        while not converged:
            self.assertFalse(deadline.is_timed_out(),
                             'timed out awaiting convergence')
            progress.step()
            time.sleep(4)
            try:
                converged = is_convergent(self.urls,
                                          standard=standard,
                                          tolerance=tolerance)
            except MessageException:
                # validators not answering yet; retry until the deadline
                pass
    sit_rep(self.urls, verbosity=1)
    return converged
def _poll_for_convergence(self, timeout=256, tolerance=2, standard=5):
    """Poll this suite's validators until they converge.

    Fails the test (via assertFalse) if the deadline passes first, so
    a normal return always means convergence was observed.

    Args:
        timeout (int): seconds to wait before failing
        tolerance (int): permissible fork length passed to is_convergent
        standard (int): minimum converged-block requirement

    Returns:
        bool: True once is_convergent reports convergence.
    """
    deadline = TimeOut(timeout)
    converged = False
    with Progress('awaiting convergence') as progress:
        while not converged:
            self.assertFalse(deadline.is_timed_out(),
                             'timed out awaiting convergence')
            progress.step()
            time.sleep(4)
            try:
                converged = is_convergent(self.urls,
                                          standard=standard,
                                          tolerance=tolerance)
            except MessageException:
                # validators not answering yet; retry until the deadline
                pass
    sit_rep(self.urls, verbosity=1)
    return converged
def _run_int_load(self):
    """Run a small integer-key transaction load against self.urls and
    assert the network converges.

    The previous docstring documented num_nodes/archive_name/overrides
    parameters this method does not take; node lifecycle is managed by
    the suite, so there is nothing to launch or archive here.
    """
    try:
        test = IntKeyLoadTest()
        print("Testing transaction load.")
        test.setup(self.urls, 100)
        test.run(2)
        test.validate()
        self.assertTrue(is_convergent(self.urls, tolerance=2, standard=3))
    finally:
        # No locally-launched network, hence no logs to preserve.
        print("No Validator data and logs to preserve")
def extend_genesis_util(self, overrides):
    """Create a genesis block via the node controller, then verify a
    two-node network extends and converges on it.

    Args:
        overrides (dict): validator config; must include 'LedgerType'.
    """
    print()
    try:
        self._node_ctrl = None
        print('creating', str(self.__class__.__name__))
        # set up our nodes (suite-internal interface)
        self._node_ctrl = WrappedNodeController(SubprocessNodeController())
        cfg = overrides
        temp_dir = self._node_ctrl.get_data_dir()
        file_name = os.path.join(temp_dir, "config.js")
        with open(file_name, 'w') as config:
            config.write(json.dumps(cfg))
        data_dir = os.path.join(temp_dir, "data")
        gblock_file = genesis_info_file_name(data_dir)
        self._nodes = [
            NodeArguments('v%s' % i, 8800 + i, 9000 + i,
                          config_files=[file_name],
                          ledger_type=overrides["LedgerType"])
            for i in range(2)]
        # set up our urls (external interface)
        self.urls = [
            'http://localhost:%s' % x.http_port for x in self._nodes]
        # Make genesis block
        print('creating genesis block...')
        self.assertFalse(os.path.exists(gblock_file))
        self._nodes[0].genesis = True
        self._node_ctrl.create_genesis_block(self._nodes[0])
        # Test genesis util
        self.assertTrue(os.path.exists(gblock_file))
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        self.assertTrue('GenesisId' in genesis_dat.keys())
        head = genesis_dat['GenesisId']
        # Verify genesis tool efficacy on a minimal network
        # Launch network (node zero will trigger bootstrapping)
        print('launching network...')
        for x in self._nodes:
            self._node_ctrl.start(x)
        # ...verify validator is extending tgt_block
        to = TimeOut(64)
        blk_lists = None
        prog_str = 'testing root extension (expect root: %s)' % head
        with Progress(prog_str) as p:
            print()
            while not to.is_timed_out() and blk_lists is None:
                try:
                    blk_lists = get_blocklists(['http://localhost:8800'])
                    print('block_lists: %s' % blk_lists)
                    if len(blk_lists) < 1 or len(blk_lists[0]) < 2:
                        blk_lists = None
                except MessageException:
                    # validator not serving yet; keep polling
                    pass
                time.sleep(2)
                p.step()
        self.assertIsNotNone(blk_lists)
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        # ...verify general convergence
        to = TimeOut(32)
        with Progress('testing root convergence') as p:
            print()
            while (is_convergent(self.urls, tolerance=1, standard=1)
                    is False and not to.is_timed_out()):
                time.sleep(2)
                p.step()
        # ...verify convergence on the genesis block
        blk_lists = get_blocklists(['http://localhost:8800'])
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        print('network converged on root: %s' % root)
    finally:
        print('destroying', str(self.__class__.__name__))
        if hasattr(self, '_node_ctrl') and self._node_ctrl is not None:
            # Shut down the network
            with Progress("terminating network") as p:
                for node_name in self._node_ctrl.get_node_names():
                    self._node_ctrl.stop(node_name)
                to = TimeOut(16)
                while len(self._node_ctrl.get_node_names()) > 0:
                    if to.is_timed_out():
                        break
                    time.sleep(1)
                    p.step()
            # force kill anything left over
            for node_name in self._node_ctrl.get_node_names():
                try:
                    print("%s still 'up'; sending kill..." % node_name)
                    self._node_ctrl.kill(node_name)
                except Exception as e:
                    # BaseException has no .message in Python 3; the
                    # original print(e.message) raised AttributeError.
                    print(e)
            self._node_ctrl.archive(self.__class__.__name__)
            self._node_ctrl.clean()
def test_validator_shutdown_restart_ext(self):
    """Deactivate validators with SIGINT then SIGTERM, sending
    transaction load between each transition to confirm the remaining
    and relaunched nodes keep accepting work.
    """
    vnm = None
    print()
    try:
        vnm = get_default_vnm(5)
        vnm.do_genesis()
        vnm.launch()
        keys = 10
        rounds = 2
        txn_intv = 0
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = vnm.urls()
        self.assertEqual(5, len(urls))
        test.setup(vnm.urls(), keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("test validator shutdown w/ SIGINT")
        vnm.deactivate_node(2, sig='SIGINT', timeout=8, force=False)
        print('check state of validators:')
        sit_rep(vnm.urls(), verbosity=2)
        print("sending more txns after SIGINT")
        urls = vnm.urls()
        self.assertEqual(4, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("relaunching removed_validator", 2)
        vnm.activate_node(2)
        print('check state of validators:')
        sit_rep(vnm.urls(), verbosity=2)
        if is_convergent(vnm.urls(), tolerance=2, standard=5) is True:
            print("all validators are on the same chain")
        else:
            print("all validators are not on the same chain")
        print("sending more txns after relaunching validator 2")
        urls = vnm.urls()
        self.assertEqual(5, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("test validator shutdown w/ SIGTERM")
        vnm.deactivate_node(4, sig='SIGTERM', timeout=8, force=False)
        print('check state of validators:')
        sit_rep(vnm.urls(), verbosity=2)
        print("sending more txns after SIGTERM")
        urls = vnm.urls()
        self.assertEqual(4, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("relaunching removed_validator", 4)
        vnm.activate_node(4)
        print('check state of validators:')
        sit_rep(vnm.urls(), verbosity=2)
        if is_convergent(vnm.urls(), tolerance=2, standard=5) is True:
            print("all validators are on the same chain")
        else:
            print("all validators are not on the same chain")
        print("sending more txns after relaunching validator 4")
        urls = vnm.urls()
        self.assertEqual(5, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
    finally:
        # Guard the shutdown: vnm stays None when get_default_vnm()
        # raises, and the original unguarded call masked the real
        # failure with an AttributeError.
        if vnm is not None:
            vnm.shutdown(archive_name='TestValidatorShutdown')
def test_two_clique(self):
    """Verify fork-on-partition and reconvergence-on-heal for a 5-node
    network that splits into two exclusive cliques when node 2 is
    severed.
    """
    # this topology forms 2 exclusive cliques when n2 is severed
    vulnerable_mat = [
        [1, 1, 0, 0, 0],
        [1, 1, 1, 0, 0],
        [0, 1, 1, 1, 0],
        [0, 0, 1, 1, 1],
        [0, 0, 0, 1, 1],
    ]
    two_clique_mat = copy.deepcopy(vulnerable_mat)
    two_clique_mat[2][2] = 0
    n = len(vulnerable_mat)
    top = SimController(n)
    print()
    try:
        print('phase 0: build vulnerably connected 5-net:')
        from txnintegration.netconfig import NetworkConfigProvider
        net_cfg = NetworkConfig(gen_dfl_cfg_poet0(), n,
                                provider=NetworkConfigProvider())
        net_cfg.set_nodes(vulnerable_mat)
        net_cfg.set_peers(vulnerable_mat)
        net_cfg.set_blacklist()
        vnm = ValidatorCollectionController(net_cfg)
        top.initialize(net_cfg, vnm, NopEdgeController(net_cfg))
        print('phase 1: launch vulnerably connected 5-net:')
        top.do_genesis(probe_seconds=0)
        top.launch(probe_seconds=0)
        print('phase 2: validate state across 5-net:')
        sit_rep(top.urls(), verbosity=2)
        print('phase 3: morph 5-net into two exclusive 2-net cliques:')
        top.update(node_mat=two_clique_mat, probe_seconds=0,
                   reg_seconds=0)
        print('providing time for convergence (likely partial)...')
        time.sleep(32)
        sit_rep(top.urls())
        print('phase 4: generate chain-ext A on clique {0, 1}:')
        url = top.urls()[0]
        print('sending transactions to %s...' % (url))
        ikcA = IntegerKeyClient(baseurl=url, keystring=gen_pk())
        self._do_work(ikcA, 5, 2)
        print('providing time for partial convergence...')
        time.sleep(8)
        sit_rep(top.urls())
        print('phase 5: generate chain-ext B on clique {3, 4}, '
              '|B| = 2|A|:')
        url = top.urls()[-1]
        print('sending transactions to %s...' % (url))
        ikcB = IntegerKeyClient(baseurl=url, keystring=gen_pk())
        self._do_work(ikcB, 1, 4)
        print('providing time for partial convergence...')
        time.sleep(8)
        sit_rep(top.urls())
        print('TEST 1: asserting network is forked')
        # assertEqual: assertEquals is a deprecated alias
        self.assertEqual(False, is_convergent(top.urls(), standard=3))
        print('phase 6: reconnect 5-net:')
        print('rezzing validator-2 with InitialConnectivity = |Peers|...')
        cfg = top.get_configuration(2)
        cfg['InitialConnectivity'] = 2
        top.set_configuration(2, cfg)
        top.update(node_mat=vulnerable_mat, probe_seconds=0,
                   reg_seconds=0)
        print('phase 7: validate state across 5-net:')
        print('providing time for global convergence...')
        time.sleep(64)
        sit_rep(top.urls())
        print('TEST 2: asserting network is convergent')
        self.assertEqual(True, is_convergent(top.urls(), standard=4))
    except Exception as e:
        # BaseException has no .message on Python 3; print the
        # exception itself.
        print('Exception encountered: %s' % (e))
        traceback.print_exc()
        sit_rep(top.urls())
        raise
    finally:
        top.shutdown(archive_name="TestPartitionRecoveryResults")
def test_validator_shutdown_restart_restore_ext(self):
    """Shut down the whole network, relaunch one validator, and verify
    it restores its chain from the local database.
    """
    print()
    # Bound before the try: the finally clause tests vnm, which would
    # NameError if get_default_vnm() raised before assignment.
    vnm = None
    try:
        print("launching a validator network of 5")
        vnm = get_default_vnm(5)
        vnm.do_genesis()
        vnm.launch()
        keys = 10
        rounds = 2
        txn_intv = 0
        timeout = 5
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = vnm.urls()
        self.assertEqual(5, len(urls))
        test.setup(vnm.urls(), keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        to = TimeOut(timeout)
        convergent = False
        with Progress("Checking for validators convergence") as p:
            # Poll until convergent or the deadline passes. The
            # original 'convergent is False or not to.is_timed_out()'
            # kept polling after convergence and looped forever when
            # the network never converged.
            while not convergent and not to.is_timed_out():
                time.sleep(timeout)
                p.step()
                convergent = is_convergent(vnm.urls(),
                                           tolerance=2,
                                           standard=5)
        self.assertTrue(convergent, "All validators are "
                                    "not on the same chain.")
        print("all validators are on the same chain")
        sit_rep(vnm.urls(), verbosity=1)
        report_before_shutdown = sit_rep(vnm.urls(), verbosity=1)
        validator_report = report_before_shutdown[2]
        valid_dict_value = validator_report['Status']
        validator_blocks_shutdown = valid_dict_value['Blocks']
        print("validator_blocks", validator_blocks_shutdown)
        print("turn off entire validator network")
        vnm.update(node_mat=numpy.zeros(shape=(5, 5)), timeout=8)
        # set InitialConnectivity of individual
        # node to zero before relaunching
        cfg = vnm.get_configuration(2)
        cfg['InitialConnectivity'] = 0
        vnm.set_configuration(2, cfg)
        print("relaunch validator 2")
        vnm.activate_node(2)
        report_after_relaunch = sit_rep(vnm.urls(), verbosity=1)
        validator_report = report_after_relaunch[0]
        valid_dict_value = validator_report['Status']
        validator_blocks_relaunch = valid_dict_value['Blocks']
        print("validator_blocks_relaunch", validator_blocks_relaunch)
        # the length of post-shutdown validator blocks might be bigger
        # than the length of pre-shutdown validator blocks
        for i in range(0, len(validator_blocks_shutdown)):
            self.assertEqual(
                validator_blocks_relaunch[i],
                validator_blocks_shutdown[i],
                "mismatch in post-shutdown validator blocks. "
                "Validator didn't restore fr local db")
            break
        print("relaunched validator restored from local database")
    finally:
        if vnm is not None:
            # Validator network shutting down
            vnm.shutdown(archive_name='TestValidatorShutdownRestore')
def test_validator_shutdown_sigkill_restart_ext(self):
    """SIGKILL one validator, stop the rest, relaunch the killed node,
    and verify it restores its chain from the local database.
    """
    try:
        keys = 10
        rounds = 2
        txn_intv = 0
        timeout = 5
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = self.urls
        self.assertEqual(5, len(urls))
        test.setup(self.urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        to = TimeOut(timeout)
        convergent = False
        with Progress("Checking for validators convergence") as p:
            # Poll until convergent or timed out. The original 'or'
            # condition kept polling after convergence and looped
            # forever on a non-convergent network.
            while not convergent and not to.is_timed_out():
                time.sleep(timeout)
                p.step()
                convergent = is_convergent(self.urls,
                                           tolerance=2,
                                           standard=5)
        self.assertTrue(convergent, "All validators are "
                                    "not on the same chain.")
        print("all validators are on the same chain")
        report_before_shutdown = sit_rep(self.urls, verbosity=1)
        validator_report = report_before_shutdown[4]
        valid_dict_value = validator_report['Status']
        validator_blocks_shutdown = valid_dict_value['Blocks']
        print("validator_blocks", validator_blocks_shutdown)
        print("shutdown validator 4 w/ SIGKILL")
        node_names = self.node_controller.get_node_names()
        node_names.sort()
        self.node_controller.kill(node_names[4])
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 4:
            if to.is_timed_out():
                self.fail("Timed Out")
        print('check state of validators:')
        sit_rep(self.urls[:-1], verbosity=2)
        print("sending more txns after SIGKILL")
        urls = self.urls[:-1]
        self.assertEqual(4, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("turn off entire validator network")
        for node in node_names:
            self.node_controller.stop(node)
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 0:
            if to.is_timed_out():
                self.fail("Timed Out")
        print("relaunch validator 4")
        self.node_controller.start(self.nodes[4])
        report_after_relaunch = None
        to = TimeOut(120)
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep([self.urls[4]], verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        validator_report = report_after_relaunch[0]
        valid_dict_value = validator_report['Status']
        validator_blocks_relaunch = valid_dict_value['Blocks']
        print("validator_blocks_relaunch", validator_blocks_relaunch)
        if len(validator_blocks_relaunch) == \
                len(validator_blocks_shutdown):
            if validator_blocks_shutdown == validator_blocks_relaunch:
                print("relaunched validator restored from local db")
        else:
            # relaunched chain may be longer; the common prefix must
            # still match
            for i in range(0, len(validator_blocks_shutdown)):
                self.assertEqual(validator_blocks_relaunch[i],
                                 validator_blocks_shutdown[i],
                                 "relaunched validator didn't"
                                 " restore fr local db")
                break
            print("relaunched validator restored from local database")
    finally:
        print("restart validators ")
        for node in self.nodes:
            self.node_controller.start(node)
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) < 5:
            if to.is_timed_out():
                self.fail("Timed Out")
        report_after_relaunch = None
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep(self.urls, verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        print("No validators")
def test_validator_shutdown_restart_restore_ext(self):
    """Stop the whole network, relaunch validator 0, and verify it
    restores its chain from the local database.
    """
    try:
        keys = 10
        rounds = 2
        txn_intv = 0
        timeout = 20
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = self.urls
        self.assertEqual(5, len(urls))
        test.setup(self.urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        to = TimeOut(timeout)
        convergent = False
        with Progress("Checking for validators convergence") as p:
            # Poll until convergent or timed out. The original 'or'
            # condition kept polling after convergence and looped
            # forever on a non-convergent network.
            while not convergent and not to.is_timed_out():
                time.sleep(5)
                p.step()
                convergent = is_convergent(self.urls,
                                           tolerance=2,
                                           standard=5)
        self.assertTrue(convergent, "All validators are "
                                    "not on the same chain.")
        print("all validators are on the same chain")
        sit_rep(self.urls, verbosity=1)
        report_before_shutdown = sit_rep(self.urls, verbosity=1)
        validator_report = report_before_shutdown[2]
        valid_dict_value = validator_report['Status']
        validator_blocks_shutdown = valid_dict_value['Blocks']
        print("validator_blocks", validator_blocks_shutdown)
        print("turn off entire validator network")
        nodes_names = self.node_controller.get_node_names()
        for node in nodes_names:
            self.node_controller.stop(node)
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 0:
            if to.is_timed_out():
                self.fail("Timed Out")
        print("relaunch validator 0")
        self.node_controller.start(self.nodes[0])
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) < 1:
            if to.is_timed_out():
                self.fail("Timed Out")
        report_after_relaunch = None
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep([self.urls[0]], verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        report_after_relaunch = sit_rep([self.urls[0]], verbosity=1)
        validator_report = report_after_relaunch[0]
        valid_dict_value = validator_report['Status']
        validator_blocks_relaunch = valid_dict_value['Blocks']
        print("validator_blocks_relaunch", validator_blocks_relaunch)
        # the length of post-shutdown validator blocks might be bigger
        # than the length of pre-shutdown validator blocks
        for i in range(0, len(validator_blocks_shutdown)):
            self.assertEqual(
                validator_blocks_relaunch[i],
                validator_blocks_shutdown[i],
                "mismatch in post-shutdown validator blocks. "
                "Validator didn't restore fr local db")
            break
        print("relaunched validator restored from local database")
    finally:
        print("restart validators ")
        for node in self.nodes:
            self.node_controller.start(node)
        to = TimeOut(120)
        # Check the deadline each pass: the original 'while ...: pass'
        # busy-waited with no timeout check and could hang the suite.
        while len(self.node_controller.get_node_names()) < 5:
            if to.is_timed_out():
                self.fail("Timed Out")
        report_after_relaunch = None
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep(self.urls, verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
def test_validator_shutdown_sigkill_restart_ext(self):
    """SIGKILL one validator, stop the rest, relaunch the killed node,
    and verify it restores its chain from the local database.
    """
    try:
        keys = 10
        rounds = 2
        txn_intv = 0
        timeout = 5
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = self.urls
        self.assertEqual(5, len(urls))
        test.setup(self.urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        to = TimeOut(timeout)
        convergent = False
        with Progress("Checking for validators convergence") as p:
            # Poll until convergent or timed out. The original 'or'
            # condition kept polling after convergence and looped
            # forever on a non-convergent network.
            while not convergent and not to.is_timed_out():
                time.sleep(timeout)
                p.step()
                convergent = is_convergent(self.urls,
                                           tolerance=2,
                                           standard=5)
        self.assertTrue(convergent, "All validators are "
                                    "not on the same chain.")
        print("all validators are on the same chain")
        report_before_shutdown = sit_rep(self.urls, verbosity=1)
        validator_report = report_before_shutdown[4]
        valid_dict_value = validator_report['Status']
        validator_blocks_shutdown = valid_dict_value['Blocks']
        print("validator_blocks", validator_blocks_shutdown)
        print("shutdown validator 4 w/ SIGKILL")
        node_names = self.node_controller.get_node_names()
        node_names.sort()
        self.node_controller.kill(node_names[4])
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 4:
            if to.is_timed_out():
                self.fail("Timed Out")
        print('check state of validators:')
        sit_rep(self.urls[:-1], verbosity=2)
        print("sending more txns after SIGKILL")
        urls = self.urls[:-1]
        self.assertEqual(4, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("turn off entire validator network")
        for node in node_names:
            self.node_controller.stop(node)
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 0:
            if to.is_timed_out():
                self.fail("Timed Out")
        print("relaunch validator 4")
        self.node_controller.start(self.nodes[4])
        report_after_relaunch = None
        to = TimeOut(120)
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep([self.urls[4]], verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        validator_report = report_after_relaunch[0]
        valid_dict_value = validator_report['Status']
        validator_blocks_relaunch = valid_dict_value['Blocks']
        print("validator_blocks_relaunch", validator_blocks_relaunch)
        if len(validator_blocks_relaunch) == \
                len(validator_blocks_shutdown):
            if validator_blocks_shutdown == validator_blocks_relaunch:
                print("relaunched validator restored from local db")
        else:
            # relaunched chain may be longer; the common prefix must
            # still match
            for i in range(0, len(validator_blocks_shutdown)):
                self.assertEqual(
                    validator_blocks_relaunch[i],
                    validator_blocks_shutdown[i],
                    "relaunched validator didn't"
                    " restore fr local db")
                break
            print("relaunched validator restored from local database")
    finally:
        print("restart validators ")
        for node in self.nodes:
            self.node_controller.start(node)
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) < 5:
            if to.is_timed_out():
                self.fail("Timed Out")
        report_after_relaunch = None
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep(self.urls, verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        print("No validators")
def extend_genesis_util(self, ledger_type, pre_overrides, post_overrides):
    """Exercise the CLI genesis tool for ledger_type, then verify a
    minimal network extends and converges on the genesis block.

    Args:
        ledger_type (str): consensus/ledger type, e.g. 'poet0'
        pre_overrides (dict): config overrides applied before genesis
        post_overrides (dict): validator overrides applied before launch
    """
    print()
    top = None
    try:
        # Get configs and resources for a ValidatorManager compliant nodes
        top = get_default_sim_controller(2, ledger_type=ledger_type)
        # Set up validator-0
        cfg = top.get_configuration(0)
        cfg.update(pre_overrides)
        top.set_configuration(0, cfg)
        config_file = top.write_configuration(0)
        # Test genesis tool
        print('testing genesis util...')
        gblock_file = genesis_info_file_name(cfg['DataDirectory'])
        self.assertFalse(os.path.exists(gblock_file))
        cli_args = 'admin %s-genesis --config %s' % (ledger_type,
                                                     config_file)
        sawtooth_cli_intercept(cli_args)
        # Get genesis block id
        self.assertTrue(os.path.exists(gblock_file))
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        self.assertTrue('GenesisId' in genesis_dat.keys())
        head = genesis_dat['GenesisId']
        # Verify genesis tool efficacy on a minimal network
        print('testing efficacy...')
        # ...apply validator-related overrides to validator-0
        cfg = top.get_configuration(0)
        cfg.update(post_overrides)
        top.set_configuration(0, cfg)
        # ...launch entire network
        top.launch(probe_seconds=0, reg_seconds=0)
        # ...verify validator is extending tgt_block
        to = TimeOut(64)
        blk_lists = None
        prog_str = 'testing root extension (expect root: %s)' % head
        with Progress(prog_str) as p:
            print()
            while not to.is_timed_out() and blk_lists is None:
                try:
                    blk_lists = get_blocklists(['http://localhost:8800'])
                    print('block_lists: %s' % blk_lists)
                    if len(blk_lists) < 1 or len(blk_lists[0]) < 2:
                        blk_lists = None
                except MessageException:
                    # validator not serving yet; keep polling
                    pass
                time.sleep(2)
                p.step()
        self.assertIsNotNone(blk_lists)
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        # ...verify general convergence
        to = TimeOut(32)
        with Progress('testing root convergence') as p:
            print()
            while (is_convergent(top.urls(), tolerance=1, standard=1)
                    is False and not to.is_timed_out()):
                time.sleep(2)
                p.step()
        # ...verify convergence on the genesis block
        blk_lists = get_blocklists(['http://localhost:8800'])
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        print('network converged on root: %s' % root)
    finally:
        if top is not None:
            archive_name = 'Test%sGenesisResults' % ledger_type.upper()
            top.shutdown(archive_name=archive_name)
def _run_int_load(self,
                  config,
                  num_nodes,
                  archive_name,
                  tolerance=2,
                  standard=5,
                  block_id=True,
                  static_network=None,
                  vnm_timeout=None,
                  txn_timeout=None,
                  n_keys=100,
                  n_runs=2,
                  ):
    """
    This test is getting really beat up and needs a refactor
    Args:
        config (dict): Default config for each node
        num_nodes (int): Total number of nodes in network simulation
        archive_name (str): Name for tarball summary of test results
        tolerance (int): Length in blocks of permissible fork (if forks
            are permissible)
        standard (int): A variable intended to guarantee that our block
            level identity checks have significant data to operate on.
            Conceptually, depends on the value of tolerance:
                case(tolerance):
                    0: minimum # of blocks required per validator
                    otherwise: minimum # of converged blocks required per
                        divergent block (per validator)
            Motivation: We want to compare identity across the network on
            some meaningfully large set of blocks.  Introducing fork
            tolerance is problematic: the variable tolerance which is used
            to trim the ends of each ledger's block-chain could be abused
            to trivialize the test.  Therefore, as tolerance is increased
            (if non-zero), we use standard to proportionally increase the
            minimum number of overall blocks required by the test.
        block_id (bool): check for block (hash) identity
        static_network (StaticNetworkConfig): optional static network
            configuration
        vnm_timeout (int): timeout for initiating network
        txn_timeout (int): timeout for batch transactions
    """
    vnm = None
    try:
        test = IntKeyLoadTest(timeout=txn_timeout)
        if "TEST_VALIDATOR_URLS" not in os.environ:
            # Python 3 compatible print calls (the rest of this file
            # already uses the print function).
            print("Launching validator network.")
            vnm_config = config
            vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100,
                                          cfg=vnm_config,
                                          static_network=static_network)
            vnm.launch_network(num_nodes, max_time=vnm_timeout)
            urls = vnm.urls()
        else:
            print("Fetching Urls of Running Validators")
            # TEST_VALIDATOR_URLS is a comma-separated list of validator
            # urls, e.g. 'http://localhost:8800,http://localhost:8801'
            urls = str(os.environ["TEST_VALIDATOR_URLS"]).split(",")
        print("Testing transaction load.")
        test.setup(urls, n_keys)
        test.run(n_runs)
        test.validate()
        if block_id:
            self.assertEqual(True, is_convergent(urls,
                                                 tolerance=tolerance,
                                                 standard=standard))
        if vnm:
            vnm.shutdown()
    except Exception:
        print("Exception encountered in test case.")
        traceback.print_exc()
        if vnm:
            vnm.shutdown()
        raise
    finally:
        # Only a network we launched ourselves has logs to archive.
        if vnm:
            vnm.create_result_archive("%s.tar.gz" % archive_name)
        else:
            print("No Validator data and logs to preserve")
def test_validator_shutdown_restart_ext(self):
    """SIGTERM validator 4, send load to the survivors, restart it, and
    confirm the network reconverges and keeps accepting transactions.
    """
    try:
        keys = 10
        rounds = 2
        txn_intv = 0
        print("Testing transaction load.")
        test = IntKeyLoadTest()
        urls = self.urls
        self.assertEqual(5, len(urls))
        test.setup(self.urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("test validator shutdown w/ SIGTERM")
        node_names = self.node_controller.get_node_names()
        node_names.sort()
        self.node_controller.stop(node_names[4])
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 4:
            if to.is_timed_out():
                self.fail("Timed Out")
        print('check state of validators:')
        sit_rep(self.urls[:-1], verbosity=2)
        print("sending more txns after SIGTERM")
        urls = self.urls[:-1]
        self.assertEqual(4, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
        print("relaunching removed_validator", 4)
        self.node_controller.start(self.nodes[4])
        to = TimeOut(120)
        while len(self.node_controller.get_node_names()) < 1:
            if to.is_timed_out():
                self.fail("Timed Out")
        report_after_relaunch = None
        while report_after_relaunch is None:
            try:
                report_after_relaunch = \
                    sit_rep([self.urls[4]], verbosity=1)
            except MessageException:
                if to.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        print('check state of validators:')
        sit_rep(self.urls, verbosity=2)
        if is_convergent(self.urls, tolerance=2, standard=5) is True:
            print("all validators are on the same chain")
        else:
            print("all validators are not on the same chain")
        print("sending more txns after relaunching validator 4")
        urls = self.urls
        self.assertEqual(5, len(urls))
        test.setup(urls, keys)
        test.run(keys, rounds, txn_intv)
        test.validate()
    finally:
        # Node lifecycle is owned by the suite; nothing to tear down.
        print("No validators")
def test_validator_shutdown_sigkill_restart_ext(self): print try: print "launching a validator network of 5" vnm = get_default_vnm(5) vnm.do_genesis() vnm.launch() keys = 10 rounds = 2 txn_intv = 0 timeout = 5 print "Testing transaction load." test = IntKeyLoadTest() urls = vnm.urls() self.assertEqual(5, len(urls)) test.setup(vnm.urls(), keys) test.run(keys, rounds, txn_intv) test.validate() to = TimeOut(timeout) convergent = False with Progress("Checking for validators convergence") as p: while convergent is False or not to.is_timed_out(): time.sleep(timeout) p.step() convergent = is_convergent(vnm.urls(), tolerance=2, standard=5) self.assertTrue(convergent, "All validators are " "not on the same chain.") print "all validators are on the same chain" sit_rep(vnm.urls(), verbosity=1) report_before_shutdown = sit_rep(vnm.urls(), verbosity=1) validator_report = report_before_shutdown[4] valid_dict_value = validator_report['Status'] validator_blocks_shutdown = valid_dict_value['Blocks'] print "validator_blocks", validator_blocks_shutdown print "shutdown validator 4 w/ SIGKILL" vnm.deactivate_node(4, sig='SIGKILL', timeout=8) print 'check state of validators:' sit_rep(vnm.urls(), verbosity=2) print "sending more txns after SIGKILL" urls = vnm.urls() self.assertEqual(4, len(urls)) test.setup(urls, keys) test.run(keys, rounds, txn_intv) test.validate() print "turn off entire validator network" for i in range(0, 4): vnm.deactivate_node(i, sig='SIGINT', timeout=8, force=False) print 'check state of validators after graceful shutdown:' sit_rep(vnm.urls(), verbosity=2) # set InitialConnectivity of individual # node to zero before relaunching cfg = vnm.get_configuration(4) cfg['InitialConnectivity'] = 0 vnm.set_configuration(4, cfg) print "relaunch validator 4" vnm.activate_node(4) report_after_relaunch = sit_rep(vnm.urls(), verbosity=1) validator_report = report_after_relaunch[0] valid_dict_value = validator_report['Status'] validator_blocks_relaunch = 
valid_dict_value['Blocks'] print "validator_blocks_relaunch", validator_blocks_relaunch if len(validator_blocks_relaunch) == \ len(validator_blocks_shutdown): if validator_blocks_shutdown == validator_blocks_relaunch: print "relaunched validator restored from local db" else: for i in range(0, len(validator_blocks_shutdown)): self.assertEqual(validator_blocks_relaunch[i], validator_blocks_shutdown[i], "relaunched validator didn't" " restore fr local db") break print "relaunched validator restored from local database" finally: if vnm is not None: # shutting down validator network vnm.shutdown(archive_name='TestValidatorShutdownRestore')
def test_two_clique(self):
    """Partition a 5-node line topology into two exclusive 2-node cliques
    by severing node 2, grow divergent chains on each clique, then rejoin
    the network and assert it converges onto a single chain.
    """
    # this topology forms 2 exclusive cliques when n2 is severed
    vulnerable_mat = [
        [1, 1, 0, 0, 0],
        [1, 1, 1, 0, 0],
        [0, 1, 1, 1, 0],
        [0, 0, 1, 1, 1],
        [0, 0, 0, 1, 1],
    ]
    # same topology with node 2 removed (its self-entry zeroed)
    two_clique_mat = copy.deepcopy(vulnerable_mat)
    two_clique_mat[2][2] = 0
    n = len(vulnerable_mat)
    top = SimController(n)
    print
    try:
        print 'phase 0: build vulnerably connected 5-net:'
        from txnintegration.netconfig import NetworkConfigProvider
        net_cfg = NetworkConfig(gen_dfl_cfg_poet0(), n,
                                provider=NetworkConfigProvider())
        net_cfg.set_nodes(vulnerable_mat)
        net_cfg.set_peers(vulnerable_mat)
        net_cfg.set_blacklist()
        vnm = ValidatorCollectionController(net_cfg)
        top.initialize(net_cfg, vnm, NopEdgeController(net_cfg))
        print 'phase 1: launch vulnerably connected 5-net:'
        top.do_genesis(probe_seconds=0)
        top.launch(probe_seconds=0)
        print 'phase 2: validate state across 5-net:'
        sit_rep(top.urls(), verbosity=2)
        print 'phase 3: morph 5-net into two exclusive 2-net cliques:'
        top.update(node_mat=two_clique_mat, probe_seconds=0, reg_seconds=0)
        print 'providing time for convergence (likely partial)...'
        # fixed sleeps (32/8/8/64s) stand in for convergence probing here
        time.sleep(32)
        sit_rep(top.urls())
        print 'phase 4: generate chain-ext A on clique {0, 1}:'
        url = top.urls()[0]
        print 'sending transactions to %s...' % (url)
        ikcA = IntegerKeyClient(baseurl=url, keystring=gen_pk())
        self._do_work(ikcA, 5, 2)
        print 'providing time for partial convergence...'
        time.sleep(8)
        sit_rep(top.urls())
        print 'phase 5: generate chain-ext B on clique {3, 4}, |B| = 2|A|:'
        url = top.urls()[-1]
        print 'sending transactions to %s...' % (url)
        ikcB = IntegerKeyClient(baseurl=url, keystring=gen_pk())
        self._do_work(ikcB, 1, 4)
        print 'providing time for partial convergence...'
        time.sleep(8)
        sit_rep(top.urls())
        print 'TEST 1: asserting network is forked'
        self.assertEquals(False, is_convergent(top.urls(), standard=3))
        print 'phase 6: reconnect 5-net:'
        print 'rezzing validator-2 with InitialConnectivity = |Peers|...'
        cfg = top.get_configuration(2)
        cfg['InitialConnectivity'] = 2
        top.set_configuration(2, cfg)
        top.update(node_mat=vulnerable_mat, probe_seconds=0, reg_seconds=0)
        print 'phase 7: validate state across 5-net:'
        print 'providing time for global convergence...'
        time.sleep(64)
        sit_rep(top.urls())
        print 'TEST 2: asserting network is convergent'
        self.assertEquals(True, is_convergent(top.urls(), standard=4))
    except Exception as e:
        # dump state before re-raising so the failure context is archived
        print 'Exception encountered: %s' % (e.message)
        traceback.print_exc()
        sit_rep(top.urls())
        raise
    finally:
        top.shutdown(archive_name="TestPartitionRecoveryResults")
def extend_genesis_util(self, overrides):
    """Create a genesis block on node 0 via the node controller, launch a
    two-node network, and verify every node extends and converges on the
    genesis root.

    Args:
        overrides (dict): node configuration; must contain "LedgerType".
    """
    print()
    # NOTE(review): vnm is never used in this method
    vnm = None
    try:
        self._node_ctrl = None
        print('creating', str(self.__class__.__name__))
        # set up our nodes (suite-internal interface)
        self._node_ctrl = WrappedNodeController(
            SubprocessLegacyNodeController())
        cfg = overrides
        temp_dir = self._node_ctrl.get_data_dir()
        file_name = os.path.join(temp_dir, "config.js")
        with open(file_name, 'w') as config:
            config.write(json.dumps(cfg))
        data_dir = os.path.join(temp_dir, "data")
        gblock_file = genesis_info_file_name(data_dir)
        self._nodes = [
            NodeArguments('v%s' % i, 8800 + i, 9000 + i,
                          config_files=[file_name],
                          ledger_type=overrides["LedgerType"])
            for i in range(2)]
        # set up our urls (external interface)
        self.urls = [
            'http://localhost:%s' % x.http_port for x in self._nodes]
        # Make genesis block
        print('creating genesis block...')
        self.assertFalse(os.path.exists(gblock_file))
        self._nodes[0].genesis = True
        self._node_ctrl.create_genesis_block(self._nodes[0])
        # Test genesis util
        self.assertTrue(os.path.exists(gblock_file))
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        self.assertTrue('GenesisId' in genesis_dat.keys())
        head = genesis_dat['GenesisId']
        # Verify genesis tool efficacy on a minimal network
        # Launch network (node zero will trigger bootstrapping)
        print('launching network...')
        for x in self._nodes:
            self._node_ctrl.start(x)
        # ...verify validator is extending tgt_block
        to = TimeOut(64)
        blk_lists = None
        prog_str = 'testing root extension (expect root: %s)' % head
        with Progress(prog_str) as p:
            print()
            # poll until node 0 reports a chain of at least 2 blocks
            while not to.is_timed_out() and blk_lists is None:
                try:
                    blk_lists = get_blocklists(['http://localhost:8800'])
                    print('block_lists: %s' % blk_lists)
                    if len(blk_lists) < 1 or len(blk_lists[0]) < 2:
                        blk_lists = None
                except MessageException as e:
                    # node not answering yet; keep polling until timeout
                    pass
                time.sleep(2)
                p.step()
        self.assertIsNotNone(blk_lists)
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        # ...verify general convergence
        to = TimeOut(32)
        with Progress('testing root convergence') as p:
            print()
            while (is_convergent(self.urls, tolerance=1, standard=1)
                   is False and not to.is_timed_out()):
                time.sleep(2)
                p.step()
        # ...verify convergence on the genesis block
        blk_lists = get_blocklists(['http://localhost:8800'])
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        print('network converged on root: %s' % root)
    finally:
        print('destroying', str(self.__class__.__name__))
        if hasattr(self, '_node_ctrl') and self._node_ctrl is not None:
            # Shut down the network
            with Progress("terminating network") as p:
                for node_name in self._node_ctrl.get_node_names():
                    self._node_ctrl.stop(node_name)
                to = TimeOut(16)
                while len(self._node_ctrl.get_node_names()) > 0:
                    if to.is_timed_out():
                        break
                    time.sleep(1)
                    p.step()
            # force kill anything left over
            for node_name in self._node_ctrl.get_node_names():
                try:
                    print("%s still 'up'; sending kill..." % node_name)
                    self._node_ctrl.kill(node_name)
                except Exception as e:
                    print(e.message)
            self._node_ctrl.archive(self.__class__.__name__)
            self._node_ctrl.clean()
def test_validator_shutdown_restart_ext(self):
    """Stop one of five validators with SIGTERM, keep sending load to the
    four survivors, restart the stopped node, then send load to all five
    again.

    Uses suite fixtures self.urls, self.node_controller and self.nodes.
    Post-restart convergence is reported but not asserted.
    """
    try:
        num_keys = 10
        num_rounds = 2
        txn_interval = 0
        print("Testing transaction load.")
        load_test = IntKeyLoadTest()
        active_urls = self.urls
        self.assertEqual(5, len(active_urls))
        load_test.setup(self.urls, num_keys)
        load_test.run(num_keys, num_rounds, txn_interval)
        load_test.validate()

        print("test validator shutdown w/ SIGTERM")
        names = self.node_controller.get_node_names()
        names.sort()
        self.node_controller.stop(names[4])
        deadline = TimeOut(120)
        while len(self.node_controller.get_node_names()) > 4:
            if deadline.is_timed_out():
                self.fail("Timed Out")
        print('check state of validators:')
        sit_rep(self.urls[:-1], verbosity=2)

        print("sending more txns after SIGTERM")
        active_urls = self.urls[:-1]
        self.assertEqual(4, len(active_urls))
        load_test.setup(active_urls, num_keys)
        load_test.run(num_keys, num_rounds, txn_interval)
        load_test.validate()

        print(("relaunching removed_validator", 4))
        self.node_controller.start(self.nodes[4])
        deadline = TimeOut(120)
        while len(self.node_controller.get_node_names()) < 1:
            if deadline.is_timed_out():
                self.fail("Timed Out")
        relaunch_report = None
        while relaunch_report is None:
            try:
                relaunch_report = sit_rep([self.urls[4]], verbosity=1)
            except MessageException:
                if deadline.is_timed_out():
                    self.fail("Timed Out")
                time.sleep(4)
        print('check state of validators:')
        sit_rep(self.urls, verbosity=2)

        converged = is_convergent(self.urls, tolerance=2, standard=5)
        if converged is True:
            print("all validators are on the same chain")
        else:
            print("all validators are not on the same chain")

        print("sending more txns after relaunching validator 4")
        active_urls = self.urls
        self.assertEqual(5, len(active_urls))
        load_test.setup(active_urls, num_keys)
        load_test.run(num_keys, num_rounds, txn_interval)
        load_test.validate()
    finally:
        print("No validators")
def test_validator_shutdown_restart_restore_ext(self): try: keys = 10 rounds = 2 txn_intv = 0 timeout = 20 print "Testing transaction load." test = IntKeyLoadTest() urls = self.urls self.assertEqual(5, len(urls)) test.setup(self.urls, keys) test.run(keys, rounds, txn_intv) test.validate() to = TimeOut(timeout) convergent = False with Progress("Checking for validators convergence") as p: while convergent is False or not to.is_timed_out(): time.sleep(5) p.step() convergent = is_convergent(self.urls, tolerance=2, standard=5) self.assertTrue(convergent, "All validators are " "not on the same chain.") print "all validators are on the same chain" sit_rep(self.urls, verbosity=1) report_before_shutdown = sit_rep(self.urls, verbosity=1) validator_report = report_before_shutdown[2] valid_dict_value = validator_report['Status'] validator_blocks_shutdown = valid_dict_value['Blocks'] print "validator_blocks", validator_blocks_shutdown print "turn off entire validator network" nodes_names = self.node_controller.get_node_names() for node in nodes_names: self.node_controller.stop(node) to = TimeOut(120) while len(self.node_controller.get_node_names()) > 0: if to.is_timed_out(): self.fail("Timed Out") print "relaunch validator 0" self.node_controller.start(self.nodes[0]) to = TimeOut(120) while len(self.node_controller.get_node_names()) < 1: if to.is_timed_out(): self.fail("Timed Out") report_after_relaunch = None while report_after_relaunch is None: try: report_after_relaunch = \ sit_rep([self.urls[0]], verbosity=1) except MessageException: if to.is_timed_out(): self.fail("Timed Out") time.sleep(4) report_after_relaunch = sit_rep([self.urls[0]], verbosity=1) validator_report = report_after_relaunch[0] valid_dict_value = validator_report['Status'] validator_blocks_relaunch = valid_dict_value['Blocks'] print "validator_blocks_relaunch", validator_blocks_relaunch # the length of post-shutdown validator blocks might be bigger # than the length of pre-shutdown validator blocks for i in 
range(0, len(validator_blocks_shutdown)): self.assertEqual(validator_blocks_relaunch[i], validator_blocks_shutdown[i], "mismatch in post-shutdown validator blocks. " "Validator didn't restore fr local db") break print "relaunched validator restored from local database" finally: print "restart validators " for node in self.nodes: self.node_controller.start(node) to = TimeOut(120) while len(self.node_controller.get_node_names()) < 5: pass report_after_relaunch = None while report_after_relaunch is None: try: report_after_relaunch = \ sit_rep(self.urls, verbosity=1) except MessageException: if to.is_timed_out(): self.fail("Timed Out") time.sleep(4)
def _run_int_load(
        self,
        config,
        num_nodes,
        archive_name,
        tolerance=2,
        standard=5,
        block_id=True,
        static_network=None,
        vnm_timeout=None,
        txn_timeout=None,
        n_keys=100,
        n_runs=2,
):
    """
    This test is getting really beat up and needs a refactor
    Args:
        config (dict): Default config for each node
        num_nodes (int): Total number of nodes in network simulation
        archive_name (str): Name for tarball summary of test results
        tolerance (int): Length in blocks of permissible fork (if forks are
            permissible)
        standard (int): A variable intended to guarantee that our block
            level identity checks have significant data to operate on.
            Conceptually, depends on the value of tolerance:
                case(tolerance):
                    0:          minimum # of blocks required per validator
                    otherwise:  minimum # of converged blocks required per
                                divergent block (per validator)
            Motivation: We want to compare identity across the network on
            some meaningfully large set of blocks.  Introducing fork
            tolerance is problematic: the variable tolerance which is used
            to trim the ends of each ledger's block-chain could be abused
            to trivialize the test.  Therefore, as tolerance is increased
            (if non-zero), we use standard to proportionally increase the
            minimum number of overall blocks required by the test.
        block_id (bool): check for block (hash) identity
        static_network (StaticNetworkConfig): optional static network
            configuration
        vnm_timeout (int): timeout for initiating network
        txn_timeout (int): timeout for batch transactions
        n_keys (int): number of integer keys the load test operates on
        n_runs (int): number of load-test rounds to execute
    """
    vnm = None
    try:
        test = IntKeyLoadTest(timeout=txn_timeout)
        if "TEST_VALIDATOR_URLS" not in os.environ:
            # no externally provided validators: launch our own network
            print "Launching validator network."
            vnm_config = config
            vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100,
                                          cfg=vnm_config,
                                          static_network=static_network)
            vnm.launch_network(num_nodes, max_time=vnm_timeout)
            urls = vnm.urls()
        else:
            print "Fetching Urls of Running Validators"
            # TEST_VALIDATORS_RUNNING is a list of validators urls
            # separated by commas.
            # e.g. 'http://localhost:8800,http://localhost:8801'
            urls = str(os.environ["TEST_VALIDATOR_URLS"]).split(",")
        print "Testing transaction load."
        test.setup(urls, n_keys)
        test.run(n_runs)
        test.validate()
        if block_id:
            self.assertEqual(
                True,
                is_convergent(urls, tolerance=tolerance, standard=standard))
        if vnm:
            vnm.shutdown()
    except Exception:
        # log, shut down the network we launched, and re-raise so the
        # test still fails while the finally clause archives the results
        print "Exception encountered in test case."
        traceback.print_exc()
        if vnm:
            vnm.shutdown()
        raise
    finally:
        if vnm:
            vnm.create_result_archive("%s.tar.gz" % archive_name)
        else:
            print "No Validator data and logs to preserve"
def extend_genesis_util(self, ledger_type, pre_overrides, post_overrides):
    """Exercise the '<ledger_type>-genesis' admin CLI on a two-node sim
    network: create a genesis block on validator 0, launch the network,
    and verify both nodes extend and converge on the genesis root.

    Args:
        ledger_type (str): ledger flavor used in the CLI subcommand name
        pre_overrides (dict): config applied to validator 0 before genesis
        post_overrides (dict): config applied to validator 0 before launch
    """
    print
    top = None
    try:
        # Get configs and resources for a ValidatorManager compliant nodes
        top = get_default_sim_controller(2, ledger_type=ledger_type)
        # Set up validator-0
        cfg = top.get_configuration(0)
        cfg.update(pre_overrides)
        top.set_configuration(0, cfg)
        config_file = top.write_configuration(0)
        # Test genesis tool
        print 'testing genesis util...'
        gblock_file = genesis_info_file_name(cfg['DataDirectory'])
        self.assertFalse(os.path.exists(gblock_file))
        cli_args = 'admin %s-genesis --config %s' % (ledger_type,
                                                     config_file)
        sawtooth_cli_intercept(cli_args)
        # Get genesis block id
        self.assertTrue(os.path.exists(gblock_file))
        genesis_dat = None
        with open(gblock_file, 'r') as f:
            genesis_dat = json.load(f)
        self.assertTrue('GenesisId' in genesis_dat.keys())
        head = genesis_dat['GenesisId']
        # Verify genesis tool efficacy on a minimal network
        print 'testing efficacy...'
        # ...apply validator-related overrides to validator-0
        cfg = top.get_configuration(0)
        cfg.update(post_overrides)
        top.set_configuration(0, cfg)
        # ...launch entire network
        top.launch(probe_seconds=0, reg_seconds=0)
        # ...verify validator is extending tgt_block
        to = TimeOut(64)
        blk_lists = None
        prog_str = 'testing root extension (expect root: %s)' % head
        with Progress(prog_str) as p:
            print
            # poll until node 0 reports a chain of at least 2 blocks
            while not to.is_timed_out() and blk_lists is None:
                try:
                    blk_lists = get_blocklists(['http://localhost:8800'])
                    print 'block_lists: %s' % blk_lists
                    if len(blk_lists) < 1 or len(blk_lists[0]) < 2:
                        blk_lists = None
                except MessageException as e:
                    # node not answering yet; keep polling until timeout
                    pass
                time.sleep(2)
                p.step()
        self.assertIsNotNone(blk_lists)
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        # ...verify general convergence
        to = TimeOut(32)
        with Progress('testing root convergence') as p:
            print
            while (is_convergent(top.urls(), tolerance=1, standard=1)
                   is False and not to.is_timed_out()):
                time.sleep(2)
                p.step()
        # ...verify convergence on the genesis block
        blk_lists = get_blocklists(['http://localhost:8800'])
        root = blk_lists[0][0]
        self.assertEqual(head, root)
        print 'network converged on root: %s' % root
    finally:
        if top is not None:
            archive_name = 'Test%sGenesisResults' % ledger_type.upper()
            top.shutdown(archive_name=archive_name)