def test_intkey_load_ext(self): vnm = None try: print "Launching validator network." vnm_config = defaultValidatorConfig.copy() vnm_config['LogLevel'] = 'DEBUG' vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100, cfg=vnm_config) firstwavevalidators = vnm.launch_network(5) print "Testing transaction load." test = IntKeyLoadTest() test.setup(vnm.urls(), 10) test.run(1) vnm.expand_network(firstwavevalidators, 1) test.run(1) test.run_with_missing_dep(1) test.validate() vnm.shutdown() except Exception as e: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() vnm.create_result_archive("TestIntegrationResults.tar.gz") print "Validator data and logs preserved in: " \ "TestIntegrationResults.tar.gz" raise e
def test_missing_dependencies(self): vnm = None try: print "Launching validator network." vnm_config = defaultValidatorConfig.copy() vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100, cfg=vnm_config) firstwavevalidators = vnm.launch_network(5) print "Testing limit of missing dependencies." test = IntKeyLoadTest() test.setup(vnm.urls(), 10) test.run(1) test.run_with_limit_txn_dependencies(1) test.validate() vnm.shutdown() except Exception as e: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() vnm.create_result_archive("TestIntegrationResults.tar.gz") print "Validator data and logs preserved in: " \ "TestIntegrationResults.tar.gz" raise e
def test_local_validation_errors(self): cfg = defaultValidatorConfig.copy() cfg['LedgerType'] = 'dev_mode' cfg['BlockWaitTime'] = 0 cfg['LocalValidation'] = True vnm = None try: print "Launching validator network." vnm = ValidatorNetworkManager(http_port=9300, udp_port=9350, cfg=cfg) vnm.launch_network(1) urls = vnm.urls() self._generate_invalid_transactions(urls[0]) if vnm: vnm.shutdown() except Exception: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() raise finally: if vnm: vnm.create_result_archive("%s.tar.gz" % self._testMethodName) else: print "No Validator data and logs to preserve"
def test_intkey_load_ext(self): vnm = None try: print "Launching validator network." vnm_config = defaultValidatorConfig.copy() vnm_config['LogLevel'] = 'DEBUG' vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100, cfg=vnm_config) firstwavevalidators = vnm.launch_network(5) print "Testing transaction load." test = IntKeyLoadTest() test.setup(vnm.urls(), 10) test.run(1) vnm.expand_network(firstwavevalidators, 1) test.run(1) test.run_missing_dep_test(1) test.validate() vnm.shutdown() except Exception as e: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() vnm.create_result_archive("TestIntegrationResults.tar.gz") print "Validator data and logs preserved in: " \ "TestIntegrationResults.tar.gz" raise e
def test_intkey_load_quorum(self): vnm = None vote_cfg = defaultValidatorConfig.copy() vote_cfg['LedgerType'] = 'quorum' try: vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100, cfg=vote_cfg) vnm.launch_network(5) print "Testing transaction load." test = IntKeyLoadTest() test.setup(vnm.urls(), 100) test.run(2) test.run_missing_dep_test(1) test.validate() vnm.shutdown() except Exception: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() raise finally: if vnm: vnm.create_result_archive("TestIntegrationResultsVote.tar.gz")
def _run_int_load(self, config, num_nodes, archive_name): """ Args: config (dict): Default config for each node num_nodes (int): Total number of nodes in network simulation archive_name (str): Name for tarball summary of test results """ vnm = None try: test = IntKeyLoadTest() if "TEST_VALIDATOR_URLS" not in os.environ: print "Launching validator network." vnm_config = config vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100, cfg=vnm_config) vnm.launch_network(num_nodes) urls = vnm.urls() else: print "Fetching Urls of Running Validators" # TEST_VALIDATORS_RUNNING is a list of validators urls # separated by commas. # e.g. 'http://localhost:8800,http://localhost:8801' urls = str(os.environ["TEST_VALIDATOR_URLS"]).split(",") print "Testing transaction load." test.setup(urls, 100) test.run(2) test.validate() self.assertTrue(is_convergent(urls, tolerance=2, standard=5)) finally: if vnm is not None: vnm.shutdown() vnm.create_result_archive("%s.tar.gz" % archive_name) else: print "No Validator data and logs to preserve"
def test_intkey_load(self): vnm = None try: vnm = ValidatorNetworkManager(httpPort=9000, udpPort=9100) vnm.launch_network(5) print "Testing transaction load." test = IntKeyLoadTest() test.setup(vnm.urls(), 100) test.run(2) test.validate() vnm.shutdown() except Exception as e: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() vnm.create_result_archive("TestSmokeResults.tar.gz") print "Validator data and logs preserved in: " \ "TestSmokeResults.tar.gz" raise e
def test_mktplace_load(self): vnm = None try: print "Launching validator network." vnm_config = defaultValidatorConfig.copy() if 'mktplace.transactions.market_place' not in \ vnm_config['TransactionFamilies']: vnm_config['TransactionFamilies'].append( 'mktplace.transactions.market_place') vnm_config['LogLevel'] = 'DEBUG' vnm = ValidatorNetworkManager( httpPort=9500, udpPort=9600, cfg=vnm_config) vnm.launch_network(5) print "Testing transaction load." test_case = MktPlaceLoad(num_traders=5, iterations=1, urls=vnm.urls(), test_dir=vnm.DataDir) test_case.setup() test_case.run() test_case.validate() if SAVE_INTEGRATION_TEST_DATA: vnm.create_result_archive("TestSmokeResults.tar.gz") print "Validator data and logs preserved in: " \ "TestSmokeResults.tar.gz" vnm.shutdown() except: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() if vnm.create_result_archive("TestSmokeResults.tar.gz"): print "Validator data and logs preserved in: " \ "TestSmokeResults.tar.gz" else: print "No Validator data and logs to preserve." raise
def test_mktplace_load(self): vnm = None try: print "Launching validator network." vnm_config = defaultValidatorConfig.copy() if 'mktplace.transactions.market_place' not in \ vnm_config['TransactionFamilies']: vnm_config['TransactionFamilies'].append( 'mktplace.transactions.market_place') vnm = ValidatorNetworkManager(http_port=9500, udp_port=9600, cfg=vnm_config) vnm.launch_network(5) print "Testing transaction load." test_case = MktPlaceLoad(num_traders=5, iterations=1, urls=vnm.urls(), test_dir=vnm.data_dir) test_case.setup() test_case.run() test_case.validate() if SAVE_INTEGRATION_TEST_DATA: vnm.create_result_archive("TestSmokeResults.tar.gz") print "Validator data and logs preserved in: " \ "TestSmokeResults.tar.gz" vnm.shutdown() except: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() if vnm.create_result_archive("TestSmokeResults.tar.gz"): print "Validator data and logs preserved in: " \ "TestSmokeResults.tar.gz" else: print "No Validator data and logs to preserve." raise
def test_intkey_load_voting(self): vnm = None vote_cfg = defaultValidatorConfig.copy() vote_cfg['LedgerType'] = 'voting' try: vnm = ValidatorNetworkManager(httpPort=9000, udpPort=9100, cfg=vote_cfg) vnm.launch_network(5) print "Testing transaction load." test = IntKeyLoadTest() test.setup(vnm.urls(), 100) test.run(2) test.validate() vnm.shutdown() except Exception as e: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() vnm.create_result_archive("TestSmokeResultsVote.tar.gz") print "Validator data and logs preserved in: " \ "TestSmokeResultsVote.tar.gz" raise e
def main():
    """CLI entry point: configure and launch a validator network, run
    either an interactive console or a stats loop, then shut down and
    clean up the data directory.

    Exits with status 1 if option parsing/configuration fails.
    """
    network_manager = None
    error_occurred = False
    try:
        opts = configure(sys.argv[1:])
    except Exception as e:
        # configuration errors go to stderr; nothing has been launched yet
        print >> sys.stderr, str(e)
        sys.exit(1)
    try:
        network_manager = ValidatorNetworkManager(
            txnvalidator=opts['validator'],
            cfg=opts['validator_config'],
            log_config=opts['log_config_dict'],
            data_dir=opts['data_dir'],
            block_chain_archive=opts['load_blockchain'],
            http_port=int(opts['http_port']),
            udp_port=int(opts['port']),
            host=opts['host'],
            endpoint_host=opts['endpoint'])
        network_manager.staged_launch_network(opts['count'])
        if opts['interactive']:
            console = ValidatorNetworkConsole(network_manager)
            console.cmdloop("\nWelcome to the sawtooth validator network ")
        else:
            # non-interactive mode: monitor stats on the first validator
            run_stats(network_manager.urls()[0])
    except KeyboardInterrupt:
        print "\nExiting"
    except ExitError as e:
        # this is an expected error/exit, don't print stack trace -
        # the code raising this exception is expected to have printed the error
        # details
        error_occurred = True
        print "\nFailed!\nExiting: {}".format(e)
    except:
        # unexpected failure: record it so cleanup below can prompt
        # before deleting logs, but keep going to shut down cleanly
        error_occurred = True
        traceback.print_exc()
        print "\nFailed!\nExiting: {}".format(sys.exc_info()[0])
    if network_manager:
        network_manager.shutdown()
        if opts['save_blockchain']:
            print "Saving blockchain to {}".format(opts['save_blockchain'])
            network_manager.create_result_archive(opts['save_blockchain'])
    # if dir was auto-generated
    if opts and "data_dir_is_tmp" in opts \
            and opts['data_dir_is_tmp'] \
            and os.path.exists(opts['data_dir']):
        delete_test_dir = True
        if error_occurred:
            # give the user a chance to keep logs for post-mortem
            delete_test_dir = prompt_yes_no(
                "Do you want to delete the data dir(logs, configs, etc)")
        if delete_test_dir:
            print "Cleaning temp data store {}".format(opts['data_dir'])
            if os.path.exists(opts['data_dir']):
                shutil.rmtree(opts['data_dir'])
        else:
            print "Data directory {}".format(opts['data_dir'])
    else:
        print "Data directory {}".format(opts['data_dir'])
def test_validator_shutdown_ext(self): urls = [] validators = [] vnm = None try: print "Launching validator network." vnm_config = defaultValidatorConfig.copy() vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100, cfg=vnm_config) firstwavevalidators = vnm.launch_network(5) urls = vnm.urls() for i in range(0, len(urls)): validators.append(vnm.validator(i)) keys = 10 rounds = 2 txn_intv = 0 print "Testing transaction load." test = IntKeyLoadTest() test.setup(urls, keys) test.validate() test.run(keys, rounds, txn_intv) validator_to_be_removed = 4 print ("shutting down validator ", validator_to_be_removed) vnm.validator_shutdown(validator_to_be_removed, force=True, term=False, archive=None ) print "sending more txns after SIGKILL" urls.pop(validator_to_be_removed) test.setup(urls, keys) test.validate() test.run(keys, rounds, txn_intv) validator_to_be_removed = 2 print "now validator shutdown w/ SIGINT" print ("shutdown(SIGINT) of validator ", validator_to_be_removed) vnm.validator_shutdown(validator_to_be_removed, force=False, term=False, archive=None ) print "sending more txns after SIGINT" urls = [] urls = vnm.urls() test.setup(urls, keys) test.validate() test.run(keys, rounds, txn_intv) validator_to_be_removed = len(vnm.urls()) - 1 print ("shutdown(SIGTERM) of validator ", validator_to_be_removed) vnm.validator_shutdown(validator_to_be_removed, force=False, term=True, archive=None ) print "sending more txns after SIGTERM" urls.pop(validator_to_be_removed) test.setup(urls, keys) test.validate() test.run(keys, rounds, txn_intv) vnm.shutdown() except Exception as e: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() vnm.create_result_archive("TestValidatorShutdown.tar.gz") print "Validator data and logs preserved in: " \ "TestValidatorShutdown.tar.gz" raise e
def main():
    """CLI entry point: configure and launch a validator network, run
    either an interactive console or a stats loop, then shut down and
    clean up the data directory.

    Exits with status 1 if option parsing/configuration fails.
    """
    network_manager = None
    error_occurred = False
    try:
        opts = configure(sys.argv[1:])
    except Exception as e:
        # configuration errors go to stderr; nothing has been launched yet
        print >> sys.stderr, str(e)
        sys.exit(1)
    try:
        network_manager = ValidatorNetworkManager(
            txnvalidator=opts['validator'],
            cfg=opts['validator_config'],
            log_config=opts['log_config_dict'],
            data_dir=opts['data_dir'],
            block_chain_archive=opts['load_blockchain'],
            http_port=int(opts['http_port']),
            udp_port=int(opts['port'])
        )
        network_manager.staged_launch_network(opts['count'])
        if opts['interactive']:
            console = ValidatorNetworkConsole(network_manager)
            console.cmdloop("\nWelcome to the sawtooth validator network ")
        else:
            # non-interactive mode: monitor stats on the first validator
            run_stats(network_manager.urls()[0])
    except KeyboardInterrupt:
        print "\nExiting"
    except ExitError as e:
        # this is an expected error/exit, don't print stack trace -
        # the code raising this exception is expected to have printed the error
        # details
        error_occurred = True
        print "\nFailed!\nExiting: {}".format(e)
    except:
        # unexpected failure: record it so cleanup below can prompt
        # before deleting logs, but keep going to shut down cleanly
        error_occurred = True
        traceback.print_exc()
        print "\nFailed!\nExiting: {}".format(sys.exc_info()[0])
    if network_manager:
        network_manager.shutdown()
        if opts['save_blockchain']:
            print "Saving blockchain to {}".format(opts['save_blockchain'])
            network_manager.create_result_archive(opts['save_blockchain'])
    # if dir was auto-generated
    if opts and "data_dir_is_tmp" in opts \
            and opts['data_dir_is_tmp'] \
            and os.path.exists(opts['data_dir']):
        delete_test_dir = True
        if error_occurred:
            # give the user a chance to keep logs for post-mortem
            delete_test_dir = prompt_yes_no(
                "Do you want to delete the data dir(logs, configs, etc)")
        if delete_test_dir:
            print "Cleaning temp data store {}".format(opts['data_dir'])
            if os.path.exists(opts['data_dir']):
                shutil.rmtree(opts['data_dir'])
        else:
            print "Data directory {}".format(opts['data_dir'])
    else:
        print "Data directory {}".format(opts['data_dir'])
def _run_int_load(self, config, num_nodes, archive_name,
                  tolerance=2, standard=5, block_id=True,
                  static_network=None,
                  vnm_timeout=None, txn_timeout=None,
                  n_keys=100, n_runs=2,
                  ):
    """
    This test is getting really beat up and needs a refactor
    Args:
        config (dict): Default config for each node
        num_nodes (int): Total number of nodes in network simulation
        archive_name (str): Name for tarball summary of test results
        tolerance (int): Length in blocks of permissible fork (if forks
            are permissible)
        standard (int): A variable intended to guarantee that our block
            level identity checks have significant data to operate on.
            Conceptually, depends on the value of tolerance:
                case(tolerance):
                    0:          minimum # of blocks required per validator
                    otherwise:  minimum # of converged blocks required per
                                divergent block (per validator)
            Motivation: We want to compare identity across the network on
            some meaningfully large set of blocks.  Introducing fork
            tolerance is problematic: the variable tolerance which is used
            to trim the ends of each ledger's block-chain could be abused
            to trivialize the test.  Therefore, as tolerance is increased
            (if non-zero), we use standard to proportionally increase the
            minimum number of overall blocks required by the test.
        block_id (bool): check for block (hash) identity
        static_network (StaticNetworkConfig): optional static network
            configuration
        vnm_timeout (int): timeout for initiating network
        txn_timeout (int): timeout for batch transactions
    """
    vnm = None
    try:
        test = IntKeyLoadTest(timeout=txn_timeout)
        if "TEST_VALIDATOR_URLS" not in os.environ:
            # no externally-supplied validators: launch our own network
            print "Launching validator network."
            vnm_config = config
            vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100,
                                          cfg=vnm_config,
                                          static_network=static_network)
            vnm.launch_network(num_nodes, max_time=vnm_timeout)
            urls = vnm.urls()
        else:
            print "Fetching Urls of Running Validators"
            # TEST_VALIDATORS_RUNNING is a list of validators urls
            # separated by commas.
            # e.g. 'http://localhost:8800,http://localhost:8801'
            urls = str(os.environ["TEST_VALIDATOR_URLS"]).split(",")
        print "Testing transaction load."
        test.setup(urls, n_keys)
        test.run(n_runs)
        test.validate()
        if block_id:
            # block-level identity check across the whole network
            self.assertEqual(True,
                             is_convergent(urls, tolerance=tolerance,
                                           standard=standard))
        if vnm:
            vnm.shutdown()
    except Exception:
        print "Exception encountered in test case."
        traceback.print_exc()
        if vnm:
            vnm.shutdown()
        raise
    finally:
        # archive only applies when we launched the network ourselves
        if vnm:
            vnm.create_result_archive("%s.tar.gz" % archive_name)
        else:
            print "No Validator data and logs to preserve"
def _run_int_load(self, config, num_nodes, archive_name,
                  tolerance=2, standard=5, block_id=True,
                  static_network=None,
                  vnm_timeout=None, txn_timeout=None,
                  n_keys=100, n_runs=2,
                  ):
    """
    This test is getting really beat up and needs a refactor
    Args:
        config (dict): Default config for each node
        num_nodes (int): Total number of nodes in network simulation
        archive_name (str): Name for tarball summary of test results
        tolerance (int): Length in blocks of permissible fork (if forks
            are permissible)
        standard (int): A variable intended to guarantee that our block
            level identity checks have significant data to operate on.
            Conceptually, depends on the value of tolerance:
                case(tolerance):
                    0:          minimum # of blocks required per validator
                    otherwise:  minimum # of converged blocks required per
                                divergent block (per validator)
            Motivation: We want to compare identity across the network on
            some meaningfully large set of blocks.  Introducing fork
            tolerance is problematic: the variable tolerance which is used
            to trim the ends of each ledger's block-chain could be abused
            to trivialize the test.  Therefore, as tolerance is increased
            (if non-zero), we use standard to proportionally increase the
            minimum number of overall blocks required by the test.
        block_id (bool): check for block (hash) identity
        static_network (StaticNetworkConfig): optional static network
            configuration
        vnm_timeout (int): timeout for initiating network
        txn_timeout (int): timeout for batch transactions
    """
    vnm = None
    urls = ""
    try:
        test = IntKeyLoadTest(timeout=txn_timeout)
        if "TEST_VALIDATOR_URLS" not in os.environ:
            # no externally-supplied validators: launch our own network
            print "Launching validator network."
            vnm_config = config
            vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100,
                                          cfg=vnm_config,
                                          static_network=static_network)
            vnm.launch_network(num_nodes, max_time=vnm_timeout)
            urls = vnm.urls()
        else:
            print "Fetching Urls of Running Validators"
            # TEST_VALIDATORS_RUNNING is a list of validators urls
            # separated by commas.
            # e.g. 'http://localhost:8800,http://localhost:8801'
            urls = str(os.environ["TEST_VALIDATOR_URLS"]).split(",")
        print "Testing transaction load."
        test.setup(urls, n_keys)
        test.run(n_runs)
        test.validate()
        if block_id:
            # check for block id convergence across network:
            sample_size = max(1, tolerance) * standard
            print "Testing block-level convergence with min sample size:",
            print " %s (after tolerance: %s)" % (sample_size, tolerance)
            # ...get all blockids from each server, newest last
            block_lists = [
                LedgerWebClient(x).get_block_list() for x in urls]
            for ls in block_lists:
                ls.reverse()
            # ...establish preconditions
            max_mag = len(max(block_lists, key=len))
            min_mag = len(min(block_lists, key=len))
            self.assertGreaterEqual(
                tolerance,
                max_mag - min_mag,
                'block list magnitude differences (%s) '
                'exceed tolerance (%s)' % (
                    max_mag - min_mag, tolerance))
            effective_sample_size = max_mag - tolerance
            print 'effective sample size: %s' % (effective_sample_size)
            self.assertGreaterEqual(
                effective_sample_size,
                sample_size,
                'not enough target samples to determine convergence')
            # ...(optionally) permit reasonable forks by normalizing lists
            if tolerance > 0:
                block_lists = [
                    block_list[0:effective_sample_size]
                    for block_list in block_lists
                ]
            # ...id-check (possibly normalized) cross-server block chains
            for (i, block_list) in enumerate(block_lists):
                self.assertEqual(
                    block_lists[0],
                    block_list,
                    '%s is divergent:\n\t%s vs.\n\t%s' % (
                        urls[i], block_lists[0], block_list))
        if vnm:
            vnm.shutdown()
    except Exception:
        print "Exception encountered in test case."
        traceback.print_exc()
        if vnm:
            vnm.shutdown()
        raise
    finally:
        # archive only applies when we launched the network ourselves
        if vnm:
            vnm.create_result_archive("%s.tar.gz" % (archive_name))
        else:
            print "No Validator data and logs to preserve"
def test_two_clique(self): # this topology forms 2 exclusive cliques when n2 is severed vulnerable_mat = [ [1, 1, 0, 0, 0], [1, 1, 1, 0, 0], [0, 1, 1, 1, 0], [0, 0, 1, 1, 1], [0, 0, 0, 1, 1], ] two_clique_mat = copy.deepcopy(vulnerable_mat) two_clique_mat[2][2] = 0 n = len(vulnerable_mat) vnm = ValidatorNetworkManager(n) print try: print 'phase 0: build vulnerably connected 5-net:' from txnintegration.netconfig import NetworkConfigProvider provider = NetworkConfigProvider() net_cfg = NetworkConfig(n, provider=provider) net_cfg.set_nodes(vulnerable_mat) net_cfg.set_peers(vulnerable_mat) net_cfg.set_blacklist() vcc = ValidatorCollectionController(net_cfg) vnm.initialize(net_cfg, vcc, NopEdgeController(net_cfg)) print 'phase 1: launch vulnerably connected 5-net:' vnm.do_genesis(probe_seconds=0) vnm.launch(probe_seconds=0) print 'phase 2: validate state across 5-net:' sit_rep(vnm.urls(), verbosity=2) print 'phase 3: morph 5-net into two exclusive 2-net cliques:' vnm.update(node_mat=two_clique_mat, probe_seconds=0, reg_seconds=0) print 'providing time for convergence (likely partial)...' time.sleep(32) sit_rep(vnm.urls()) print 'phase 4: generate chain-ext A on clique {0, 1}:' url = vnm.urls()[0] print 'sending transactions to %s...' % (url) ikcA = IntegerKeyClient(baseurl=url, keystring=gen_pk()) self._do_work(ikcA, 5, 2) print 'providing time for partial convergence...' time.sleep(8) sit_rep(vnm.urls()) print 'phase 5: generate chain-ext B on clique {3, 4}, |B| = 2|A|:' url = vnm.urls()[-1] print 'sending transactions to %s...' % (url) ikcB = IntegerKeyClient(baseurl=url, keystring=gen_pk()) self._do_work(ikcB, 1, 4) print 'providing time for partial convergence...' time.sleep(8) sit_rep(vnm.urls()) print 'TEST 1: asserting network is forked' self.assertEquals(False, is_convergent(vnm.urls(), standard=3)) print 'phase 6: reconnect 5-net:' print 'rezzing validator-2 with InitialConnectivity = |Peers|...' 
cfg = vnm.get_configuration(2) cfg['InitialConnectivity'] = 2 vnm.set_configuration(2, cfg) vnm.update(node_mat=vulnerable_mat, probe_seconds=0, reg_seconds=0) print 'phase 7: validate state across 5-net:' print 'providing time for global convergence...' time.sleep(64) sit_rep(vnm.urls()) print 'TEST 2: asserting network is convergent' self.assertEquals(True, is_convergent(vnm.urls(), standard=4)) except Exception as e: print 'Exception encountered: %s' % (e.message) traceback.print_exc() sit_rep(vnm.urls()) raise finally: vnm.shutdown(archive_name="TestPartitionRecoveryResults")
class TestBasicStartup(unittest.TestCase):
    """Integration tests covering validator network startup scenarios:
    daemonized launch, delayed join, staged initial connectivity, and
    joining via an explicit node list."""

    def setUp(self):
        # number of daemon validators to launch; overridable via env
        self.number_of_daemons = int(os.environ.get("NUMBER_OF_DAEMONS", 5))
        self.vnm = ValidatorNetworkManager(cfg=defaultValidatorConfig.copy())

    def _verify_equality_of_block_lists(self, webclients):
        """Assert every validator reports the same (equal-length) list of
        blocks containing EndpointRegistry transactions."""
        block_lists = []
        for ledger_client in webclients:
            block_list = []
            # node ids known to this validator's endpoint-registry store
            node_ids = set(ledger_client.get_store(
                txntype=EndpointRegistryTransaction))
            for b in ledger_client.get_block_list():
                tids_from_blocks = ledger_client.get_block(
                    blockid=b, field='TransactionIDs')
                node_ids_from_blocks = []
                for tid in tids_from_blocks:
                    node = ledger_client.\
                        get_transaction(tid, 'Update').get('NodeIdentifier')
                    node_ids_from_blocks.append(node)
                # keep only blocks that carry endpoint-registry updates
                if len(node_ids.intersection(node_ids_from_blocks)) > 0:
                    block_list.append(b)
            block_lists.append(block_list)
        self.assertEqual(len(max(block_lists, key=len)),
                         len(min(block_lists, key=len)),
                         "The length of the EndpointRegistry "
                         "block lists are the same for all validators")
        zeroth_block_list = block_lists[0]
        for bl in block_lists[1:]:
            self.assertEqual(zeroth_block_list, bl,
                             "The block lists are the same for "
                             "each validator")

    def _verify_orderly_transactions(self, webclients, node_identifiers):
        """Assert each validator saw exactly one transaction per
        non-genesis block, in the same order as node_identifiers."""
        for ledger_client in webclients:
            node_ids = []
            for b in ledger_client.get_block_list():
                if not ledger_client.get_block(blockid=b,
                                               field='BlockNum') == 0L:
                    # the genesis block has no transactions
                    tids_from_blocks = ledger_client.get_block(
                        blockid=b, field='TransactionIDs')
                    self.assertEqual(len(tids_from_blocks), 1,
                                     "One transaction per block")
                    node = ledger_client.get_transaction(
                        tids_from_blocks[0], 'Update').get('NodeIdentifier')
                    node_ids.append(node)
            # block list is newest-first; reverse to registration order
            node_ids.reverse()
            self.assertEqual(len(node_identifiers), len(node_ids),
                             "The node list lengths are the same")
            self.assertEqual(node_ids, node_identifiers,
                             "The node lists are the same")

    def test_basic_startup(self):
        """Launch daemonized validators and verify block-list equality."""
        try:
            self.vnm.launch_network(count=self.number_of_daemons,
                                    others_daemon=True)
            validator_urls = self.vnm.urls()
            # IntegerKeyClient is only needed to send one more transaction
            # so n-1=number of EndpointRegistryTransactions
            integer_key_clients = [
                IntegerKeyClient(baseurl=u, keystring=generate_private_key())
                for u in validator_urls
            ]
            ledger_web_clients = [
                LedgerWebClient(url=u) for u in validator_urls]
            for int_key_client in integer_key_clients:
                int_key_client.set(key=str(1), value=20)
            self._verify_equality_of_block_lists(ledger_web_clients)
        finally:
            self.vnm.shutdown()
            self.vnm.create_result_archive('TestDaemonStartup.tar.gz')

    def test_join_after_delay_start(self):
        """Start a delayed validator after the network is up, trigger it
        via its /command endpoint, and verify it joins and converges."""
        delayed_validator = None
        validator_urls = []
        try:
            self.vnm.launch_network(5)
            validator_urls = self.vnm.urls()
            delayed_validator = self.vnm.launch_node(delay=True)
            time.sleep(5)
            # tell the delayed validator to actually start
            command_url = delayed_validator.url + '/command'
            request = urllib2.Request(
                url=command_url,
                headers={'Content-Type': 'application/json'})
            response = urllib2.urlopen(request, data='{"action": "start"}')
            response.close()
            self.assertEqual(response.code, 200,
                             "Successful post to delayed validator")
            validator_urls.append(delayed_validator.url)
            ledger_web_clients = [
                LedgerWebClient(url=u) for u in validator_urls
            ]
            with Progress("Waiting for registration of 1 validator") as p:
                url = validator_urls[0]
                to = TimeOut(60)
                while not delayed_validator.is_registered(url):
                    if to():
                        raise ExitError(
                            "{} delayed validator failed to register "
                            "within {}S.".format(
                                1, to.WaitTime))
                    p.step()
                    time.sleep(1)
                    # surface validator-side errors early with full logs
                    try:
                        delayed_validator.check_error()
                    except ValidatorManagerException as vme:
                        delayed_validator.dump_log()
                        delayed_validator.dump_stderr()
                        raise ExitError(str(vme))
            integer_key_clients = [
                IntegerKeyClient(baseurl=u, keystring=generate_private_key())
                for u in validator_urls
            ]
            for int_key_client in integer_key_clients:
                int_key_client.set(key=str(1), value=20)
            self._verify_equality_of_block_lists(ledger_web_clients)
        finally:
            self.vnm.shutdown()
            # NOTE(review): `validator_urls is not []` is an identity
            # comparison and is always True; `!= []` was likely intended
            if delayed_validator is not None and \
                    validator_urls is not [] and \
                    delayed_validator.url not in validator_urls:
                delayed_validator.shutdown()
            self.vnm.create_result_archive("TestDelayedStart.tar.gz")

    def test_initial_connectivity_n_minus_1(self):
        """Launch a genesis node, then nodes with increasing
        InitialConnectivity, and verify orderly registration order."""
        try:
            self.vnm.validator_config['LedgerURL'] = "**none**"
            self.vnm.validator_config['Restore'] = False
            validator = self.vnm.launch_node(genesis=True)
            validators = [validator]
            with Progress("Launching validator network") as p:
                self.vnm.validator_config['LedgerURL'] = validator.url
                self.vnm.validator_config['Restore'] = False
                node_identifiers = [validator.Address]
                for i in range(1, 5):
                    # each successive node requires one more initial peer
                    self.vnm.validator_config['InitialConnectivity'] = i
                    v = self.vnm.launch_node(genesis=False, daemon=False)
                    validators.append(v)
                    node_identifiers.append(v.Address)
                    p.step()
            self.vnm.wait_for_registration(validators, validator)
            validator_urls = self.vnm.urls()
            ledger_web_clients = [
                LedgerWebClient(url=u) for u in validator_urls
            ]
            integer_key_clients = [
                IntegerKeyClient(baseurl=u, keystring=generate_private_key())
                for u in validator_urls
            ]
            for int_key_client in integer_key_clients:
                int_key_client.set(key=str(1), value=20)
            self._verify_equality_of_block_lists(ledger_web_clients)
            self._verify_orderly_transactions(ledger_web_clients,
                                              node_identifiers)
        finally:
            self.vnm.shutdown()
            self.vnm.create_result_archive(
                'TestOrderlyInitialConnectivity.tar.gz')

    def test_adding_node_with_nodelist(self):
        """Add a node configured with an explicit Nodes/Peers list (built
        from the endpoint registry) and verify it joins the network."""
        try:
            validators = self.vnm.launch_network(5)
            validator_urls = self.vnm.urls()
            endpoint_client = EndpointRegistryClient(validator_urls[0])
            nodes = []
            for epl in endpoint_client.get_endpoint_list():
                node = {}
                node['Host'] = epl['Host']
                node['Port'] = epl['Port']
                node['Identifier'] = epl['NodeIdentifier']
                node['NodeName'] = epl['Name']
                nodes.append(node)
            # 'validator-x' is not in the node list; presumably included
            # to exercise tolerance of unknown peer names — TODO confirm
            peers = [nodes[0]['NodeName'], nodes[2]['NodeName'],
                     'validator-x']
            self.vnm.validator_config['Nodes'] = nodes
            self.vnm.validator_config['Peers'] = peers
            v = self.vnm.launch_node()
            validator_urls.append(v.url)
            self.vnm.wait_for_registration([v], validators[0])
            ledger_web_clients = [
                LedgerWebClient(url=u) for u in validator_urls
            ]
            integer_key_clients = [
                IntegerKeyClient(baseurl=u, keystring=generate_private_key())
                for u in validator_urls
            ]
            for int_key_client in integer_key_clients:
                int_key_client.set(key=str(1), value=20)
            self._verify_equality_of_block_lists(ledger_web_clients)
        finally:
            self.vnm.shutdown()
            self.vnm.create_result_archive('TestNodeList.tar.gz')
def _run_int_load(
        self,
        config,
        num_nodes,
        archive_name,
        tolerance=2,
        standard=5,
        block_id=True,
        static_network=None,
        vnm_timeout=None,
        txn_timeout=None,
        n_keys=100,
        n_runs=2,
):
    """
    This test is getting really beat up and needs a refactor
    Args:
        config (dict): Default config for each node
        num_nodes (int): Total number of nodes in network simulation
        archive_name (str): Name for tarball summary of test results
        tolerance (int): Length in blocks of permissible fork (if forks
            are permissible)
        standard (int): A variable intended to guarantee that our block
            level identity checks have significant data to operate on.
            Conceptually, depends on the value of tolerance:
                case(tolerance):
                    0:          minimum # of blocks required per validator
                    otherwise:  minimum # of converged blocks required per
                                divergent block (per validator)
            Motivation: We want to compare identity across the network on
            some meaningfully large set of blocks.  Introducing fork
            tolerance is problematic: the variable tolerance which is used
            to trim the ends of each ledger's block-chain could be abused
            to trivialize the test.  Therefore, as tolerance is increased
            (if non-zero), we use standard to proportionally increase the
            minimum number of overall blocks required by the test.
        block_id (bool): check for block (hash) identity
        static_network (StaticNetworkConfig): optional static network
            configuration
        vnm_timeout (int): timeout for initiating network
        txn_timeout (int): timeout for batch transactions
    """
    vnm = None
    try:
        test = IntKeyLoadTest(timeout=txn_timeout)
        if "TEST_VALIDATOR_URLS" not in os.environ:
            # no externally-supplied validators: launch our own network
            print "Launching validator network."
            vnm_config = config
            vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100,
                                          cfg=vnm_config,
                                          static_network=static_network)
            vnm.launch_network(num_nodes, max_time=vnm_timeout)
            urls = vnm.urls()
        else:
            print "Fetching Urls of Running Validators"
            # TEST_VALIDATORS_RUNNING is a list of validators urls
            # separated by commas.
            # e.g. 'http://localhost:8800,http://localhost:8801'
            urls = str(os.environ["TEST_VALIDATOR_URLS"]).split(",")
        print "Testing transaction load."
        test.setup(urls, n_keys)
        test.run(n_runs)
        test.validate()
        if block_id:
            # check for block id convergence across network:
            sample_size = max(1, tolerance) * standard
            print "Testing block-level convergence with min sample size:",
            print " %s (after tolerance: %s)" % (sample_size, tolerance)
            # ...get all blockids from each server, newest last
            block_lists = [
                LedgerWebClient(x).get_block_list() for x in urls
            ]
            for ls in block_lists:
                ls.reverse()
            # ...establish preconditions
            max_mag = len(max(block_lists, key=len))
            min_mag = len(min(block_lists, key=len))
            self.assertGreaterEqual(
                tolerance,
                max_mag - min_mag,
                'block list magnitude differences (%s) '
                'exceed tolerance (%s)' % (max_mag - min_mag, tolerance))
            effective_sample_size = max_mag - tolerance
            print 'effective sample size: %s' % effective_sample_size
            self.assertGreaterEqual(
                effective_sample_size,
                sample_size,
                'not enough target samples to determine convergence')
            # ...(optionally) permit reasonable forks by normalizing lists
            if tolerance > 0:
                block_lists = [
                    block_list[0:effective_sample_size]
                    for block_list in block_lists
                ]
            # ...id-check (possibly normalized) cross-server block chains
            for (i, block_list) in enumerate(block_lists):
                self.assertEqual(
                    block_lists[0],
                    block_list,
                    '%s is divergent:\n\t%s vs.\n\t%s' % (urls[i],
                                                          block_lists[0],
                                                          block_list))
        if vnm:
            vnm.shutdown()
    except Exception:
        print "Exception encountered in test case."
        traceback.print_exc()
        if vnm:
            vnm.shutdown()
        raise
    finally:
        # archive only applies when we launched the network ourselves
        if vnm:
            vnm.create_result_archive("%s.tar.gz" % archive_name)
        else:
            print "No Validator data and logs to preserve"
def _run_int_load( self, config, num_nodes, archive_name, tolerance=2, standard=5, block_id=True, static_network=None, vnm_timeout=None, txn_timeout=None, n_keys=100, n_runs=2, ): """ This test is getting really beat up and needs a refactor Args: config (dict): Default config for each node num_nodes (int): Total number of nodes in network simulation archive_name (str): Name for tarball summary of test results tolerance (int): Length in blocks of permissible fork (if forks are permissible) standard (int): A variable intended to guarantee that our block level identity checks have significant data to operate on. Conceptually, depends on the value of tolerance: case(tolerance): 0: minimum # of blocks required per validator otherwise: minimum # of converged blocks required per divergent block (per validator) Motivation: We want to compare identity across the network on some meaningfully large set of blocks. Introducing fork tolerance is problematic: the variable tolerance which is used to trim the ends of each ledger's block-chain could be abused to trivialize the test. Therefore, as tolerance is increased (if non-zero), we use standard to proportionally increase the minimum number of overall blocks required by the test. block_id (bool): check for block (hash) identity static_network (StaticNetworkConfig): optional static network configuration vnm_timeout (int): timeout for initiating network txn_timeout (int): timeout for batch transactions """ vnm = None try: test = IntKeyLoadTest(timeout=txn_timeout) if "TEST_VALIDATOR_URLS" not in os.environ: print "Launching validator network." vnm_config = config vnm = ValidatorNetworkManager(http_port=9000, udp_port=9100, cfg=vnm_config, static_network=static_network) vnm.launch_network(num_nodes, max_time=vnm_timeout) urls = vnm.urls() else: print "Fetching Urls of Running Validators" # TEST_VALIDATORS_RUNNING is a list of validators urls # separated by commas. # e.g. 
'http://localhost:8800,http://localhost:8801' urls = str(os.environ["TEST_VALIDATOR_URLS"]).split(",") print "Testing transaction load." test.setup(urls, n_keys) test.run(n_runs) test.validate() if block_id: self.assertEqual( True, is_convergent(urls, tolerance=tolerance, standard=standard)) if vnm: vnm.shutdown() except Exception: print "Exception encountered in test case." traceback.print_exc() if vnm: vnm.shutdown() raise finally: if vnm: vnm.create_result_archive("%s.tar.gz" % archive_name) else: print "No Validator data and logs to preserve"
class TestBasicStartup(unittest.TestCase): def setUp(self): self.number_of_daemons = int(os.environ.get("NUMBER_OF_DAEMONS", 5)) self.vnm = ValidatorNetworkManager(cfg=defaultValidatorConfig.copy()) def _verify_equality_of_block_lists(self, webclients): block_lists = [] for ledger_client in webclients: block_list = [] node_ids = set( ledger_client.get_store(txntype=EndpointRegistryTransaction)) for b in ledger_client.get_block_list(): tids_from_blocks = ledger_client.get_block( blockid=b, field='TransactionIDs') node_ids_from_blocks = [] for tid in tids_from_blocks: node = ledger_client.\ get_transaction(tid, 'Update').get('NodeIdentifier') node_ids_from_blocks.append(node) if len(node_ids.intersection(node_ids_from_blocks)) > 0: block_list.append(b) block_lists.append(block_list) self.assertEqual( len(max(block_lists, key=len)), len(min(block_lists, key=len)), "The length of the EndpointRegistry " "block lists are the same for all validators") zeroth_block_list = block_lists[0] for bl in block_lists[1:]: self.assertEqual( zeroth_block_list, bl, "The block lists are the same for each validator") def _verify_orderly_transactions(self, webclients, node_identifiers): for ledger_client in webclients: node_ids = [] for b in ledger_client.get_block_list(): if not ledger_client.get_block(blockid=b, field='BlockNum') == 0L: # the genesis block has no transactions tids_from_blocks = ledger_client.get_block( blockid=b, field='TransactionIDs') self.assertEqual(len(tids_from_blocks), 1, "One transaction per block") node = ledger_client.get_transaction( tids_from_blocks[0], 'Update').get('NodeIdentifier') node_ids.append(node) node_ids.reverse() self.assertEqual(len(node_identifiers), len(node_ids), "The node list lengths are the same") self.assertEqual(node_ids, node_identifiers, "The node lists are the same") def test_basic_startup(self): try: self.vnm.launch_network(count=self.number_of_daemons, others_daemon=True) validator_urls = self.vnm.urls() # IntegerKeyClient is only 
needed to send one more transaction # so n-1=number of EndpointRegistryTransactions integer_key_clients = [ IntegerKeyClient(baseurl=u, keystring=generate_private_key()) for u in validator_urls ] ledger_web_clients = [ LedgerWebClient(url=u) for u in validator_urls ] for int_key_client in integer_key_clients: int_key_client.set(key=str(1), value=20) self._verify_equality_of_block_lists(ledger_web_clients) finally: self.vnm.shutdown() self.vnm.create_result_archive('TestDaemonStartup.tar.gz') def test_join_after_delay_start(self): delayed_validator = None validator_urls = [] try: self.vnm.launch_network(5) validator_urls = self.vnm.urls() delayed_validator = self.vnm.launch_node(delay=True) time.sleep(5) command_url = delayed_validator.url + '/command' request = urllib2.Request( url=command_url, headers={'Content-Type': 'application/json'}) response = urllib2.urlopen(request, data='{"action": "start"}') response.close() self.assertEqual(response.code, 200, "Successful post to delayed validator") validator_urls.append(delayed_validator.url) ledger_web_clients = [ LedgerWebClient(url=u) for u in validator_urls ] with Progress("Waiting for registration of 1 validator") as p: url = validator_urls[0] to = TimeOut(60) while not delayed_validator.is_registered(url): if to(): raise ExitError( "{} delayed validator failed to register " "within {}S.".format(1, to.WaitTime)) p.step() time.sleep(1) try: delayed_validator.check_error() except ValidatorManagerException as vme: delayed_validator.dump_log() delayed_validator.dump_stderr() raise ExitError(str(vme)) integer_key_clients = [ IntegerKeyClient(baseurl=u, keystring=generate_private_key()) for u in validator_urls ] for int_key_client in integer_key_clients: int_key_client.set(key=str(1), value=20) self._verify_equality_of_block_lists(ledger_web_clients) finally: self.vnm.shutdown() if delayed_validator is not None and \ validator_urls is not [] and \ delayed_validator.url not in validator_urls: delayed_validator.shutdown() 
self.vnm.create_result_archive("TestDelayedStart.tar.gz") def test_initial_connectivity_n_minus_1(self): try: self.vnm.validator_config['LedgerURL'] = "**none**" self.vnm.validator_config['Restore'] = False validator = self.vnm.launch_node(genesis=True) validators = [validator] with Progress("Launching validator network") as p: self.vnm.validator_config['LedgerURL'] = validator.url self.vnm.validator_config['Restore'] = False node_identifiers = [validator.Address] for i in range(1, 5): self.vnm.validator_config['InitialConnectivity'] = i v = self.vnm.launch_node(genesis=False, daemon=False) validators.append(v) node_identifiers.append(v.Address) p.step() self.vnm.wait_for_registration(validators, validator) validator_urls = self.vnm.urls() ledger_web_clients = [ LedgerWebClient(url=u) for u in validator_urls ] integer_key_clients = [ IntegerKeyClient(baseurl=u, keystring=generate_private_key()) for u in validator_urls ] for int_key_client in integer_key_clients: int_key_client.set(key=str(1), value=20) self._verify_equality_of_block_lists(ledger_web_clients) self._verify_orderly_transactions(ledger_web_clients, node_identifiers) finally: self.vnm.shutdown() self.vnm.create_result_archive( 'TestOrderlyInitialConnectivity.tar.gz') def test_adding_node_with_nodelist(self): try: validators = self.vnm.launch_network(5) validator_urls = self.vnm.urls() endpoint_client = EndpointRegistryClient(validator_urls[0]) nodes = [] for epl in endpoint_client.get_endpoint_list(): node = {} node['Host'] = epl['Host'] node['Port'] = epl['Port'] node['Identifier'] = epl['NodeIdentifier'] node['ShortName'] = epl['Name'] nodes.append(node) peers = [ nodes[0]['ShortName'], nodes[2]['ShortName'], 'validator-x' ] self.vnm.validator_config['Nodes'] = nodes self.vnm.validator_config['Peers'] = peers v = self.vnm.launch_node() validator_urls.append(v.url) self.vnm.wait_for_registration([v], validators[0]) ledger_web_clients = [ LedgerWebClient(url=u) for u in validator_urls ] 
integer_key_clients = [ IntegerKeyClient(baseurl=u, keystring=generate_private_key()) for u in validator_urls ] for int_key_client in integer_key_clients: int_key_client.set(key=str(1), value=20) self._verify_equality_of_block_lists(ledger_web_clients) finally: self.vnm.shutdown() self.vnm.create_result_archive('TestNodeList.tar.gz')