def wait_for_registration(self, validators, validator, max_time=None):
    """
    Wait for newly launched validators to register.
    validators: list of validators on which to wait
    validator: running validator against which to verify registration
    """
    max_time = 120 if max_time is None else max_time
    unregistered_count = len(validators)

    with Progress("{0} waiting for registration of {1} validators".format(
            validator.name,
            unregistered_count,
    )) as p:
        url = validator.url
        to = TimeOut(max_time)

        while unregistered_count > 0:
            if to():
                raise ExitError(
                    "{} extended validators failed to register "
                    "within {}S.".format(unregistered_count, to.WaitTime))

            p.step()
            time.sleep(1)

            unregistered_count = 0
            for v in validators:
                if not v.is_registered(url):
                    unregistered_count += 1
                try:
                    v.check_error()
                except ValidatorManagerException as vme:
                    v.dump_log()
                    v.dump_stderr()
                    raise ExitError(str(vme))

    return True
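
# --- Hedged usage sketch (illustrative, not part of the original source) ---
# Shows the call pattern that launch_network (further below) relies on after
# spawning extra nodes. 'vnm' is assumed to be a ValidatorNetworkManager and
# 'first' an already-running, registered validator; the node count of 2 is
# made up for the example.
def _example_wait_for_registration(vnm, first):
    extras = [vnm.launch_node() for _ in range(2)]   # two new validators
    # Poll the first validator's registry until both extras appear, or
    # raise ExitError once the 120-second budget is exhausted.
    vnm.wait_for_registration(extras, first, max_time=120)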
def test_join_after_delay_start(self):
    delayed_validator = None
    validator_urls = []
    try:
        self.vnm.launch_network(5)
        validator_urls = self.vnm.urls()
        delayed_validator = self.vnm.launch_node(delay=True)
        time.sleep(5)
        command_url = delayed_validator.url + '/command'
        request = urllib2.Request(
            url=command_url,
            headers={'Content-Type': 'application/json'})
        response = urllib2.urlopen(request, data='{"action": "start"}')
        response.close()
        self.assertEqual(response.code, 200,
                         "Successful post to delayed validator")

        validator_urls.append(delayed_validator.url)
        ledger_web_clients = [
            LedgerWebClient(url=u) for u in validator_urls
        ]

        with Progress("Waiting for registration of 1 validator") as p:
            url = validator_urls[0]
            to = TimeOut(60)
            while not delayed_validator.is_registered(url):
                if to():
                    raise ExitError(
                        "{} delayed validator failed to register "
                        "within {}S.".format(1, to.WaitTime))
                p.step()
                time.sleep(1)
                try:
                    delayed_validator.check_error()
                except ValidatorManagerException as vme:
                    delayed_validator.dump_log()
                    delayed_validator.dump_stderr()
                    raise ExitError(str(vme))

        integer_key_clients = [
            IntegerKeyClient(baseurl=u, keystring=generate_private_key())
            for u in validator_urls
        ]
        for int_key_client in integer_key_clients:
            int_key_client.set(key=str(1), value=20)

        self._verify_equality_of_block_lists(ledger_web_clients)

    finally:
        self.vnm.shutdown()
        # If the delayed validator never made it into the URL list, shut it
        # down explicitly. (The original "validator_urls is not []" identity
        # check was always true and has been dropped; the membership test
        # already covers an empty list.)
        if delayed_validator is not None and \
                delayed_validator.url not in validator_urls:
            delayed_validator.shutdown()
        self.vnm.create_result_archive("TestDelayedStart.tar.gz")
def __init__(self,
             net_config,
             txnvalidator=None,
             data_dir=None,
             endpoint_host=None,
             block_chain_archive=None,
             log_config=None):
    super(ValidatorCollectionController, self).__init__(net_config.n_mag)
    self._validators = [[None, x] for x in net_config.get_config_list()]

    if txnvalidator is None:
        txnvalidator = find_txn_validator()
    self.txnvalidator = txnvalidator

    self._endpoint_host = endpoint_host
    self.validator_log_config = log_config

    self.temp_data_dir = False
    if data_dir is None:
        self.temp_data_dir = True
        data_dir = tempfile.mkdtemp()
    self.data_dir = data_dir

    self.block_chain_archive = block_chain_archive
    if block_chain_archive is not None:
        if not os.path.isfile(block_chain_archive):
            raise ExitError("Block chain archive to load {} does not "
                            "exist.".format(block_chain_archive))
        else:
            self.unpack_blockchain(block_chain_archive)

    self.admin_node = ValidatorCollectionController.AdminNode()
def find_txn_validator():
    validator = None
    scriptDir = os.path.dirname(os.path.realpath(__file__))
    search_path = ""
    if "CURRENCYHOME" in os.environ:
        search_path = os.path.join(
            os.environ['CURRENCYHOME'], 'bin') \
            + os.pathsep \
            + os.path.realpath(os.path.join(scriptDir, '..', 'bin'))
    else:
        search_path = os.path.realpath(
            os.path.join(scriptDir, '..', 'bin'))

    if 'PATH' in os.environ:
        search_path = search_path + os.pathsep + os.environ['PATH']

    for directory in search_path.split(os.pathsep):
        if os.path.exists(os.path.join(directory, 'txnvalidator')):
            validator = os.path.join(directory, 'txnvalidator')
            return validator

    if validator is None:
        print "txnvalidator: {}".format(validator)
        raise ExitError("Could not find txnvalidator in your $PATH")

    return validator
def load_log_config(log_config_file):
    log_dic = None
    if log_config_file.split(".")[-1] == "js":
        try:
            with open(log_config_file) as log_config_fd:
                log_dic = json.load(log_config_fd)
        except IOError, ex:
            raise ExitError("Could not read log config: {}".format(str(ex)))
    return log_dic
def configure(args):
    opts = parse_args(args)

    validator_config = {}
    if opts.config is not None:
        if os.path.exists(opts.config):
            validator_config = parse_configuration_file(opts.config)
        else:
            raise ExitError("Config file does not exist: {}".format(
                opts.config))

    opts.log_config_dict = None
    if opts.log_config is not None:
        if not os.path.exists(opts.log_config):
            raise ExitError("log-config file does not exist: {}"
                            .format(opts.log_config))
        else:
            opts.log_config_dict = load_log_config(opts.log_config)

    keys = [
        'NodeName',
        'Listen',
        'KeyFile',
        'AdministrationNode',
        'DataDirectory',
        'LedgerURL',
    ]
    if any(k in validator_config for k in keys):
        print("Overriding the following keys from validator configuration "
              "file: {}".format(opts.config))
    for k in keys:
        if k in validator_config:
            print("\t{}".format(k))
            del validator_config[k]
    if opts.log_config:
        print("\tLogConfigFile")

    opts.validator_config = validator_config
    opts.count = max(1, opts.count)

    print("Configuration:")
    pp.pprint(opts.__dict__)

    return vars(opts)
def __init__(self,
             txnvalidator=None,
             cfg=None,
             data_dir=None,
             http_port=8800,
             udp_port=5500,
             host='localhost',
             endpoint_host=None,
             block_chain_archive=None,
             log_config=None,
             static_network=None):
    self.static_network = static_network

    self._validators = []
    self._validator_map = {}
    self.validator_config = None

    self._next_validator_id = 0
    self._host = host or 'localhost'
    self._http_port_base = http_port
    self._udp_port_base = udp_port
    self._endpoint_host = endpoint_host

    self.validator_config = cfg or defaultValidatorConfig
    self.validator_log_config = log_config

    self.txnvalidator = txnvalidator or find_txn_validator()

    self.temp_data_dir = False
    if data_dir is None:
        self.temp_data_dir = True
        data_dir = tempfile.mkdtemp()
    self.data_dir = data_dir

    self.block_chain_archive = block_chain_archive
    if block_chain_archive is not None:
        if not os.path.isfile(block_chain_archive):
            raise ExitError("Block chain archive to load {} does not "
                            "exist.".format(block_chain_archive))
        else:
            self.unpack_blockchain(block_chain_archive)

    self.admin_node = ValidatorNetworkManager.AdminNode()

    self.validator_config['DataDirectory'] = self.data_dir
    self.validator_config["AdministrationNode"] = self.admin_node.Address

    self.timeout = 3
def probe_validator(self, validator, max_time=30):
    with Progress("probing status of {0}".format(validator.name)) as p:
        to = TimeOut(max_time)
        success = False
        while success is False:
            if to():
                raise ExitError(
                    "{} failed to initialize within {}S.".format(
                        validator.name, to.WaitTime))
            try:
                success = validator.is_started()
            except Exception as e:
                print(e.message)
            p.step()
            time.sleep(1)
def launch_network(self, count=1, max_time=None, others_daemon=False):
    validators = []

    with Progress("Launching initial validator") as p:
        cfg = {
            'LedgerURL': "**none**",
            'Restore': self.block_chain_archive,
        }

        validator = self.launch_node(overrides=cfg, genesis=True,
                                     daemon=False)
        validators.append(validator)

        probe_func = validator.is_registered
        if self.validator_config.get('LedgerType', '') == 'quorum':
            probe_func = validator.is_started

        while not probe_func():
            try:
                validator.check_error()
            except ValidatorManagerException as vme:
                validator.dump_log()
                validator.dump_stderr()
                raise ExitError(str(vme))
            p.step()
            time.sleep(1)

    if count > 1:
        with Progress("Launching validator network") as p:
            cfg = {
                'LedgerURL': validator.url,
                'Restore': self.block_chain_archive,
            }
            for _ in range(1, count):
                v = self.launch_node(overrides=cfg, genesis=False,
                                     daemon=others_daemon)
                validators.append(v)
                p.step()

        self.wait_for_registration(validators, validator, max_time=max_time)

    return validators
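
# --- Hedged usage sketch (illustrative, not part of the original source) ---
# Mirrors how the test above drives the network manager: bring up a small
# network, collect the validator URLs, and always shut down in a finally
# block. The constructor arguments and node count are assumptions for
# illustration only.
def _example_launch_network():
    vnm = ValidatorNetworkManager(http_port=8800, udp_port=5500)
    try:
        validators = vnm.launch_network(3)   # genesis node plus two peers
        urls = vnm.urls()                    # HTTP endpoints of all validators
        print("launched {} validators: {}".format(len(validators), urls))
    finally:
        vnm.shutdown()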
def find_executable(executable_name):
    ret_val = None
    scriptDir = os.path.dirname(os.path.realpath(__file__))
    search_path = ""
    if "CURRENCYHOME" in os.environ:
        search_path = os.path.join(
            os.environ['CURRENCYHOME'], 'bin') \
            + os.pathsep \
            + os.path.realpath(os.path.join(scriptDir, '..', 'bin'))
    else:
        search_path = os.path.realpath(os.path.join(scriptDir, '..', 'bin'))

    if 'PATH' in os.environ:
        search_path = search_path + os.pathsep + os.environ['PATH']

    for directory in search_path.split(os.pathsep):
        if os.path.exists(os.path.join(directory, executable_name)):
            ret_val = os.path.join(directory, executable_name)
            return ret_val

    if ret_val is None:
        print("%s: %s" % (executable_name, ret_val))
        raise ExitError("Could not find %s in your $PATH" % executable_name)

    return ret_val
def load_log_config(log_config_file):
    log_dic = None
    if log_config_file.split(".")[-1] == "js":
        try:
            with open(log_config_file) as log_config_fd:
                log_dic = json.load(log_config_fd)
        except IOError, ex:
            raise ExitError("Could not read log config: {}"
                            .format(str(ex)))
    elif log_config_file.split(".")[-1] == "yaml":
        try:
            with open(log_config_file) as log_config_fd:
                log_dic = yaml.load(log_config_fd)
        except IOError, ex:
            raise ExitError("Could not read log config: {}"
                            .format(str(ex)))
    else:
        raise ExitError("LogConfigFile type not supported: {}"
                        .format(log_config_file))
    return log_dic


class StatsCollector(object):
    def __init__(self):
        self.statslist = []

    def get_names(self):
        """
        Returns: All data element names as list - for csv writer (header)
        """
        names = []
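
# --- Hedged usage sketch (illustrative, not part of the original source) ---
# load_log_config accepts either a ".js" (JSON) or ".yaml" file; anything
# else raises ExitError. This writes a throwaway YAML config and loads it
# back; the file name and contents are made up for the example.
def _example_load_log_config():
    cfg_path = os.path.join(tempfile.mkdtemp(), "logging.yaml")
    with open(cfg_path, "w") as fd:
        fd.write("version: 1\nroot:\n  level: DEBUG\n")
    log_dic = load_log_config(cfg_path)
    assert log_dic["root"]["level"] == "DEBUG"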
def configure(args):
    opts = parse_args(args)

    script_dir = os.path.dirname(os.path.realpath(__file__))

    # Find the validator to use
    if opts.validator is None:
        opts.validator = find_executable('txnvalidator')
        if not os.path.isfile(opts.validator):
            print "txnvalidator: {}".format(opts.validator)
            raise ExitError("Could not find txnvalidator.")
    else:
        if not os.path.isfile(opts.validator):
            print "txnvalidator: {}".format(opts.validator)
            raise ExitError("txnvalidator script does not exist.")

    # Create directory -- after the params have been validated
    if opts.data_dir is None:
        opts.data_dir_is_tmp = True  # did we make up a directory
        opts.data_dir = tempfile.mkdtemp()
    else:
        opts.data_dir = os.path.abspath(opts.data_dir)
        if not os.path.exists(opts.data_dir):
            os.makedirs(opts.data_dir)

    if opts.load_blockchain is not None:
        if not os.path.isfile(opts.load_blockchain):
            raise ExitError("Blockchain archive to load {} does not "
                            "exist.".format(opts.load_blockchain))
        else:
            opts.config = get_archive_config(opts.data_dir,
                                             opts.load_blockchain)
            if opts.config is None:
                raise ExitError("Could not read config from Blockchain "
                                "archive: {}".format(opts.load_blockchain))

    if opts.config is not None:
        if os.path.exists(opts.config):
            validator_config = parse_configuration_file(opts.config)
        else:
            raise ExitError("Config file does not exist: {}".format(
                opts.config))
    else:
        opts.config = os.path.realpath(
            os.path.join(script_dir, "..", "etc", "txnvalidator.js"))
        print "No config file specified, loading {}".format(opts.config)
        if os.path.exists(opts.config):
            validator_config = parse_configuration_file(opts.config)
        else:
            raise ExitError("Default config file does not exist: {}".format(
                opts.config))

    opts.log_config_dict = None
    if opts.log_config is not None:
        if not os.path.exists(opts.log_config):
            raise ExitError("log-config file does not exist: {}".format(
                opts.log_config))
        else:
            opts.log_config_dict = load_log_config(opts.log_config)

    keys = [
        'NodeName',
        'Listen',
        'KeyFile',
        'AdministrationNode',
        'DataDirectory',
        'LedgerURL',
    ]
    if any(k in validator_config for k in keys):
        print "Overriding the following keys from validator configuration " \
              "file: {}".format(opts.config)
    for k in keys:
        if k in validator_config:
            print "\t{}".format(k)
            del validator_config[k]
    if opts.log_config:
        print "\tLogConfigFile"

    opts.validator_config = validator_config
    opts.count = max(1, opts.count)

    print "Configuration:"
    pp.pprint(opts.__dict__)

    return vars(opts)