def _run_single(self, hosts, rate, bench_parameters, node_parameters, debug=False):
    Print.info('Booting testbed...')

    # Kill any potentially unfinished run and delete logs.
    self.kill(hosts=hosts, delete_logs=True)
    Print.info('Killed previous instances')
    sleep(10)

    # Run the clients (they will wait for the nodes to be ready).
    # Filter all faulty nodes from the client addresses (or they will wait
    # for the faulty nodes to be online).
    committee = Committee.load(PathMaker.committee_file())
    addresses = [f'{x}:{self.settings.front_port}' for x in hosts]
    rate_share = ceil(rate / committee.size())  # Take faults into account.
    timeout = node_parameters.timeout_delay
    client_logs = [PathMaker.client_log_file(i) for i in range(len(hosts))]
    for host, addr, log_file in zip(hosts, addresses, client_logs):
        cmd = CommandMaker.run_client(
            addr,
            bench_parameters.tx_size,
            rate_share,
            timeout,
            nodes=addresses
        )
        self._background_run(host, cmd, log_file)
    Print.info('Clients booted...')
    sleep(10)

    # Run the nodes.
    key_files = [PathMaker.key_file(i) for i in range(len(hosts))]
    dbs = [PathMaker.db_path(i) for i in range(len(hosts))]
    node_logs = [PathMaker.node_log_file(i) for i in range(len(hosts))]
    threshold_key_files = [
        PathMaker.threshold_key_file(i) for i in range(len(hosts))
    ]
    for host, key_file, threshold_key_file, db, log_file in zip(
            hosts, key_files, threshold_key_files, dbs, node_logs):
        cmd = CommandMaker.run_node(
            key_file,
            threshold_key_file,
            PathMaker.committee_file(),
            db,
            PathMaker.parameters_file(),
            debug=debug
        )
        self._background_run(host, cmd, log_file)

    # Wait for the nodes to synchronize.
    Print.info('Waiting for the nodes to synchronize...')
    sleep(node_parameters.timeout_delay / 1000)

    # Wait for all transactions to be processed.
    duration = bench_parameters.duration
    for _ in progress_bar(range(100), prefix=f'Running benchmark ({duration} sec):'):
        sleep(ceil(duration / 100))
    self.kill(hosts=hosts, delete_logs=False)
def _run_single(self, hosts, rate, bench_parameters, node_parameters, debug=False):
    Print.info('Booting testbed...')

    # Kill any potentially unfinished run and delete logs.
    self.kill(hosts=hosts, delete_logs=True)

    # Run the clients (they will wait for the nodes to be ready).
    committee = Committee.load(PathMaker.committee_file())
    addresses = committee.front_addresses()
    rate_share = ceil(rate / committee.size())
    timeout = node_parameters.timeout_delay
    client_logs = [PathMaker.client_log_file(i) for i in range(len(hosts))]
    for host, addr, log_file in zip(hosts, addresses, client_logs):
        cmd = CommandMaker.run_client(
            addr,
            bench_parameters.tx_size,
            rate_share,
            timeout,
            nodes=addresses
        )
        self._background_run(host, cmd, log_file)

    # Run the nodes.
    key_files = [PathMaker.key_file(i) for i in range(len(hosts))]
    dbs = [PathMaker.db_path(i) for i in range(len(hosts))]
    node_logs = [PathMaker.node_log_file(i) for i in range(len(hosts))]
    for host, key_file, db, log_file in zip(hosts, key_files, dbs, node_logs):
        cmd = CommandMaker.run_node(
            key_file,
            PathMaker.committee_file(),
            db,
            PathMaker.parameters_file(),
            debug=debug
        )
        self._background_run(host, cmd, log_file)

    # Wait for the nodes to synchronize.
    Print.info('Waiting for the nodes to synchronize...')
    sleep(2 * node_parameters.timeout_delay / 1000)

    # Wait for all transactions to be processed.
    duration = bench_parameters.duration
    for _ in progress_bar(range(20), prefix=f'Running benchmark ({duration} sec):'):
        sleep(ceil(duration / 20))
    self.kill(hosts=hosts, delete_logs=False)
def run(self, debug=False):
    assert isinstance(debug, bool)
    Print.heading('Starting local benchmark')

    # Kill any previous testbed.
    self._kill_nodes()

    try:
        Print.info('Setting up testbed...')
        nodes, rate = self.nodes[0], self.rate[0]

        # Cleanup all files.
        cmd = f'{CommandMaker.clean_logs()} ; {CommandMaker.cleanup()}'
        subprocess.run([cmd], shell=True, stderr=subprocess.DEVNULL)
        sleep(0.5)  # Removing the store may take time.

        # Recompile the latest code.
        cmd = CommandMaker.compile().split()
        subprocess.run(cmd, check=True, cwd=PathMaker.node_crate_path())

        # Create alias for the client and nodes binary.
        cmd = CommandMaker.alias_binaries(PathMaker.binary_path())
        subprocess.run([cmd], shell=True)

        # Generate configuration files.
        keys = []
        key_files = [PathMaker.key_file(i) for i in range(nodes)]
        for filename in key_files:
            cmd = CommandMaker.generate_key(filename).split()
            subprocess.run(cmd, check=True)
            keys += [Key.from_file(filename)]

        names = [x.name for x in keys]
        committee = LocalCommittee(names, self.BASE_PORT)
        committee.print(PathMaker.committee_file())

        self.node_parameters.print(PathMaker.parameters_file())

        # Run the clients (they will wait for the nodes to be ready).
        addresses = committee.front_addresses()
        rate_share = ceil(rate / nodes)
        timeout = self.node_parameters.timeout_delay
        client_logs = [PathMaker.client_log_file(i) for i in range(nodes)]
        for addr, log_file in zip(addresses, client_logs):
            cmd = CommandMaker.run_client(
                addr,
                self.tx_size,
                rate_share,
                timeout
            )
            self._background_run(cmd, log_file)

        # Run the nodes.
        dbs = [PathMaker.db_path(i) for i in range(nodes)]
        node_logs = [PathMaker.node_log_file(i) for i in range(nodes)]
        for key_file, db, log_file in zip(key_files, dbs, node_logs):
            cmd = CommandMaker.run_node(
                key_file,
                PathMaker.committee_file(),
                db,
                PathMaker.parameters_file(),
                debug=debug
            )
            self._background_run(cmd, log_file)

        # Wait for the nodes to synchronize.
        Print.info('Waiting for the nodes to synchronize...')
        sleep(2 * self.node_parameters.timeout_delay / 1000)

        # Wait for all transactions to be processed.
        Print.info(f'Running benchmark ({self.duration} sec)...')
        sleep(self.duration)
        self._kill_nodes()

        # Parse logs and return the parser.
        Print.info('Parsing logs...')
        return LogParser.process('./logs')

    except (subprocess.SubprocessError, ParseError) as e:
        self._kill_nodes()
        raise BenchError('Failed to run benchmark', e)
def run(self, debug=False):
    assert isinstance(debug, bool)
    Print.heading('Starting local benchmark')

    # Kill any previous testbed.
    self._kill_nodes()

    try:
        Print.info('Setting up testbed...')
        nodes, rate = self.nodes[0], self.rate[0]

        # Cleanup all files.
        cmd = f'{CommandMaker.clean_logs()} ; {CommandMaker.cleanup()}'
        subprocess.run([cmd], shell=True, stderr=subprocess.DEVNULL)
        sleep(0.5)  # Removing the store may take time.

        # Recompile the latest code.
        cmd = CommandMaker.compile().split()
        subprocess.run(cmd, check=True, cwd=PathMaker.node_crate_path())

        # Create alias for the client and nodes binary.
        cmd = CommandMaker.alias_binaries(PathMaker.binary_path())
        subprocess.run([cmd], shell=True)

        # Generate configuration files.
        keys = []
        key_files = [PathMaker.key_file(i) for i in range(nodes)]
        for filename in key_files:
            cmd = CommandMaker.generate_key(filename).split()
            subprocess.run(cmd, check=True)
            keys += [Key.from_file(filename)]

        # Generate threshold signature key files.
        cmd = './node threshold_keys'
        for i in range(nodes):
            cmd += ' --filename ' + PathMaker.threshold_key_file(i)
        cmd = cmd.split()
        subprocess.run(cmd, capture_output=True, check=True)

        names = [x.name for x in keys]
        tss_keys = []
        for i in range(nodes):
            tss_keys += [TSSKey.from_file(PathMaker.threshold_key_file(i))]
        ids = [x.id for x in tss_keys]
        committee = LocalCommittee(names, ids, self.BASE_PORT)
        committee.print(PathMaker.committee_file())

        self.node_parameters.print(PathMaker.parameters_file())

        # Do not boot faulty nodes.
        nodes = nodes - self.faults

        # Run the clients (they will wait for the nodes to be ready).
        addresses = committee.front
        rate_share = ceil(rate / nodes)
        timeout = self.node_parameters.timeout_delay
        client_logs = [PathMaker.client_log_file(i) for i in range(nodes)]
        for addr, log_file in zip(addresses, client_logs):
            cmd = CommandMaker.run_client(
                addr,
                self.tx_size,
                rate_share,
                timeout
            )
            self._background_run(cmd, log_file)

        if self.node_parameters.protocol == 0:
            Print.info('Running HotStuff')
        elif self.node_parameters.protocol == 1:
            Print.info('Running Async HotStuff')
        elif self.node_parameters.protocol == 2:
            Print.info('Running TwoChainVABA')
        else:
            Print.info('Wrong protocol type!')
            return

        Print.info(f'{self.faults} faults')
        Print.info(
            f'Timeout {self.node_parameters.timeout_delay} ms, '
            f'Network delay {self.node_parameters.network_delay} ms'
        )
        Print.info(f'DDOS attack {self.node_parameters.ddos}')

        # Run the nodes.
        dbs = [PathMaker.db_path(i) for i in range(nodes)]
        node_logs = [PathMaker.node_log_file(i) for i in range(nodes)]
        threshold_key_files = [
            PathMaker.threshold_key_file(i) for i in range(nodes)
        ]
        for key_file, threshold_key_file, db, log_file in zip(
                key_files, threshold_key_files, dbs, node_logs):
            cmd = CommandMaker.run_node(
                key_file,
                threshold_key_file,
                PathMaker.committee_file(),
                db,
                PathMaker.parameters_file(),
                debug=debug
            )
            self._background_run(cmd, log_file)

        # Wait for the nodes to synchronize.
        Print.info('Waiting for the nodes to synchronize...')
        sleep(2 * self.node_parameters.timeout_delay / 1000)

        # Wait for all transactions to be processed.
        Print.info(f'Running benchmark ({self.duration} sec)...')
        sleep(self.duration)
        self._kill_nodes()

        # Parse logs and return the parser.
        Print.info('Parsing logs...')
        return LogParser.process(
            './logs',
            self.faults,
            self.node_parameters.protocol,
            self.node_parameters.ddos
        )

    except (subprocess.SubprocessError, ParseError) as e:
        self._kill_nodes()
        raise BenchError('Failed to run benchmark', e)