def plot(ctx):
    ''' Plot performance using the logs generated by "fab remote" '''
    # Aggregate the raw per-run logs before rendering any figure.
    LogAggregator().print()
    try:
        Ploter.plot_robustness(Ploter.nodes)
        Ploter.plot_latency(Ploter.nodes)
        Ploter.plot_tps(Ploter.max_latency)
    except PlotError as err:
        Print.error(BenchError('Failed to plot performance', err))
def plot(ctx):
    ''' Plot performance using the logs generated by "fab remote" '''
    # Fixed plotting configuration: node counts, transaction size,
    # fault counts, and latency caps to include in the figures.
    params = {
        'nodes': [10, 20, 50],
        'tx_size': 512,
        'faults': [0],
        'max_latency': [3_000, 6_000],
    }
    try:
        Ploter.plot(params)
    except PlotError as err:
        Print.error(BenchError('Failed to plot performance', err))
def run(self, bench_parameters_dict, node_parameters_dict, debug=False):
    """Run the remote benchmark end to end.

    Validates the parameter dicts, selects and updates the remote hosts,
    then for every (node count, input rate) combination uploads the
    configuration and executes the configured number of runs, parsing the
    logs of each run into a result file.

    Args:
        bench_parameters_dict: raw dict validated by BenchParameters.
        node_parameters_dict: raw dict validated by NodeParameters.
        debug: forwarded to _run_single; must be a bool.

    Raises:
        BenchError: if the parameters are invalid or updating the nodes
            fails. Per-run failures are only logged, not raised.
    """
    assert isinstance(debug, bool)
    Print.heading('Starting remote benchmark')
    try:
        bench_parameters = BenchParameters(bench_parameters_dict)
        node_parameters = NodeParameters(node_parameters_dict)
    except ConfigError as e:
        raise BenchError('Invalid nodes or bench parameters', e)

    # Select which hosts to use.
    selected_hosts = self._select_hosts(bench_parameters)
    if not selected_hosts:
        Print.warn('There are not enough instances available')
        return

    # Update nodes.
    try:
        self._update(selected_hosts)
    except (GroupException, ExecutionError) as e:
        # Wrap Fabric group failures; ExecutionError passes through as-is.
        e = FabricError(e) if isinstance(e, GroupException) else e
        raise BenchError('Failed to update nodes', e)

    # Run benchmarks.
    for n in bench_parameters.nodes:
        for r in bench_parameters.rate:
            Print.heading(f'\nRunning {n} nodes (input rate: {r:,} tx/s)')
            # Only the first n selected hosts take part in this combination.
            hosts = selected_hosts[:n]

            # Upload all configuration files.
            try:
                self._config(hosts, node_parameters)
            except (subprocess.SubprocessError, GroupException) as e:
                e = FabricError(e) if isinstance(e, GroupException) else e
                Print.error(BenchError('Failed to configure nodes', e))
                # Skip this (n, r) combination but keep benchmarking others.
                continue

            # Run the benchmark.
            for i in range(bench_parameters.runs):
                Print.heading(f'Run {i+1}/{bench_parameters.runs}')
                try:
                    self._run_single(
                        hosts, r, bench_parameters, node_parameters, debug
                    )
                    # Parse the collected logs and persist the summary.
                    self._logs(hosts).print(
                        PathMaker.result_file(n, r, bench_parameters.tx_size))
                except (subprocess.SubprocessError, GroupException, ParseError) as e:
                    # Make sure no stray processes survive a failed run.
                    self.kill(hosts=hosts)
                    if isinstance(e, GroupException):
                        e = FabricError(e)
                    Print.error(BenchError('Benchmark failed', e))
                    continue
def install(ctx):
    ''' Install HotStuff on all machines '''
    # Delegate the rollout to Bench; surface any failure via Print.
    try:
        Bench(ctx).install()
    except BenchError as err:
        Print.error(err)
def info(ctx):
    ''' Display connect information about all the available machines '''
    # Build the instance manager from local config, then print its info.
    try:
        manager = InstanceManager.make()
        manager.print_info()
    except BenchError as err:
        Print.error(err)
def stop(ctx):
    ''' Stop all machines '''
    # Stopping keeps the instances around; use `destroy` to delete them.
    try:
        manager = InstanceManager.make()
        manager.stop_instances()
    except BenchError as err:
        Print.error(err)
def destroy(ctx):
    ''' Destroy the testbed '''
    # Terminates (permanently deletes) every instance of the testbed.
    try:
        manager = InstanceManager.make()
        manager.terminate_instances()
    except BenchError as err:
        Print.error(err)
def create(ctx, nodes=2):
    ''' Create a testbed'''
    # Provision `nodes` fresh instances for the benchmark testbed.
    try:
        manager = InstanceManager.make()
        manager.create_instances(nodes)
    except BenchError as err:
        Print.error(err)
def logs(ctx):
    ''' Print a summary of the logs '''
    # Parse everything under ./logs and print the aggregated result.
    try:
        summary = LogParser.process('./logs').result()
        print(summary)
    except ParseError as err:
        Print.error(BenchError('Failed to parse logs', err))
def kill(ctx):
    ''' Stop any HotStuff execution on all machines '''
    # Kills running node processes without touching the machines themselves.
    try:
        Bench(ctx).kill()
    except BenchError as err:
        Print.error(err)
'sync_retry_delay': 10_000, 'max_payload_size': 500, 'min_block_delay': 0 }, 'mempool': { 'queue_capacity': 10_000, 'sync_retry_delay': 100_000, 'max_payload_size': 15_000, 'min_block_delay': 0 } } try: ret = LocalBench(bench_params, node_params).run(debug=False).result() print(ret) except BenchError as e: Print.error(e) @task def create(ctx, nodes=2): ''' Create a testbed''' try: InstanceManager.make().create_instances(nodes) except BenchError as e: Print.error(e) @task def destroy(ctx): ''' Destroy the testbed ''' try:
def start(ctx, max=10):
    ''' Start at most `max` machines per data center '''
    # NOTE: `max` shadows the builtin, but the parameter name is part of
    # the task's CLI interface, so it must stay.
    try:
        manager = InstanceManager.make()
        manager.start_instances(max)
    except BenchError as err:
        Print.error(err)
def run(self, bench_parameters_dict, node_parameters_dict, debug=False):
    """Run the remote benchmark end to end.

    Validates the parameter dicts, selects and updates the remote hosts,
    announces the protocol variant being run, uploads the configuration
    once (for the largest node count), then for every (node count, input
    rate) combination executes the configured number of runs, parsing the
    logs of each run into a result file.

    Args:
        bench_parameters_dict: raw dict validated by BenchParameters.
        node_parameters_dict: raw dict validated by NodeParameters.
        debug: forwarded to _run_single; must be a bool.

    Raises:
        BenchError: if the parameters are invalid or updating the nodes
            fails. Configuration and per-run failures are only logged.
    """
    assert isinstance(debug, bool)
    Print.heading('Starting remote benchmark')
    try:
        bench_parameters = BenchParameters(bench_parameters_dict)
        node_parameters = NodeParameters(node_parameters_dict)
    except ConfigError as e:
        raise BenchError('Invalid nodes or bench parameters', e)

    # Select which hosts to use.
    selected_hosts = self._select_hosts(bench_parameters)
    if not selected_hosts:
        Print.warn('There are not enough instances available')
        return

    # Update nodes.
    try:
        self._update(selected_hosts)
    except (GroupException, ExecutionError) as e:
        # Wrap Fabric group failures; ExecutionError passes through as-is.
        e = FabricError(e) if isinstance(e, GroupException) else e
        raise BenchError('Failed to update nodes', e)

    # Announce which protocol variant this run uses; 0/1/2 are the only
    # supported values — anything else aborts the benchmark.
    if node_parameters.protocol == 0:
        Print.info('Running HotStuff')
    elif node_parameters.protocol == 1:
        Print.info('Running AsyncHotStuff')
    elif node_parameters.protocol == 2:
        Print.info('Running TwoChainVABA')
    else:
        Print.info('Wrong protocol type!')
        return

    Print.info(f'{bench_parameters.faults} faults')
    Print.info(
        f'Timeout {node_parameters.timeout_delay} ms, Network delay {node_parameters.network_delay} ms'
    )
    Print.info(f'DDOS attack {node_parameters.ddos}')

    # Upload all configuration files once, to the first node-count's host
    # set, instead of re-uploading inside the loop as earlier revisions did.
    hosts = selected_hosts[:bench_parameters.nodes[0]]
    try:
        self._config(hosts, node_parameters)
    except (subprocess.SubprocessError, GroupException) as e:
        e = FabricError(e) if isinstance(e, GroupException) else e
        Print.error(BenchError('Failed to configure nodes', e))
        # NOTE(review): execution continues even after a failed _config —
        # confirm this best-effort behavior is intended.

    # Run benchmarks.
    for n in bench_parameters.nodes:
        for r in bench_parameters.rate:
            Print.heading(f'\nRunning {n} nodes (input rate: {r:,} tx/s)')
            hosts = selected_hosts[:n]

            # Do not boot faulty nodes.
            faults = bench_parameters.faults
            hosts = hosts[:n - faults]
            protocol = node_parameters.protocol
            ddos = node_parameters.ddos

            # Run the benchmark.
            for i in range(bench_parameters.runs):
                Print.heading(f'Run {i+1}/{bench_parameters.runs}')
                try:
                    self._run_single(
                        hosts, r, bench_parameters, node_parameters, debug
                    )
                    # Parse the collected logs and persist the summary.
                    self._logs(hosts, faults, protocol, ddos).print(
                        PathMaker.result_file(
                            n, r, bench_parameters.tx_size, faults))
                except (subprocess.SubprocessError, GroupException, ParseError) as e:
                    # Make sure no stray processes survive a failed run.
                    self.kill(hosts=hosts)
                    if isinstance(e, GroupException):
                        e = FabricError(e)
                    Print.error(BenchError('Benchmark failed', e))
                    continue