def _collect(q_cpu_time, q_memory):
    """Sample /proc CPU and memory stats and push snapshots onto the queues."""
    raw_cpu = bash.check_output('cat /proc/stat | head -1')
    raw_mem = bash.check_output('cat /proc/meminfo | head -3')

    q_cpu_time.put(CpuTimeSnapshot.from_bash(raw_cpu))
    q_memory.put(MemorySnapshot.from_bash(raw_mem))

    logging.info('Collected cpu_time and memory usage')
def _recreate_network():
    """Drop the Docker network if it already exists, then create it fresh."""
    network_exists = bash.call_silent(dockercmd.inspect_network()) == 0
    if network_exists:
        bash.check_output(dockercmd.rm_network())

    bash.check_output(dockercmd.create_network())
    logging.info('Docker network {} created'.format(config.network_name))
    utils.sleep(1)
def _prepare():
    """Create the multi-run directory and repoint the last-multi-run symlink."""
    os.makedirs(config.multi_run_dir)

    link = config.soft_link_to_multi_run_dir
    if os.path.islink(link):
        bash.check_output('unlink {}'.format(link))

    bash.check_output('cd {}; ln -s {} {}'.format(
        config.data_dir, config.multi_run_dir_name, config.last_multi_run))
def clean_up(self):
    """Remove every node and restore ownership of the run directory."""
    node_utils.graceful_rm(self._thread_pool, self._context.nodes.values())
    logging.info('Removed all nodes')

    utils.sleep(1)

    bash.check_output(
        bitcoincmd.fix_data_dirs_permissions(self._context.run_dir))
    logging.info('Fixed permissions of dirs')
def _create_report():
    """Run the R pre-processing and report-generation steps.

    Best-effort: a failure is logged (together with the command output
    gathered so far) instead of being propagated, so a broken report does
    not abort the caller.
    """
    out = ""
    try:
        out += bash.check_output(rcmd.preprocess(config.postprocessing_dir))
        out += bash.check_output(rcmd.create_report(config.postprocessing_dir))
        logging.info('Created report in folder={}'.format(
            config.postprocessing_dir))
    except Exception:
        # Was a leftover debug `print("LLLAAA")` plus raw prints; record the
        # failure and accumulated output via logging instead of stdout.
        logging.exception(
            'Report creation failed; output so far: {}'.format(out))
def clean_up_docker_safe(self):
    """Best-effort Docker teardown: remove nodes, network, and fix perms.

    Any failure is deliberately swallowed (logged at debug level with the
    traceback) so this can run as a last-resort cleanup without masking
    the original error.
    """
    try:
        with Pool(1) as pool:
            node_utils.graceful_rm(pool, self._context.nodes.values())
        utils.sleep(1)
        bash.check_output(dockercmd.rm_network())
        bash.check_output(
            dockercmd.fix_data_dirs_permissions(self._context.run_dir))
    except Exception:
        # Removed the dead `pass` after the log call; include exc_info so
        # the swallowed failure is diagnosable from debug logs.
        logging.debug("clean_up_docker_safe failed", exc_info=True)
def clean_up_docker(self):
    """Tear down all nodes and the Docker network, then fix directory perms."""
    node_utils.graceful_rm(self._thread_pool, self._context.nodes.values())
    logging.info('Removed all nodes')

    utils.sleep(1)

    bash.check_output(dockercmd.rm_network())
    logging.info('Deleted docker network')

    bash.check_output(
        dockercmd.fix_data_dirs_permissions(self._context.run_dir))
    logging.info('Fixed permissions of dirs used by docker')
def run():
    """Run the simulation `args.repeat` times and build a combined report."""
    args = _parse_args()
    logging.info("Parsed arguments in {}: {}".format(__name__, args))

    _prepare()
    nodes_config.create(unknown_arguments=True)
    ticks_config.create(unknown_arguments=True)
    network_config.create(unknown_arguments=True)

    for i in range(args.repeat):
        _run_single_simulation(i + 1, args.repeat)

    _collect_multi_run_csvs()
    _concat_files()

    bash.check_output(rcmd.create_report(config.soft_link_to_multi_run_dir))
    logging.info('Created report in folder={}'.format(
        config.soft_link_to_multi_run_dir))


def _run_single_simulation(number, repeat):
    """Execute one tagged simulation run and copy its artifacts aside."""
    run_number = str(number)
    logging.info('Starting {}/{} simulation'.format(run_number, repeat))

    # Tag this run so its output directory is distinguishable.
    utils.update_args(Namespace(tag_appendix='_' + run_number))
    simulation_cmd.run(unknown_arguments=True)

    bash.check_output('cp -r {}/postprocessing {}/run-{}'
                      .format(config.soft_link_to_run_dir,
                              config.soft_link_to_multi_run_dir, run_number))
    bash.check_output('cp {} {}/run-{}'.format(
        config.run_log, config.soft_link_to_multi_run_dir, run_number))

    logging.info('Finished {}/{} simulation'.format(run_number, repeat))


def _collect_multi_run_csvs():
    """Copy the shared CSV outputs into the multi-run directory."""
    for file in [config.args_csv, config.ticks_csv,
                 config.analysed_ticks_csv, config.general_infos_csv,
                 config.nodes_csv, config.network_csv]:
        bash.check_output('cp {} {}/.'.format(
            file, config.soft_link_to_multi_run_dir))
def _prepare_simulation_dir(self):
    """Lay out the run dir, symlinks, postprocessing dir and node configs."""
    if not os.path.exists(self._context.run_dir):
        os.makedirs(self._context.run_dir)

    if os.path.islink(config.soft_link_to_run_dir):
        bash.check_output('unlink {}'.format(config.soft_link_to_run_dir))
    bash.check_output('cd {}; ln -s {} {}'.format(
        config.data_dir, self._context.run_name, config.last_run))

    os.makedirs(config.postprocessing_dir)

    csv_files = [
        config.network_csv_file_name,
        config.ticks_csv_file_name,
        config.nodes_csv_file_name,
        config.args_csv_file_name,
    ]
    for csv_file in csv_files:
        # Copy the input CSV into the run dir, then expose it inside the
        # postprocessing dir via a relative symlink.
        bash.check_output('cp {}{} {}'.format(
            config.data_dir, csv_file, self._context.run_dir))
        bash.check_output('cd {}; ln -s ../{} {}'.format(
            config.postprocessing_dir, csv_file, csv_file))

    os.makedirs(config.node_config)
    self._pool.map(node_utils.create_conf_file, self._context.nodes.values())

    logging.info('Simulation directory created')
def wait_until_rpc_ready(self):
    """Block until the node's RPC port is open and RPC answers requests."""
    # Phase 1: wait for the TCP port itself to accept connections.
    while True:
        try:
            bash.check_output("nc -z -w1 {} {}".format(
                self._ip, config.rpc_port))
            break
        except Exception:
            logging.debug("Port not open")
            # Back off instead of busy-spinning when the connection is
            # refused immediately (original loop had no sleep here).
            utils.sleep(2)
    # Phase 2: wait until the RPC layer actually serves requests.
    while True:
        try:
            self.execute_rpc('getnetworkinfo')
            break
        except JSONRPCError:
            logging.debug('RPC not ready yet, sleeping for 2')
            utils.sleep(2)
def wait_until_rpc_ready(self):
    """Block until this node's RPC port is open and RPC answers requests."""
    # Phase 1: wait for the TCP port itself to accept connections.
    while True:
        try:
            bash.check_output("nc -z -w1 {} {}".format(
                self._ip, config.rpc_port))
            break
        except Exception:
            logging.debug("Waiting with netcat until port is open")
            # Back off instead of busy-spinning when the connection is
            # refused immediately (original loop had no sleep here).
            utils.sleep(1)
    # Phase 2: wait until the RPC layer actually serves requests.
    while True:
        try:
            self.execute_rpc('getnetworkinfo')
            break
        except JSONRPCError:
            logging.debug('Waiting until RPC of node={} is ready.'.format(
                self._name))
            utils.sleep(1)
def test_check_output(mock):
    """check_output decodes bytes and strips trailing whitespace."""
    mock.return_value = b'test\ntest\ttest\t\n\n'

    result = bash.check_output('cmd')

    assert result == 'test\ntest\ttest'
def is_running(self):
    """Return True if the bitcoin process reports itself as running."""
    status = bash.check_output(bitcoincmd.check_if_running(self._name))
    return status == 'true'
def run(self, connect_to_ips):
    """Start the bitcoin node, connecting it to the given peer IPs."""
    start_cmd = bitcoincmd.start(
        self._name, str(self._ip), self._path, connect_to_ips)
    bash.check_output(start_cmd)
def calcu_latency(self, zones):
    """Apply tc latency rules for this node towards the given zones.

    Mirrors add_latency(): builds the tc commands via tccmd.create and
    executes each one. The original body referenced an undefined `cmd`
    name (NameError) and silently ignored `zones`.
    """
    for cmd in tccmd.create(self._name, zones, self._latency):
        bash.check_output(cmd)
def rm_peers_file(self):
    """Delete this node's peers file and return the command output."""
    rm_cmd = bitcoincmd.rm_peers(self._name)
    return bash.check_output(rm_cmd)
def add_latency(self, zones):
    """Execute every tc command needed to add latency towards `zones`."""
    commands = tccmd.create(self._name, zones, self._latency)
    for command in commands:
        bash.check_output(command)
def _remove_old_containers_if_exists():
    """Remove leftover Docker containers from a previous run, if any."""
    containers = bash.check_output(dockercmd.ps_containers())
    # Non-empty ps output means stale containers exist
    # (idiomatic truthiness instead of `len(containers) > 0`).
    if containers:
        bash.check_output(dockercmd.remove_all_containers(), lvl=logging.DEBUG)
        logging.info('Old containers removed')
def rm(self):
    """Remove this node's Docker container and return the command output."""
    remove_cmd = dockercmd.rm_container(self._name)
    return bash.check_output(remove_cmd)
def _concat_files():
    """Merge per-run CSVs: keep one header, then append all data rows."""
    for name in files_to_concat:
        # Header comes from the first run only.
        bash.check_output('head -n 1 {}/run-1/{} > {}/{}'
                          .format(config.multi_run_dir, name,
                                  config.multi_run_dir, name))
        # Data rows (header stripped) from every run are appended.
        bash.check_output('sed -s 1d {}/*/{} >> {}/{}'
                          .format(config.multi_run_dir, name,
                                  config.multi_run_dir, name))
def is_running(self):
    """Return True if Docker reports this node's container as running."""
    check_cmd = dockercmd.check_if_running(self._name)
    return bash.check_output(check_cmd) == 'true'
def _create_report():
    """Run R preprocessing and generate the report in the postprocessing dir."""
    directory = config.postprocessing_dir

    bash.check_output(rcmd.preprocess(directory))
    bash.check_output(rcmd.create_report(directory))

    logging.info('Created report in folder={}'.format(directory))
def _try_cmd(cmd):
    """Run `cmd`, returning its output, or an error string if it fails."""
    try:
        output = bash.check_output(cmd)
    except subprocess.CalledProcessError:
        return 'cmd={} failed'.format(cmd)
    return output
def test_check_output(self, mock):
    """check_output decodes bytes and strips trailing whitespace."""
    mock.return_value = b'test\ntest\ttest\t\n\n'

    result = bash.check_output('cmd')

    self.assertEqual(result, 'test\ntest\ttest')