def run_tests(args):
    """Run each selected congestion control scheme args.run_times times.

    For every run of every scheme, the kernel's default qdisc and receive
    socket buffer sizes are switched to the values the test needs (see
    config.yml) and restored afterwards, even if the test raises.

    Args:
        args: parsed command-line namespace; must select schemes via
            either --all or --schemes.

    Raises:
        ValueError: if neither args.all nor args.schemes selects schemes.
    """
    git_summary = get_git_summary(
        args.mode, getattr(args, 'remote_path', None))
    config = parse_config()
    schemes_config = config['schemes']

    if args.all:
        # list() so the result is a mutable, shuffleable sequence
        # regardless of what keys() returns
        cc_schemes = list(schemes_config.keys())
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()
    else:
        # previously fell through with cc_schemes unbound and crashed
        # later with an opaque UnboundLocalError
        raise ValueError('must specify --all or --schemes')

    if args.random_order:
        random.shuffle(cc_schemes)

    ssh_cmd = None
    if args.mode == 'remote':
        r = parse_remote_path(args.remote_path)
        ssh_cmd = r['ssh_cmd']

    # For each run of each scheme, change the queueing discipline and
    # receiving socket buffer sizes before and after the test.
    # Check config.yml for values.
    for run_id in xrange(args.start_run_id,
                         args.start_run_id + args.run_times):
        # clean the contents in /tmp/pantheon-tmp
        clean_tmp_cmd = 'rm -rf /tmp/pantheon-tmp/*'
        if args.mode == 'remote':
            call(ssh_cmd + [clean_tmp_cmd])
        call(clean_tmp_cmd, shell=True)

        for cc in cc_schemes:
            # remember the current kernel attrs so they can be restored
            default_qdisc = get_default_qdisc(ssh_cmd)
            old_recv_bufsizes = get_recv_sock_bufsizes(ssh_cmd)

            # scheme-specific qdisc wins over the configured default
            if 'qdisc' in schemes_config[cc]:
                test_qdisc = schemes_config[cc]['qdisc']
            else:
                test_qdisc = config['kernel_attrs']['default_qdisc']
            test_recv_sock_bufs = config['kernel_attrs']['sock_recv_bufs']

            try:
                if default_qdisc != test_qdisc:
                    set_default_qdisc(test_qdisc, ssh_cmd)
                set_recv_sock_bufsizes(test_recv_sock_bufs, ssh_cmd)

                Test(args, run_id, cc).run()
            finally:
                # always restore kernel attrs, even if the test crashed
                set_default_qdisc(default_qdisc, ssh_cmd)
                set_recv_sock_bufsizes(old_recv_bufsizes, ssh_cmd)

    if not args.no_metadata:
        meta = vars(args).copy()
        meta['cc_schemes'] = sorted(cc_schemes)
        save_test_metadata(meta, path.abspath(args.data_dir), git_summary)
def pkill(args):
    """Clean up leftover test processes via helpers/pkill.py.

    In remote mode the remote host is cleaned first over SSH; the local
    host is always cleaned afterwards.
    """
    sys.stderr.write('Cleaning up using pkill...'
                     '(enabled by --pkill-cleanup)\n')

    if args.mode == 'remote':
        remote = parse_remote_path(args.remote_path)
        remote_script = path.join(
            remote['pantheon_dir'], 'helpers', 'pkill.py')
        call(remote['ssh_cmd'] +
             ['python', remote_script, '--kill-dir', remote['pantheon_dir']])

    local_script = path.join(project_root.DIR, 'helpers', 'pkill.py')
    call(['python', local_script, '--kill-dir', project_root.DIR])
def __init__(self, args, run_id, cc):
    """Record the settings for one run of one scheme from parsed args.

    Args:
        args: parsed command-line namespace (local or remote mode).
        run_id: index of this run.
        cc: name of the congestion control scheme under test.
    """
    self.mode = args.mode
    self.run_id = run_id
    self.cc = cc
    self.data_dir = path.abspath(args.data_dir)

    # arguments shared between local and remote modes
    for attr in ('flows', 'runtime', 'interval', 'run_times'):
        setattr(self, attr, getattr(args, attr))

    # handles torn down during cleanup
    self.proc_first = None
    self.proc_second = None
    self.ts_manager = None
    self.tc_manager = None

    self.test_start_time = None
    self.test_end_time = None

    if self.mode == 'local':
        # mahimahi link traces and extra mm command configuration
        self.datalink_trace = args.uplink_trace
        self.acklink_trace = args.downlink_trace
        self.prepend_mm_cmds = args.prepend_mm_cmds
        self.append_mm_cmds = args.append_mm_cmds
        self.extra_mm_link_args = args.extra_mm_link_args

        # for convenience: fixed roles in local mode
        self.sender_side = 'remote'
        self.server_side = 'local'

    if self.mode == 'remote':
        self.sender_side = args.sender_side
        self.server_side = args.server_side

        self.local_addr = args.local_addr
        self.local_if = args.local_if
        self.remote_if = args.remote_if
        self.local_desc = args.local_desc
        self.remote_desc = args.remote_desc

        self.ntp_addr = args.ntp_addr
        self.local_ofst = None
        self.remote_ofst = None

        self.r = parse_remote_path(args.remote_path, self.cc)
def _run_test_with_kernel_attrs(args, config, run_id, cc, test_qdisc,
                                ssh_cmd):
    """Run one Test with the kernel attrs it needs, restoring them after.

    Saves the current default qdisc and receive socket buffer sizes,
    applies test_qdisc and the configured buffer sizes, runs the test,
    and restores the saved values even if the test raises.
    """
    default_qdisc = get_default_qdisc(ssh_cmd)
    old_recv_bufsizes = get_recv_sock_bufsizes(ssh_cmd)
    test_recv_sock_bufs = config['kernel_attrs']['sock_recv_bufs']

    try:
        if default_qdisc != test_qdisc:
            set_default_qdisc(test_qdisc, ssh_cmd)
        set_recv_sock_bufsizes(test_recv_sock_bufs, ssh_cmd)

        Test(args, run_id, cc).run()
    finally:
        # always restore kernel attrs, even if the test crashed
        set_default_qdisc(default_qdisc, ssh_cmd)
        set_recv_sock_bufsizes(old_recv_bufsizes, ssh_cmd)


def run_tests(args):
    """Run the selected schemes (or a test config) args.run_times times.

    Schemes come from --all, --schemes, or args.test_config; the kernel's
    default qdisc and receive socket buffer sizes are switched around
    each test and restored afterwards (see config.yml for values).

    Args:
        args: parsed command-line namespace.

    Raises:
        ValueError: if neither --all, --schemes, nor a test config was
            provided.
    """
    git_summary = get_git_summary(args.mode,
                                  getattr(args, 'remote_path', None))
    config = parse_config()
    schemes_config = config['schemes']

    if args.all:
        cc_schemes = schemes_config.keys()
        if args.random_order:
            random.shuffle(cc_schemes)
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()
        if args.random_order:
            random.shuffle(cc_schemes)
    else:
        # explicit raise instead of assert: assert is stripped under -O
        if args.test_config is None:
            raise ValueError(
                'must specify --all, --schemes or a test config')
        if args.random_order:
            random.shuffle(args.test_config['flows'])
        cc_schemes = [flow['scheme'] for flow in args.test_config['flows']]

    ssh_cmd = None
    if args.mode == 'remote':
        r = parse_remote_path(args.remote_path)
        ssh_cmd = r['ssh_cmd']

    # For each run of each scheme, change the queueing discipline and
    # receiving socket buffer sizes before and after the test.
    # Check config.yml for values.
    for run_id in xrange(args.start_run_id,
                         args.start_run_id + args.run_times):
        # clean the contents in /tmp/pantheon-tmp
        clean_tmp_cmd = 'rm -rf /tmp/pantheon-tmp/*'
        if args.mode == 'remote':
            call(ssh_cmd + [clean_tmp_cmd])
        call(clean_tmp_cmd, shell=True)

        # ISSUE (ranysha): no support for multiple schemes where each
        # uses diff qdisc. since version 4.13 of the kernel, TCP supports
        # packet pacing so you don't need to specify qdisc for BBR. when
        # running with config file, going to ignore qdisc setting for now.
        if args.test_config is None:
            for cc in cc_schemes:
                # scheme-specific qdisc wins over the configured default
                if 'qdisc' in schemes_config[cc]:
                    test_qdisc = schemes_config[cc]['qdisc']
                else:
                    test_qdisc = config['kernel_attrs']['default_qdisc']
                _run_test_with_kernel_attrs(
                    args, config, run_id, cc, test_qdisc, ssh_cmd)
        else:
            # config-driven run: single Test over all flows, default qdisc
            _run_test_with_kernel_attrs(
                args, config, run_id, None,
                config['kernel_attrs']['default_qdisc'], ssh_cmd)

    if not args.no_metadata:
        meta = vars(args).copy()
        meta['cc_schemes'] = sorted(cc_schemes)
        save_test_metadata(meta, path.abspath(args.data_dir), git_summary)
def __init__(self, args, run_id, cc):
    """Record the settings for one run of one scheme from parsed args.

    Args:
        args: parsed command-line namespace (local or remote mode),
            optionally carrying a test_config with a list of flows.
        run_id: index of this run.
        cc: name of the congestion control scheme under test, or None
            when a test config supplies the flows (the config's
            test-name is then used as self.cc).
    """
    self.mode = args.mode
    self.run_id = run_id
    self.cc = cc
    self.data_dir = path.abspath(args.data_dir)

    # shared arguments between local and remote modes
    self.flows = args.flows
    self.runtime = args.runtime
    self.interval = args.interval
    self.run_times = args.run_times

    # used for cleanup
    self.proc_first = None
    self.proc_second = None
    self.ts_manager = None
    self.tc_manager = None

    self.test_start_time = None
    self.test_end_time = None

    # local mode
    if self.mode == 'local':
        self.datalink_trace = args.uplink_trace
        self.acklink_trace = args.downlink_trace
        self.prepend_mm_cmds = args.prepend_mm_cmds
        self.append_mm_cmds = args.append_mm_cmds
        self.extra_mm_link_args = args.extra_mm_link_args

        # for convenience
        self.sender_side = 'remote'
        self.server_side = 'local'

    # remote mode
    if self.mode == 'remote':
        self.sender_side = args.sender_side
        self.server_side = args.server_side
        self.local_addr = args.local_addr
        self.local_if = args.local_if
        self.remote_if = args.remote_if
        self.local_desc = args.local_desc
        self.remote_desc = args.remote_desc

        self.ntp_addr = args.ntp_addr
        self.local_ofst = None
        self.remote_ofst = None

        self.r = parse_remote_path(args.remote_path, self.cc)

    # arguments when there's a config
    self.test_config = None
    if hasattr(args, 'test_config'):
        self.test_config = args.test_config

    if self.test_config is not None:
        self.cc = self.test_config['test-name']
        self.flow_objs = []
        cc_src_remote_dir = ''
        if self.mode == 'remote':
            # BUG FIX: was `r['pantheon_dir']` — `r` is never bound in
            # this scope; the remote path dict is stored as self.r above
            cc_src_remote_dir = self.r['pantheon_dir']

        for flow in self.test_config['flows']:
            cc = flow['scheme']
            run_first, run_second = who_runs_first(cc)
            self.flow_objs.append(
                Flow(cc=cc,
                     cc_src_local=path.join(
                         project_root.DIR, 'src', cc + '.py'),
                     cc_src_remote=path.join(
                         cc_src_remote_dir, 'src', cc + '.py'),
                     run_first=run_first,
                     run_second=run_second))