def cleanup_files(self, kill_9_iperf=True):
    # 1. delete .log files explicitly
    log_types = ['acklink', 'datalink', 'mm_acklink', 'mm_datalink', 'stats']
    run_ids = list(range(1, self.runs + 1))
    file_names = ['%s_%s_run%d.log' % (self.experiment_name, t, i)
                  for t, i in itertools.product(log_types, run_ids)]
    file_paths = [os.path.join(self.tmp_dir, f) for f in file_names]
    for f in file_paths:
        os.remove(f)

    # 2. delete all .log.ingress and .log.egress containing experiment name
    for filename in os.listdir(utils.tmp_dir):
        if self.experiment_name not in filename:
            continue
        path = os.path.join(utils.tmp_dir, filename)
        os.remove(path)

    # 3. move all remaining files (graphs) to persistent data folder
    utils.make_sure_dir_exists(self.data_dir)
    for filename in os.listdir(self.tmp_dir):
        if self.experiment_name not in filename:
            continue
        path = os.path.join(self.tmp_dir, filename)
        new_path = os.path.join(self.data_dir, filename)
        shutil.move(path, new_path)

    if kill_9_iperf:
        try:
            check_output('pkill -9 iperf', shell=True)
        except Exception:
            # pkill exits non-zero when no iperf process is left to kill
            pass
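# Illustrative sketch (not part of the original sources): what the filename
# pattern deleted in step 1 of cleanup_files() expands to. The experiment name
# and run count are made-up examples.
import itertools

demo_log_types = ['acklink', 'datalink', 'mm_acklink', 'mm_datalink', 'stats']
demo_runs = 2
demo_file_names = ['%s_%s_run%d.log' % ('demo_experiment', t, i)
                   for t, i in itertools.product(demo_log_types,
                                                 range(1, demo_runs + 1))]
# yields 'demo_experiment_acklink_run1.log', 'demo_experiment_acklink_run2.log',
# 'demo_experiment_datalink_run1.log', ... for every (type, run) pair
print('\n'.join(demo_file_names))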
def install_deps(cc_src):
    deps = check_output([cc_src, 'deps']).strip()

    if deps:
        if call('sudo apt-get -y install ' + deps, shell=True) != 0:
            sys.stderr.write('Some dependencies failed to install '
                             'but assuming things okay.\n')
def set_qdisc(qdisc):
    curr_qdisc = check_output('sysctl net.core.default_qdisc', shell=True)
    curr_qdisc = curr_qdisc.split('=')[-1].strip()

    if curr_qdisc != qdisc:
        check_call('sudo sysctl -w net.core.default_qdisc=%s' % qdisc,
                   shell=True)
        sys.stderr.write('Changed default_qdisc from %s to %s\n'
                         % (curr_qdisc, qdisc))
def get_sys_info():
    sys_info = ''
    sys_info += check_output(['uname', '-sr'])
    sys_info += check_output(['sysctl', 'net.core.default_qdisc'])
    sys_info += check_output(['sysctl', 'net.core.rmem_default'])
    sys_info += check_output(['sysctl', 'net.core.rmem_max'])
    sys_info += check_output(['sysctl', 'net.core.wmem_default'])
    sys_info += check_output(['sysctl', 'net.core.wmem_max'])
    sys_info += check_output(['sysctl', 'net.ipv4.tcp_rmem'])
    sys_info += check_output(['sysctl', 'net.ipv4.tcp_wmem'])

    return sys_info
def who_runs_first(cc):
    cc_src = path.join(context.src_dir, 'wrappers', cc + '.py')

    cmd = [cc_src, 'run_first']
    run_first = check_output(cmd).strip()

    if run_first == 'receiver':
        run_second = 'sender'
    elif run_first == 'sender':
        run_second = 'receiver'
    else:
        sys.exit('Must specify "receiver" or "sender" to run first')

    return run_first, run_second
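# Illustrative sketch (an assumption, not one of the real wrappers in
# src/wrappers/): install_deps() and who_runs_first() above, and test_schemes()
# below, all treat a wrapper script as a small CLI. 'deps' prints a
# space-separated apt package list (possibly nothing) and 'run_first' prints
# either 'receiver' or 'sender'; the actual 'receiver'/'sender' subcommands are
# omitted here. The package names and the choice of 'receiver' are made up.
import sys


def hypothetical_wrapper_main():
    arg = sys.argv[1] if len(sys.argv) > 1 else ''

    if arg == 'deps':
        # a real wrapper lists the packages its scheme needs
        print('iperf ntp')
    elif arg == 'run_first':
        # tells the experiment driver which side must be launched first
        print('receiver')
    else:
        sys.exit('usage: wrapper.py deps|run_first|receiver|sender ...')


if __name__ == '__main__':
    hypothetical_wrapper_main()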
def get_git_summary(pem, mode='local', remote_path=None):
    git_summary_src = path.join(context.src_dir, 'experiments',
                                'git_summary.sh')
    local_git_summary = check_output(git_summary_src, cwd=context.base_dir)

    if mode == 'remote':
        r = parse_remote_path(remote_path, pem)

        git_summary_src = path.join(r['src_dir'], 'experiments',
                                    'git_summary.sh')
        ssh_cmd = 'cd %s; %s' % (r['base_dir'], git_summary_src)
        ssh_cmd = ' '.join(r['ssh_cmd']) + ' "%s"' % ssh_cmd

        remote_git_summary = check_output(ssh_cmd, shell=True)

        if local_git_summary != remote_git_summary:
            sys.stderr.write('--- local git summary ---\n%s\n'
                             % local_git_summary)
            sys.stderr.write('--- remote git summary ---\n%s\n'
                             % remote_git_summary)
            sys.exit('Repository differed between local and remote sides')

    return local_git_summary
def __init__(self, scheme, ramdisk=True, tmp_dir='./tmp_data',
             data_dir='./data', verbose=False):
    # load and set up all schemes after a reboot
    check_output(
        'python %s --schemes %s'
        % (os.path.join(context.src_dir, 'experiments/setup.py'), scheme),
        shell=True)

    self.tmp_dir = tmp_dir
    self.data_dir = data_dir

    if ramdisk:
        utils.make_sure_dir_exists(self.tmp_dir)
        res = check_output('df -T %s' % self.tmp_dir, shell=True)
        if 'tmpfs' not in res:
            # mount a 300 MB tmpfs at tmp_dir so logs are written to memory
            check_output('sudo mount -t tmpfs -o size=300M tmpfs %s'
                         % self.tmp_dir, shell=True)
        else:
            print('%s is already a ramdisk' % self.tmp_dir)

    self.scheme = scheme
    self.verbose = verbose

    self.build_experiments()
def test_schemes(args):
    wrappers_dir = path.join(context.src_dir, 'wrappers')

    if args.all:
        schemes = utils.parse_config()['schemes'].keys()
    elif args.schemes is not None:
        schemes = args.schemes.split()

    for scheme in schemes:
        sys.stderr.write('Testing %s...\n' % scheme)
        src = path.join(wrappers_dir, scheme + '.py')

        run_first = check_output([src, 'run_first']).strip()
        run_second = 'receiver' if run_first == 'sender' else 'sender'

        port = utils.get_open_port()

        # start the side that must run first
        cmd = [src, run_first, port]
        first_proc = Popen(cmd, preexec_fn=os.setsid)

        # wait for 'run_first' to be ready
        time.sleep(3)

        # start the other side
        cmd = [src, run_second, '127.0.0.1', port]
        second_proc = Popen(cmd, preexec_fn=os.setsid)

        # test lasts for 3 seconds
        signal.signal(signal.SIGALRM, utils.timeout_handler)
        signal.alarm(3)

        try:
            for proc in [first_proc, second_proc]:
                proc.wait()
                if proc.returncode != 0:
                    sys.exit('%s failed in tests' % scheme)
        except utils.TimeoutError:
            pass
        except Exception as exception:
            sys.exit('test_schemes.py: %s\n' % exception)
        else:
            signal.alarm(0)
            sys.exit('test exited before time limit')
        finally:
            # cleanup
            utils.kill_proc_group(first_proc)
            utils.kill_proc_group(second_proc)
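# Illustrative sketch (assumptions, since the utils module is not shown here):
# plausible shapes for the helpers test_schemes() relies on. timeout_handler is
# installed as the SIGALRM handler and raises TimeoutError, so a scheme still
# running when the 3-second alarm fires counts as a pass; kill_proc_group
# signals the whole process group created by preexec_fn=os.setsid.
import os
import signal


class TimeoutError(Exception):
    pass


def timeout_handler(signum, frame):
    raise TimeoutError()


def kill_proc_group(proc, signum=signal.SIGTERM):
    if proc is None:
        return
    try:
        # proc was started with preexec_fn=os.setsid, so its pid doubles as
        # the process group id; signal the entire group
        os.killpg(os.getpgid(proc.pid), signum)
    except OSError:
        # the group may already have exited
        pass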
def enable_congestion_control(cc):
    # output looks like "net.ipv4.tcp_allowed_congestion_control = cubic reno"
    cc_list = check_output('sysctl net.ipv4.tcp_allowed_congestion_control',
                           shell=True)
    cc_list = cc_list.split('=')[-1].split()

    # return if cc is already in the allowed congestion control list
    if cc in cc_list:
        return

    cc_list.append(cc)
    check_call('sudo sysctl -w net.ipv4.tcp_allowed_congestion_control="%s"'
               % ' '.join(cc_list), shell=True)
def query_clock_offset(ntp_addr, ssh_cmd):
    local_clock_offset = None
    remote_clock_offset = None

    ntp_cmds = {}
    ntpdate_cmd = ['ntpdate', '-t', '5', '-quv', ntp_addr]

    ntp_cmds['local'] = ntpdate_cmd
    ntp_cmds['remote'] = ssh_cmd + ntpdate_cmd

    for side in ['local', 'remote']:
        cmd = ntp_cmds[side]

        fail = True
        for _ in xrange(3):
            try:
                offset = check_output(cmd)
                sys.stderr.write(offset)

                # output is expected to end with '... offset <seconds> sec';
                # extract the offset and convert it to milliseconds
                offset = offset.rsplit(' ', 2)[-2]
                offset = str(float(offset) * 1000)
            except subprocess.CalledProcessError:
                sys.stderr.write('Failed to get clock offset\n')
            except ValueError:
                sys.stderr.write('Cannot convert clock offset to float\n')
            else:
                if side == 'local':
                    local_clock_offset = offset
                else:
                    remote_clock_offset = offset

                fail = False
                break

        if fail:
            sys.stderr.write('Failed after 3 queries to NTP server\n')

    return local_clock_offset, remote_clock_offset
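# Illustrative sketch (not part of the original sources): how the rsplit in
# query_clock_offset() pulls the offset out of ntpdate's output. The server
# address and numbers are made up; the only property relied on is that the
# output ends with '... offset <seconds> sec'.
sample_ntpdate_output = ('31 Mar 14:18:32 ntpdate[1234]: adjust time server '
                         '203.0.113.7 offset -0.003742 sec\n')

offset_sec = sample_ntpdate_output.rsplit(' ', 2)[-2]
offset_ms = str(float(offset_sec) * 1000)
print(offset_ms)  # roughly '-3.742', the clock offset in milliseconds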
def check_qdisc(qdisc):
    curr_qdisc = check_output('sysctl net.core.default_qdisc', shell=True)
    curr_qdisc = curr_qdisc.split('=')[-1].strip()

    if qdisc != curr_qdisc:
        sys.exit('Error: current qdisc %s is not %s' % (curr_qdisc, qdisc))
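# Illustrative usage sketch (an assumption about how the helpers above could be
# combined, not code from the original sources): preparing a host for a
# TCP-based scheme by allowing its kernel congestion control module, switching
# the default qdisc, and verifying the change. 'bbr' and 'fq' are example
# arguments only; assumes the helpers are importable from one module.
def prepare_host_sketch():
    enable_congestion_control('bbr')
    set_qdisc('fq')
    check_qdisc('fq')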