def create_experiment(btlbw, queue_size, flows, experiment_name,
                      host_client, host_server, server_nat_ip):
    exp = cctestbed.Experiment(name=experiment_name,
                               btlbw=btlbw,
                               queue_size=queue_size,
                               flows=flows,
                               server=host_server,
                               client=host_client,
                               config_filename='None',
                               server_nat_ip=server_nat_ip)
    return exp
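
# Illustrative sketch, not part of the original script: a minimal call to
# create_experiment() with a single reno flow. It assumes HOST_CLIENT and
# HOST_SERVER are cctestbed.Host objects defined elsewhere in this module and
# that cctestbed.Flow accepts the kind/client keywords used elsewhere in this file.
def _example_create_experiment():
    flows = [cctestbed.Flow(ccalg='reno', start_time=0, end_time=60, rtt=35,
                            server_port=5201, client_port=5555,
                            client_log=None, server_log=None,
                            kind='iperf', client=HOST_CLIENT)]
    return create_experiment(btlbw=10, queue_size=128, flows=flows,
                             experiment_name='10bw-35rtt-128q-example',
                             host_client=HOST_CLIENT, host_server=HOST_SERVER,
                             server_nat_ip=HOST_CLIENT.ip_wan)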
def run_experiment(website, url, btlbw=10, queue_size=128, rtt=35, force=False):
    experiment_name = '{}bw-{}rtt-{}q-{}'.format(btlbw, rtt, queue_size, website)
    if not force and is_completed_experiment(experiment_name):
        return (None, '')
    logging.info('Creating experiment for website: {}'.format(website))
    url_ip = get_website_ip(url)
    logging.info('Got website IP: {}'.format(url_ip))
    website_rtt = int(float(get_nping_rtt(url_ip)))
    logging.info('Got website RTT: {}'.format(website_rtt))
    if website_rtt >= rtt:
        logging.warning('Skipping experiment with website RTT {} >= {}'.format(
            website_rtt, rtt))
        return (-1, '')

    client = HOST_CLIENT_TEMPLATE
    client['ip_wan'] = url_ip
    client = cctestbed.Host(**client)
    server = HOST_SERVER
    server_nat_ip = HOST_CLIENT.ip_wan  # '128.104.222.182' taro
    server_port = 5201
    client_port = 5555

    flow = {'ccalg': 'reno',
            'end_time': 60,
            'rtt': rtt - website_rtt,
            'start_time': 0}
    flows = [cctestbed.Flow(ccalg=flow['ccalg'],
                            start_time=flow['start_time'],
                            end_time=flow['end_time'],
                            rtt=flow['rtt'],
                            server_port=server_port,
                            client_port=client_port,
                            client_log=None,
                            server_log=None,
                            kind='website',
                            client=client)]
    exp = cctestbed.Experiment(name=experiment_name,
                               btlbw=btlbw,
                               queue_size=queue_size,
                               flows=flows,
                               server=server,
                               client=client,
                               config_filename='experiments-all-ccalgs-aws.yaml',
                               server_nat_ip=server_nat_ip)

    logging.info('Running experiment: {}'.format(exp.name))

    # make sure tcpdump is cleaned up
    logging.info('Making sure tcpdump is cleaned up')
    with cctestbed.get_ssh_client(
            exp.server.ip_wan,
            username=exp.server.username,
            key_filename=exp.server.key_filename) as ssh_client:
        cctestbed.exec_command(ssh_client, exp.client.ip_wan,
                               'sudo pkill -9 tcpdump')

    with ExitStack() as stack:
        # add DNAT rule
        stack.enter_context(add_dnat_rule(exp, url_ip))
        # add route to URL
        stack.enter_context(add_route(exp, url_ip))
        # add dns entry
        stack.enter_context(add_dns_rule(exp, website, url_ip))
        exp._run_tcpdump('server', stack)
        # run the flow
        # turns out there is a bug when using subprocess and Popen in Python 3.5
        # so skip ping needs to be true
        # https://bugs.python.org/issue27122
        cctestbed.stop_bess()
        stack.enter_context(exp._run_bess(ping_source='server', skip_ping=False))
        # give bess some time to start
        time.sleep(5)
        exp._show_bess_pipeline()
        stack.enter_context(exp._run_bess_monitor())
        stack.enter_context(exp._run_rtt_monitor())
        with cctestbed.get_ssh_client(
                exp.server.ip_wan,
                exp.server.username,
                key_filename=exp.server.key_filename) as ssh_client:
            filename = os.path.basename(url)
            if filename.strip() == '':
                logging.warning('Could not get filename from URL')
            start_flow_cmd = ('timeout 65s wget --no-check-certificate --no-cache '
                              '--delete-after --connect-timeout=10 --tries=3 '
                              '--bind-address {} -P /tmp/ "{}" '
                              '|| rm -f /tmp/{}.tmp*').format(
                                  exp.server.ip_lan, url, filename)
            # won't return until flow is done
            flow_start_time = time.time()
            _, stdout, _ = cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                                  start_flow_cmd)
            exit_status = stdout.channel.recv_exit_status()
            flow_end_time = time.time()
            logging.info('Flow ran for {} seconds'.format(
                flow_end_time - flow_start_time))
        exp._show_bess_pipeline()
        cmd = '/opt/bess/bessctl/bessctl command module queue0 get_status EmptyArg'
        print(cctestbed.run_local_command(cmd))
        logging.info('Dumping website data to log: {}'.format(
            exp.logs['website_log']))
        with open(exp.logs['website_log'], 'w') as f:
            website_info = {}
            website_info['website'] = website
            website_info['url'] = url
            website_info['website_rtt'] = website_rtt
            website_info['url_ip'] = url_ip
            website_info['flow_runtime'] = flow_end_time - flow_start_time
            json.dump(website_info, f)
        if exit_status != 0:
            if exit_status == 124:  # timeout exit status
                print('Timeout. Flow longer than 65s.')
                logging.warning('Timeout. Flow longer than 65s.')
            else:
                logging.error(stdout.read())
                raise RuntimeError('Error running flow.')
    proc = exp._compress_logs_url()
    return (proc, '{}-{}'.format(experiment_name, exp.exp_time))
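
# Illustrative driver sketch, an assumption not present in the original module:
# run the single-website experiment over a {website: url} dict and wait for the
# log-compression process returned by run_experiment(). It assumes the first
# element of the return tuple is a subprocess.Popen from _compress_logs_url()
# (or None / -1 when the experiment is skipped).
def _example_run_website_experiments(websites):
    results = {}
    for website, url in websites.items():
        proc, exp_name = run_experiment(website, url,
                                        btlbw=10, queue_size=128, rtt=35)
        if proc is not None and proc != -1:
            proc.wait()  # assumed Popen-like handle for the compression job
        results[website] = exp_name
    return results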
def run_ec2_experiment(ec2, instance, ccalg, btlbw, rtt, queue_size, region,
                       loss_rate=None, force=False):
    if loss_rate is not None:
        experiment_name = '{}-{}bw-{}rtt-{}q-{}loss-{}'.format(
            ccalg, btlbw, rtt, queue_size, loss_rate, region)
    else:
        experiment_name = '{}-{}bw-{}rtt-{}q-{}'.format(
            ccalg, btlbw, rtt, queue_size, region)
    if not force and ccalg_predict.is_completed_experiment(experiment_name):
        return
    else:
        if ccalg_predict.ran_experiment_today(experiment_name):
            return
    logging.info('Creating experiment for instance: {}-{}'.format(region, ccalg))
    instance_rtt = int(float(get_ping_rtt(instance.public_ip_address)))
    logging.info('Got instance RTT: {}'.format(instance_rtt))
    if instance_rtt >= rtt:
        logging.warning('Skipping experiment with instance RTT {} >= {}'.format(
            instance_rtt, rtt))
        return

    # HOST_SERVER is used directly: unpacking it with cctestbed.Host(**server)
    # raised "TypeError: type object argument after ** must be a mapping, not Host".
    server = generate_experiments.HOST_SERVER_TEMPLATE
    server = generate_experiments.HOST_SERVER  # overrides the template above
    client = generate_experiments.HOST_AWS_TEMPLATE
    client['ip_wan'] = instance.public_ip_address
    client['ip_lan'] = instance.private_ip_address
    client['key_filename'] = get_key_pair_path(ec2)
    server_nat_ip_lan = generate_experiments.HOST_CLIENT.ip_lan
    print('server_nat_ip_lan:', server_nat_ip_lan)
    server_nat_ip = generate_experiments.HOST_CLIENT.ip_wan
    print('server_nat_ip_wan:', server_nat_ip)
    client = cctestbed.Host(**client)
    print("AWS Client:", client)
    print("Server:", server)
    # server = cctestbed.Host(**server)
    cloudlab_client = generate_experiments.HOST_CLIENT  # CLARIFY
    print("Cloudlab_Client:", cloudlab_client)
    # cloudlab_client['ip_wan'] = server_nat_ip
    # cloudlab_client = cctestbed.Host(**cloudlab_client)
    server_port = 5201
    client_port = 5555
    # print('Connecting dpdk')
    # cctestbed.connect_dpdk(server, client)

    flow = {'ccalg': ccalg,
            'end_time': 60,
            'rtt': rtt - instance_rtt,
            'start_time': 0}
    flows = [cctestbed.Flow(ccalg=flow['ccalg'],
                            start_time=flow['start_time'],
                            end_time=flow['end_time'],
                            rtt=flow['rtt'],
                            server_port=server_port,
                            client_port=client_port,
                            client_log=None,
                            server_log=None,
                            kind='iperf',
                            client=cloudlab_client)]
    exp = cctestbed.Experiment(name=experiment_name,
                               btlbw=btlbw,
                               queue_size=queue_size,
                               flows=flows,
                               server=server,
                               client=client,
                               config_filename='experiments-all-ccalgs-aws.yaml',
                               server_nat_ip=server_nat_ip,
                               loss_rate=loss_rate)
    try:
        # make sure old stuff is closed
        exp.cleanup_last_experiment(cleanup_tail=False)
        logging.info('Running experiment: {}'.format(exp.name))
        with ExitStack() as stack:
            # add DNAT rule
            stack.enter_context(
                ccalg_predict.add_dnat_rule(exp, exp.client.ip_wan))
            # add route to URL
            # stack.enter_context(ccalg_predict.add_route(exp, exp.client.ip_wan,
            #                                             gateway_ip=exp.client.ip_lan))  # CLARIFY
            stack.enter_context(
                ccalg_predict.add_route(exp, exp.client.ip_wan,
                                        gateway_ip=server_nat_ip_lan))
            exp._run_tcpdump('server', stack)
            exp._run_tcpdump('client', stack)
            exp._run_tcpprobe(stack)
            stack.enter_context(exp._run_rtt_monitor(program='ping'))
            exp._run_all_flows(stack, bess_config_name='active-middlebox-pmd')
        # compress all log files
        proc = exp._compress_logs_url()
        logging.info('Finished experiment: {}'.format(exp.name))
        return proc
    except Exception as e:
        logging.error('Error occurred while running experiment ' + exp.name)
        exp._delete_logs(delete_description=False)
        raise e
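
# Illustrative sketch, not from the original module: sweep run_ec2_experiment()
# over a few congestion control algorithms for one already-running EC2 instance.
# It assumes `ec2` is a boto3 EC2 service resource and `instance_id` names an
# existing instance in the given region; the ccalg list is arbitrary.
def _example_run_ec2_sweep(ec2, instance_id, region):
    instance = ec2.Instance(instance_id)
    procs = []
    for ccalg in ['cubic', 'reno', 'bbr']:
        proc = run_ec2_experiment(ec2, instance, ccalg,
                                  btlbw=10, rtt=75, queue_size=128,
                                  region=region)
        if proc is not None:  # None means the experiment was skipped
            procs.append(proc)
    return procs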
def run_experiment(website1, url1, website2, url2, btlbw=10, queue_size=128,
                   rtt=35, force=False):
    experiment_name = '{}bw-{}rtt-{}q-{}-{}'.format(btlbw, rtt, queue_size,
                                                    website1, website2)
    if not force and is_completed_experiment(experiment_name):
        return
    else:
        if ran_experiment_today(experiment_name):
            return
    logging.info('Creating experiment for website1: {} website2: {}'.format(
        website1, website2))
    url_ip1 = get_website_ip(url1)
    url_ip2 = get_website_ip(url2)
    logging.info('Got website1 IP: {} website2 IP: {}'.format(url_ip1, url_ip2))
    website_rtt1 = int(float(get_nping_rtt(url_ip1)))
    website_rtt2 = int(float(get_nping_rtt(url_ip2)))
    logging.info('Got website1 RTT: {} website2 RTT: {}'.format(
        website_rtt1, website_rtt2))
    if website_rtt1 >= rtt:
        logging.warning('Skipping experiment with website1 RTT {} >= {}'.format(
            website_rtt1, rtt))
        return -1
    elif website_rtt2 >= rtt:
        logging.warning('Skipping experiment with website2 RTT {} >= {}'.format(
            website_rtt2, rtt))
        return -1

    client = HOST_CLIENT_TEMPLATE
    # TODO: Which IP should be used for client?
    client['ip_wan'] = url_ip1
    client = cctestbed.Host(**client)
    server = HOST_SERVER
    server_nat_ip = HOST_CLIENT.ip_wan  # '128.104.222.182' taro
    server_port = 5201
    client_port = 5555

    flow1 = {'ccalg': 'reno',
             'end_time': 60,
             'rtt': rtt - website_rtt1,
             'start_time': 0}
    flow2 = {'ccalg': 'reno',
             'end_time': 60,
             'rtt': rtt - website_rtt2,
             'start_time': 0}
    flows = [cctestbed.Flow(ccalg=flow1['ccalg'],
                            start_time=flow1['start_time'],
                            end_time=flow1['end_time'],
                            rtt=flow1['rtt'],
                            server_port=server_port,
                            client_port=client_port,
                            client_log=None,
                            server_log=None),
             cctestbed.Flow(ccalg=flow2['ccalg'],
                            start_time=flow2['start_time'],
                            end_time=flow2['end_time'],
                            rtt=flow2['rtt'],
                            server_port=server_port,
                            client_port=client_port,
                            client_log=None,
                            server_log=None)]
    exp = cctestbed.Experiment(name=experiment_name,
                               btlbw=btlbw,
                               queue_size=queue_size,
                               flows=flows,
                               server=server,
                               client=client,
                               config_filename='experiments-all-ccalgs-aws.yaml',
                               server_nat_ip=server_nat_ip)

    logging.info('Running experiment: {}'.format(exp.name))

    # make sure tcpdump is cleaned up
    logging.info('Making sure tcpdump is cleaned up')
    with cctestbed.get_ssh_client(
            exp.server.ip_wan,
            username=exp.server.username,
            key_filename=exp.server.key_filename) as ssh_client:
        cctestbed.exec_command(ssh_client, exp.client.ip_wan,
                               'sudo pkill -9 tcpdump')

    with ExitStack() as stack:
        # add DNAT rules
        stack.enter_context(add_dnat_rule(exp, url_ip1))
        stack.enter_context(add_dnat_rule(exp, url_ip2))
        # add routes to URLs
        stack.enter_context(add_route(exp, url_ip1))
        stack.enter_context(add_route(exp, url_ip2))
        # add dns entries
        stack.enter_context(add_dns_rule(exp, website1, url_ip1))
        stack.enter_context(add_dns_rule(exp, website2, url_ip2))
        exp._run_tcpdump('server', stack)
        # run the flows
        # turns out there is a bug when using subprocess and Popen in Python 3.5
        # so skip ping needs to be true
        # https://bugs.python.org/issue27122
        cctestbed.stop_bess()
        stack.enter_context(exp._run_bess(ping_source='server', skip_ping=False))
        # give bess some time to start
        time.sleep(5)
        exp._show_bess_pipeline()
        stack.enter_context(exp._run_bess_monitor())
        stack.enter_context(exp._run_rtt_monitor())
        with cctestbed.get_ssh_client(
                exp.server.ip_wan,
                exp.server.username,
                key_filename=exp.server.key_filename) as ssh_client:
            filename1 = os.path.basename(url1)
            filename2 = os.path.basename(url2)
            if filename1.strip() == '':
                logging.warning('Could not get filename from URL 1')
            if filename2.strip() == '':
                logging.warning('Could not get filename from URL 2')
            # Start first flow in background and second in foreground
            start_flow_cmd1 = ('timeout 65s wget --no-cache --delete-after '
                               '--connect-timeout=10 --tries=3 '
                               '--bind-address {} -P /tmp/ {} '
                               '|| rm -f /tmp/{}.tmp* &').format(
                                   exp.server.ip_lan, url1, filename1)
            start_flow_cmd2 = ('timeout 65s wget --no-cache --delete-after '
                               '--connect-timeout=10 --tries=3 '
                               '--bind-address {} -P /tmp/ {} '
                               '|| rm -f /tmp/{}.tmp*').format(
                                   exp.server.ip_lan, url2, filename2)
            # won't return until flow is done
            flow_start_time = time.time()
            _, _, _ = cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                             start_flow_cmd1)
            _, stdout, _ = cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                                  start_flow_cmd2)
            exit_status = stdout.channel.recv_exit_status()
            flow_end_time = time.time()
            logging.info('Flow ran for {} seconds'.format(
                flow_end_time - flow_start_time))
        exp._show_bess_pipeline()
        cmd = '/opt/bess/bessctl/bessctl command module queue0 get_status EmptyArg'
        print(cctestbed.run_local_command(cmd))
        if exit_status != 0:
            if exit_status == 124:  # timeout exit status
                print('Timeout. Flow longer than 65s.')
                logging.warning('Timeout. Flow longer than 65s.')
            else:
                logging.error(stdout.read())
                raise RuntimeError('Error running flow.')
    proc = exp._compress_logs_url()
    return (proc, exp.tar_filename, experiment_name)
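
# Illustrative sketch, not part of the original script: run the two-website
# experiment for each pair of distinct sites in a {website: url} dict and collect
# the tarball names of runs that actually executed. It assumes the tuple return
# value (proc, tar_filename, experiment_name) shown above; None / -1 mean skipped.
def _example_run_website_pairs(websites):
    import itertools  # local import to keep this sketch self-contained
    completed = []
    for (site1, url1), (site2, url2) in itertools.combinations(websites.items(), 2):
        result = run_experiment(site1, url1, site2, url2,
                                btlbw=10, queue_size=128, rtt=35)
        if isinstance(result, tuple):
            proc, tar_filename, exp_name = result
            completed.append((exp_name, tar_filename))
    return completed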