예제 #1
0
def start_bess_for_local_video(exp, duration):
    """Run BESS and capture traffic for a single locally-served video flow.

    Starts tcpdump on the server (both a plain capture and an HTTP capture),
    (re)starts BESS with the fairness middlebox config, serves the video via
    a local Apache server, waits for the video flow itself to finish, dumps
    the BESS queue status, and cleans up the local server.

    NOTE(review): `duration` is currently unused — completion is driven by
    `video_flow._wait()` instead. Confirm whether callers expect a timed run.
    """
    with ExitStack() as stack:
        exp._run_tcpdump('server', stack)
        exp._run_tcpdump('server', stack, capture_http=True)
        cctestbed.stop_bess()
        stack.enter_context(exp._run_bess(
            ping_source='client',
            skip_ping=False,
            bess_config_name='active-middlebox-pmd-fairness'))
        # give bess time to start
        time.sleep(5)
        exp._show_bess_pipeline()
        stack.enter_context(exp._run_bess_monitor())
        util.start_apache_server(exp.flows[0])
        video_flow = start_single_local_video_flow(exp.flows[0], exp, stack)
        logging.info('Waiting for flow to finish')
        # wait for flow to finish
        video_flow._wait()
        logging.info('Video flow finished')
        # add a time buffer before finishing up the experiment
        time.sleep(5)
        exp._show_bess_pipeline()
        cmd = '/opt/bess/bessctl/bessctl command module queue0 get_status EmptyArg'
        print(cctestbed.run_local_command(cmd))
        util.stop_local_server_and_cleanup(exp)
예제 #2
0
def start_bess_for_iperf(exp, duration):
    """Start BESS with the fairness middlebox config and run iperf flows.

    Captures server-side traffic with tcpdump, restarts BESS, launches the
    iperf flows, sleeps for the flow duration plus a small buffer, then
    prints the BESS queue status.
    """
    with ExitStack() as cleanup:
        exp._run_tcpdump('server', cleanup)
        cctestbed.stop_bess()
        bess_ctx = exp._run_bess(
            ping_source='client',
            skip_ping=False,
            bess_config_name='active-middlebox-pmd-fairness')
        cleanup.enter_context(bess_ctx)
        # give bess time to start
        time.sleep(5)
        exp._show_bess_pipeline()
        cleanup.enter_context(exp._run_bess_monitor())
        start_iperf_flows(exp, cleanup)
        time.sleep(duration + 5)
        exp._show_bess_pipeline()
        status_cmd = ('/opt/bess/bessctl/bessctl '
                      'command module queue0 get_status EmptyArg')
        print(cctestbed.run_local_command(status_cmd))
예제 #3
0
def start_bess_for_website(exp, duration, web_data):
    """Start BESS and run website flows described by `web_data`.

    Installs DNAT/route/DNS rules for the website IP, captures server-side
    traffic, restarts BESS with the fairness config, runs the website flows
    over SSH for `duration` (+5s buffer), then dumps queue status and writes
    the website metadata to the experiment's website log.

    web_data keys read: url_ip, website, url, website_rtt, experiment_rtt,
    delay.
    """
    with ExitStack() as stack:
        print(web_data)
        stack.enter_context(util.add_dnat_rule(exp, web_data['url_ip']))
        stack.enter_context(util.add_route(exp, web_data['url_ip']))
        stack.enter_context(util.add_dns_rule(exp, web_data['website'], web_data['url_ip']))
        exp._run_tcpdump('server', stack)
        # run the flow
        # turns out there is a bug when using subprocess and Popen in Python 3.5
        # so skip ping needs to be true
        # https://bugs.python.org/issue27122
        cctestbed.stop_bess()
        stack.enter_context(exp._run_bess(ping_source='server',
                                          skip_ping=False,
                                          bess_config_name='active-middlebox-pmd-fairness'))
        # give bess some time to start
        time.sleep(5)
        exp._show_bess_pipeline()
        stack.enter_context(exp._run_bess_monitor())
        stack.enter_context(exp._run_rtt_monitor())
        ssh_client = cctestbed.get_ssh_client(exp.server.ip_wan,
                                              exp.server.username,
                                              key_filename=exp.server.key_filename)

        # was `with ssh_client as ssh_client:` — the self-shadowing alias was
        # redundant; the context manager closes the connection on exit.
        with ssh_client:
            start_website_flows(ssh_client, exp, stack)
            time.sleep(duration + 5)
        logging.info('Flow ran for {} seconds'.format(duration + 5))

        exp._show_bess_pipeline()
        cmd = '/opt/bess/bessctl/bessctl command module queue0 get_status EmptyArg'
        print(cctestbed.run_local_command(cmd))

        logging.info('Dumping website data to log: {}'.format(exp.logs['website_log']))
        with open(exp.logs['website_log'], 'w') as f:
            website_info = {
                'website': web_data['website'],
                'url': web_data['url'],
                'website_rtt': web_data['website_rtt'],
                'experiment_rtt': web_data['experiment_rtt'],
                'delay': web_data['delay'],
                'url_ip': web_data['url_ip'],
                'flow_runtime': duration + 5,
            }
            json.dump(website_info, f)
예제 #4
0
def prerequisite_for_combinatory_tests(experiment, stack, includeWebsite):
    """Set up networking rules, tcpdump captures, and BESS for a combinatory test.

    For each flow in the experiment, installs DNAT/route/DNS rules matching
    the flow kind (web-video flows need rules for both the website IP and the
    video server IP), then starts tcpdump on both hosts and (re)starts BESS
    with the fairness middlebox config.

    All acquired resources are registered on the caller-owned `stack`
    (an ExitStack), so teardown order is the caller's responsibility.

    Args:
        experiment: the experiment object whose flows drive the rule setup.
        stack: caller-owned ExitStack that collects all cleanup contexts.
        includeWebsite: if True, also start the RTT monitor.
    """
    for flow in experiment.flows:
        if flow.kind == const.FLOW_KIND_WEB_VIDEO:  #TODO:Check whether these rules already exist
            # web-video flows need NAT/route/DNS for both the page host and
            # the separate video server host
            stack.enter_context(
                add_dnat_rule_video(experiment, flow.webInfo['url_ip'],
                                    flow.webInfo['video_url_ip']))
            stack.enter_context(
                add_route_video(experiment, flow.webInfo['url_ip'],
                                flow.webInfo['video_url_ip']))
            stack.enter_context(
                add_dns_rule(experiment, flow.webInfo['website'],
                             flow.webInfo['url_ip']))
            stack.enter_context(
                add_dns_rule(experiment, flow.webInfo['video_server_host'],
                             flow.webInfo['video_url_ip']))

        if flow.kind == const.FLOW_KIND_WEBSITE:  #TODO:Check whether these rules already exist
            stack.enter_context(
                add_dnat_rule(experiment, flow.webInfo['url_ip']))
            stack.enter_context(add_route(experiment, flow.webInfo['url_ip']))
            stack.enter_context(
                add_dns_rule(experiment, flow.webInfo['website'],
                             flow.webInfo['url_ip']))

    experiment._run_tcpdump('server', stack)
    experiment._run_tcpdump('client', stack)
    cctestbed.stop_bess()
    stack.enter_context(
        experiment._run_bess(ping_source='server',
                             skip_ping=False,
                             bess_config_name='active-middlebox-pmd-fairness'))
    # give bess some time to start
    time.sleep(5)
    experiment._show_bess_pipeline()
    stack.enter_context(experiment._run_bess_monitor())
    if includeWebsite:
        stack.enter_context(experiment._run_rtt_monitor())
예제 #5
0
def run_experiment(website, url, btlbw=10, queue_size=128, rtt=35, force=False):
    """Run a single-website cctestbed experiment.

    Downloads `url` through the BESS middlebox (with DNAT/route/DNS rules
    installed for the website IP), records tcpdump/BESS/RTT logs, and writes
    website metadata to the experiment's website log.

    Args:
        website: website name, used in the experiment name and DNS rule.
        url: URL to fetch with wget on the server.
        btlbw: bottleneck bandwidth.
        queue_size: bottleneck queue size.
        rtt: target RTT; must exceed the website's measured RTT.
        force: rerun even if this experiment already completed.

    Returns:
        (proc, tarname) on success, (None, '') if already completed,
        (-1, '') if the website RTT is >= the requested rtt.

    Raises:
        RuntimeError: if the wget flow exits nonzero for a reason other
            than the 65s timeout (exit status 124).
    """
    experiment_name = '{}bw-{}rtt-{}q-{}'.format(btlbw, rtt, queue_size, website)
    if not force and is_completed_experiment(experiment_name):
        return (None, '')
    logging.info('Creating experiment for website: {}'.format(website))
    url_ip = get_website_ip(url)
    logging.info('Got website IP: {}'.format(url_ip))
    website_rtt = int(float(get_nping_rtt(url_ip)))
    logging.info('Got website RTT: {}'.format(website_rtt))

    if website_rtt >= rtt:
        logging.warning('Skipping experiment with website RTT {} >= {}'.format(
            website_rtt, rtt))
        return (-1, '')

    # copy the template so the shared module-level dict is not mutated
    # across calls (the original assigned the template itself and then
    # wrote url_ip into it)
    client = dict(HOST_CLIENT_TEMPLATE)
    client['ip_wan'] = url_ip
    client = cctestbed.Host(**client)
    server = HOST_SERVER

    server_nat_ip = HOST_CLIENT.ip_wan #'128.104.222.182'  taro
    server_port = 5201
    client_port = 5555

    flow = {'ccalg': 'reno',
            'end_time': 60,
            'rtt': rtt - website_rtt,
            'start_time': 0}
    flows = [cctestbed.Flow(ccalg=flow['ccalg'], start_time=flow['start_time'],
                            end_time=flow['end_time'], rtt=flow['rtt'],
                            server_port=server_port, client_port=client_port,
                            client_log=None, server_log=None, kind='website',
                            client=client)]

    exp = cctestbed.Experiment(name=experiment_name,
                     btlbw=btlbw,
                     queue_size=queue_size,
                     flows=flows, server=server, client=client,
                     config_filename='experiments-all-ccalgs-aws.yaml',
                     server_nat_ip=server_nat_ip)

    logging.info('Running experiment: {}'.format(exp.name))

    # make sure tcpdump cleaned up
    logging.info('Making sure tcpdump is cleaned up')
    with cctestbed.get_ssh_client(
            exp.server.ip_wan,
            username=exp.server.username,
            key_filename=exp.server.key_filename) as ssh_client:
        cctestbed.exec_command(
            ssh_client,
            exp.client.ip_wan,
            'sudo pkill -9 tcpdump')

    with ExitStack() as stack:
        # add DNAT rule
        stack.enter_context(add_dnat_rule(exp, url_ip))
        # add route to URL
        stack.enter_context(add_route(exp, url_ip))
        # add dns entry
        stack.enter_context(add_dns_rule(exp, website, url_ip))
        exp._run_tcpdump('server', stack)
        # run the flow
        # turns out there is a bug when using subprocess and Popen in Python 3.5
        # so skip ping needs to be true
        # https://bugs.python.org/issue27122
        cctestbed.stop_bess()
        stack.enter_context(exp._run_bess(ping_source='server', skip_ping=False))
        # give bess some time to start
        time.sleep(5)
        exp._show_bess_pipeline()
        stack.enter_context(exp._run_bess_monitor())
        stack.enter_context(exp._run_rtt_monitor())
        with cctestbed.get_ssh_client(exp.server.ip_wan,
                                      exp.server.username,
                                      key_filename=exp.server.key_filename) as ssh_client:
            filename = os.path.basename(url)
            if filename.strip() == '':
                logging.warning('Could not get filename from URL')
            start_flow_cmd = 'timeout 65s wget --no-check-certificate --no-cache --delete-after --connect-timeout=10 --tries=3 --bind-address {}  -P /tmp/ "{}" || rm -f /tmp/{}.tmp*'.format(exp.server.ip_lan, url, filename)
            # won't return until flow is done
            flow_start_time = time.time()
            _, stdout, _ = cctestbed.exec_command(ssh_client, exp.server.ip_wan, start_flow_cmd)
            exit_status = stdout.channel.recv_exit_status()
            flow_end_time = time.time()
            logging.info('Flow ran for {} seconds'.format(flow_end_time - flow_start_time))
        exp._show_bess_pipeline()
        cmd = '/opt/bess/bessctl/bessctl command module queue0 get_status EmptyArg'
        print(cctestbed.run_local_command(cmd))

        logging.info('Dumping website data to log: {}'.format(exp.logs['website_log']))
        with open(exp.logs['website_log'], 'w') as f:
            website_info = {
                'website': website,
                'url': url,
                'website_rtt': website_rtt,
                'url_ip': url_ip,
                'flow_runtime': flow_end_time - flow_start_time,
            }
            json.dump(website_info, f)

        if exit_status != 0:
            if exit_status == 124: # timeout exit status
                print('Timeout. Flow longer than 65s.')
                logging.warning('Timeout. Flow longer than 65s.')
            else:
                logging.error(stdout.read())
                raise RuntimeError('Error running flow.')

    proc = exp._compress_logs_url()
    return (proc, '{}-{}'.format(experiment_name, exp.exp_time))
예제 #6
0
def run_experiment(website1,
                   url1,
                   website2,
                   url2,
                   btlbw=10,
                   queue_size=128,
                   rtt=35,
                   force=False):
    """Run a two-website cctestbed experiment.

    Fetches `url1` (backgrounded) and `url2` (foregrounded) concurrently
    through the BESS middlebox, with DNAT/route/DNS rules installed for both
    website IPs, and records tcpdump/BESS/RTT logs.

    Args:
        website1, website2: website names used in the experiment name and
            DNS rules.
        url1, url2: URLs to fetch with wget on the server.
        btlbw: bottleneck bandwidth.
        queue_size: bottleneck queue size.
        rtt: target RTT; must exceed both websites' measured RTTs.
        force: rerun even if this experiment already completed.

    Returns:
        (proc, tar_filename, experiment_name) on success; None if the
        experiment already completed (or ran today); -1 if either website's
        RTT is >= the requested rtt.

    Raises:
        RuntimeError: if the foreground wget flow exits nonzero for a
            reason other than the 65s timeout (exit status 124).
    """
    experiment_name = '{}bw-{}rtt-{}q-{}-{}'.format(btlbw, rtt, queue_size,
                                                    website1, website2)
    if not force and is_completed_experiment(experiment_name):
        return
    else:
        if ran_experiment_today(experiment_name):
            return
    logging.info('Creating experiment for website1: {} website2: {}'.format(
        website1, website2))
    url_ip1 = get_website_ip(url1)
    url_ip2 = get_website_ip(url2)
    logging.info('Got website1 IP: {} website2 IP: {}'.format(
        url_ip1, url_ip2))
    website_rtt1 = int(float(get_nping_rtt(url_ip1)))
    website_rtt2 = int(float(get_nping_rtt(url_ip2)))
    logging.info('Got website1 RTT: {} website2 RTT: {}'.format(
        website_rtt1, website_rtt2))

    if website_rtt1 >= rtt:
        logging.warning(
            'Skipping experiment with website1 RTT {} >= {}'.format(
                website_rtt1, rtt))
        return -1
    elif website_rtt2 >= rtt:
        logging.warning(
            'Skipping experiment with website2 RTT {} >= {}'.format(
                website_rtt2, rtt))
        return -1

    # copy the template so the shared module-level dict is not mutated
    # across calls (the original assigned the template itself and then
    # wrote url_ip1 into it)
    client = dict(HOST_CLIENT_TEMPLATE)
    # TODO: Which IP should be used for client?
    client['ip_wan'] = url_ip1
    client = cctestbed.Host(**client)
    server = HOST_SERVER

    server_nat_ip = HOST_CLIENT.ip_wan  #'128.104.222.182'  taro
    server_port = 5201
    client_port = 5555

    flow1 = {
        'ccalg': 'reno',
        'end_time': 60,
        'rtt': rtt - website_rtt1,
        'start_time': 0
    }
    flow2 = {
        'ccalg': 'reno',
        'end_time': 60,
        'rtt': rtt - website_rtt2,
        'start_time': 0
    }
    flows = [
        cctestbed.Flow(ccalg=flow1['ccalg'],
                       start_time=flow1['start_time'],
                       end_time=flow1['end_time'],
                       rtt=flow1['rtt'],
                       server_port=server_port,
                       client_port=client_port,
                       client_log=None,
                       server_log=None),
        cctestbed.Flow(ccalg=flow2['ccalg'],
                       start_time=flow2['start_time'],
                       end_time=flow2['end_time'],
                       rtt=flow2['rtt'],
                       server_port=server_port,
                       client_port=client_port,
                       client_log=None,
                       server_log=None),
    ]

    exp = cctestbed.Experiment(
        name=experiment_name,
        btlbw=btlbw,
        queue_size=queue_size,
        flows=flows,
        server=server,
        client=client,
        config_filename='experiments-all-ccalgs-aws.yaml',
        server_nat_ip=server_nat_ip)

    logging.info('Running experiment: {}'.format(exp.name))

    # make sure tcpdump cleaned up
    logging.info('Making sure tcpdump is cleaned up')
    with cctestbed.get_ssh_client(
            exp.server.ip_wan,
            username=exp.server.username,
            key_filename=exp.server.key_filename) as ssh_client:
        cctestbed.exec_command(ssh_client, exp.client.ip_wan,
                               'sudo pkill -9 tcpdump')

    with ExitStack() as stack:
        # add DNAT rule
        stack.enter_context(add_dnat_rule(exp, url_ip1))
        stack.enter_context(add_dnat_rule(exp, url_ip2))
        # add route to URL
        stack.enter_context(add_route(exp, url_ip1))
        stack.enter_context(add_route(exp, url_ip2))
        # add dns entry
        stack.enter_context(add_dns_rule(exp, website1, url_ip1))
        stack.enter_context(add_dns_rule(exp, website2, url_ip2))
        exp._run_tcpdump('server', stack)
        # run the flow
        # turns out there is a bug when using subprocess and Popen in Python 3.5
        # so skip ping needs to be true
        # https://bugs.python.org/issue27122
        cctestbed.stop_bess()
        stack.enter_context(
            exp._run_bess(ping_source='server', skip_ping=False))
        # give bess some time to start
        time.sleep(5)
        exp._show_bess_pipeline()
        stack.enter_context(exp._run_bess_monitor())
        stack.enter_context(exp._run_rtt_monitor())
        with cctestbed.get_ssh_client(
                exp.server.ip_wan,
                exp.server.username,
                key_filename=exp.server.key_filename) as ssh_client:
            filename1 = os.path.basename(url1)
            filename2 = os.path.basename(url2)
            if filename1.strip() == '':
                logging.warning('Could not get filename from URL 1')
            if filename2.strip() == '':
                logging.warning('Could not get filename from URL 2')
            # Start first flow in background and second in foreground.
            # URLs are quoted (matching the single-website experiment) so
            # query strings with '&' etc. don't break the remote shell.
            start_flow_cmd1 = 'timeout 65s wget --no-cache --delete-after --connect-timeout=10 --tries=3 --bind-address {}  -P /tmp/ "{}" || rm -f /tmp/{}.tmp* &'.format(
                exp.server.ip_lan, url1, filename1)
            start_flow_cmd2 = 'timeout 65s wget --no-cache --delete-after --connect-timeout=10 --tries=3 --bind-address {}  -P /tmp/ "{}" || rm -f /tmp/{}.tmp*'.format(
                exp.server.ip_lan, url2, filename2)
            # won't return until the foreground flow is done
            flow_start_time = time.time()
            _, _, _ = cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                             start_flow_cmd1)
            _, stdout, _ = cctestbed.exec_command(ssh_client,
                                                  exp.server.ip_wan,
                                                  start_flow_cmd2)
            exit_status = stdout.channel.recv_exit_status()
            flow_end_time = time.time()
            logging.info('Flow ran for {} seconds'.format(flow_end_time -
                                                          flow_start_time))
        exp._show_bess_pipeline()
        cmd = '/opt/bess/bessctl/bessctl command module queue0 get_status EmptyArg'
        print(cctestbed.run_local_command(cmd))
        if exit_status != 0:
            if exit_status == 124:  # timeout exit status
                print('Timeout. Flow longer than 65s.')
                logging.warning('Timeout. Flow longer than 65s.')
            else:
                logging.error(stdout.read())
                raise RuntimeError('Error running flow.')
    proc = exp._compress_logs_url()
    return (proc, exp.tar_filename, experiment_name)