コード例 #1
0
def add_route(exp, url_ip, video_url_ip, gateway_ip=None):
    """Context-manager generator: install host routes on the server for the
    website IP and the video-data IP through *gateway_ip* (defaults to the
    client's LAN address), and tear the routes down on exit.

    NOTE(review): this function yields, so it is presumably wrapped with
    contextlib.contextmanager at the definition site in the full file --
    confirm the decorator was only lost in this excerpt.
    """
    if gateway_ip is None:
        gateway_ip = exp.client.ip_lan
    target_ips = (url_ip, video_url_ip)
    with cctestbed.get_ssh_client(
            exp.server.ip_wan,
            exp.server.username,
            key_filename=exp.server.key_filename) as ssh_client:
        # One `route add` per destination (website, then video data).
        for ip in target_ips:
            route_cmd = 'sudo route add {} gw {}'.format(ip, gateway_ip)
            print("Route command :", route_cmd)
            cctestbed.exec_command(ssh_client, exp.server.ip_wan, route_cmd)
    try:
        yield
    finally:
        # Remove both routes when the context exits, even on error.
        with cctestbed.get_ssh_client(
                exp.server.ip_wan,
                exp.server.username,
                key_filename=exp.server.key_filename) as ssh_client:
            for ip in target_ips:
                cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                       'sudo route del {}'.format(ip))
コード例 #2
0
def add_dnat_rule(exp, url_ip, video_url_ip):
    """Context-manager generator: install DNAT rules on the NAT machine that
    rewrite traffic arriving from *url_ip* and *video_url_ip* to the server's
    LAN address, and delete both rules on exit.

    Parameters
    ----------
    exp : experiment object exposing .server_nat_ip and .server host info
    url_ip : str
        Public IP of the website.
    video_url_ip : str
        Public IP of the video-data host.
    """
    with cctestbed.get_ssh_client(exp.server_nat_ip, exp.server.username,
                                  exp.server.key_filename) as ssh_client:
        # Bug fix: the original passed the format string and its arguments as
        # separate print() arguments, so the '{}' placeholders were printed
        # literally. Use str.format so the IPs actually appear in the output.
        print("NAT source {} destination {}".format(url_ip, exp.server.ip_lan))
        dnat_rule_cmd1 = 'sudo iptables -t nat -A PREROUTING -i enp1s0f0 --source {} -j DNAT --to-destination {}'.format(
            url_ip, exp.server.ip_lan)
        print("NAT IP {}".format(exp.server_nat_ip))
        cctestbed.exec_command(ssh_client, exp.server_nat_ip, dnat_rule_cmd1)

        # Add rule for video data
        print("NAT source {} destination {}".format(video_url_ip,
                                                    exp.server.ip_lan))
        dnat_rule_cmd2 = 'sudo iptables -t nat -A PREROUTING -i enp1s0f0 --source {} -j DNAT --to-destination {}'.format(
            video_url_ip, exp.server.ip_lan)
        print("Video NAT IP {}".format(exp.server_nat_ip))
        cctestbed.exec_command(ssh_client, exp.server_nat_ip, dnat_rule_cmd2)

    try:
        yield
    finally:
        # remove DNAT rules once done with this context
        with cctestbed.get_ssh_client(exp.server_nat_ip, exp.server.username,
                                      exp.server.key_filename) as ssh_client:
            # Deleting PREROUTING rule 1 twice removes both rules added above:
            # after the first delete the second rule shifts into position 1.
            # TODO: remove hard coding of the rule position here
            dnat_delete_cmd = 'sudo iptables -t nat --delete PREROUTING 1'
            # NOTE(review): the SSH session targets exp.server_nat_ip but
            # exec_command is given exp.server.ip_wan -- presumably the second
            # argument is only used for logging; confirm in cctestbed.
            cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                   dnat_delete_cmd)
            cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                   dnat_delete_cmd)
コード例 #3
0
def get_experiment(tarfile_remotepath):
    """Fetch an experiment tarball (if not already cached locally), cache its
    JSON description under DATAPATH_PROCESSED, and build an Experiment.

    Returns a (experiment_name, Experiment) tuple.
    """
    exp_name = os.path.basename(tarfile_remotepath[:-len('.tar.gz')])
    local_tar = os.path.join(DATAPATH_RAW, '{}.tar.gz'.format(exp_name))
    # Download the tarball over SFTP only when it isn't already on disk.
    if not os.path.isfile(local_tar):
        with cctestbed.get_ssh_client(REMOTE_IP_ADDR,
                                      username=REMOTE_USERNAME) as ssh_client:
            sftp = ssh_client.open_sftp()
            try:
                print('Copying remotepath {} to localpath {}'.format(
                    tarfile_remotepath, local_tar))
                sftp.get(tarfile_remotepath, local_tar)
            finally:
                sftp.close()
    # The experiment description JSON is cached in the processed data path.
    description_filename = '{}.json'.format(exp_name)
    description_path = os.path.join(DATAPATH_PROCESSED, description_filename)
    if os.path.isfile(description_path):
        with open(description_path) as f:
            description = json.load(f)
    else:
        # Pull the description out of the tarball, then cache it.
        with untarfile(local_tar, description_filename) as f:
            description = json.load(f)
        with open(description_path, 'w') as f:
            json.dump(description, f)
    return exp_name, Experiment(tarfile_localpath=local_tar, **description)
コード例 #4
0
def add_route(exp, url_ip, gateway_ip=None):
    """Context-manager generator: add a host route for *url_ip* on the server
    via *gateway_ip* (defaults to the client's LAN address); remove it on
    exit.
    """
    if gateway_ip is None:
        gateway_ip = exp.client.ip_lan
    with cctestbed.get_ssh_client(exp.server.ip_wan,
                                  exp.server.username,
                                  key_filename=exp.server.key_filename) as ssh_client:
        cctestbed.exec_command(
            ssh_client, exp.server.ip_wan,
            'sudo route add {} gw {}'.format(url_ip, gateway_ip))
    try:
        yield
    finally:
        # Always delete the route when the context exits.
        with cctestbed.get_ssh_client(exp.server.ip_wan,
                                      exp.server.username,
                                      key_filename=exp.server.key_filename) as ssh_client:
            cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                   'sudo route del {}'.format(url_ip))
コード例 #5
0
def add_dns_rule(exp, website, url_ip):
    """Context-manager generator: pin *website* to *url_ip* via the server's
    /etc/hosts file, and remove the entry on exit."""
    host_entry_cmd = "echo '{}   {}' | sudo tee -a /etc/hosts".format(url_ip,
                                                                      website)
    with cctestbed.get_ssh_client(exp.server.ip_wan,
                                  exp.server.username,
                                  key_filename=exp.server.key_filename) as ssh_client:
        cctestbed.exec_command(ssh_client, exp.server.ip_wan, host_entry_cmd)
    try:
        yield
    finally:
        with cctestbed.get_ssh_client(exp.server.ip_wan,
                                      exp.server.username,
                                      key_filename=exp.server.key_filename) as ssh_client:
            # Deletes the last line of /etc/hosts (the entry added above).
            # TODO: should probs check that it's the line we want to delete
            cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                   "sudo sed -i '$ d' /etc/hosts")
コード例 #6
0
def clean_tcpdump(exp):
    """Kill any leftover tcpdump processes on the experiment's server host."""
    logging.info('Making sure tcpdump is cleaned up ')
    kill_cmd = 'sudo pkill -9 tcpdump'
    with cctestbed.get_ssh_client(
            exp.server.ip_wan,
            username=exp.server.username,
            key_filename=exp.server.key_filename) as ssh_client:
        cctestbed.exec_command(ssh_client, exp.server.ip_wan, kill_cmd)
コード例 #7
0
def add_dnat_rule(exp, url_ip):
    """Context-manager generator: install a SNAT rule (server LAN -> client
    WAN) plus a DNAT rule (traffic from *url_ip* -> server LAN) on the NAT
    machine, and delete both rules on exit."""
    print('exp.server_nat_ip>', exp.server_nat_ip)
    nat_rules_cmd = (
        'sudo iptables -t nat -A POSTROUTING --source {} -o enp1s0f0 '
        '-j SNAT --to {} && sudo iptables -t nat -A PREROUTING '
        '-i enp1s0f0 --source {} -j DNAT --to-destination {}').format(
            HOST_SERVER.ip_lan, HOST_CLIENT.ip_wan, url_ip, exp.server.ip_lan)
    with cctestbed.get_ssh_client(exp.server_nat_ip, exp.server.username,
                                  exp.server.key_filename) as ssh_client:
        print("dnat_rule_cmd>>", nat_rules_cmd)
        cctestbed.exec_command(ssh_client, exp.server_nat_ip, nat_rules_cmd)
    try:
        yield
    finally:
        # remove both NAT rules once done with this context
        with cctestbed.get_ssh_client(exp.server_nat_ip, exp.server.username,
                                      exp.server.key_filename) as ssh_client:
            # TODO: remove hard coding of the rule positions here
            cleanup_cmd = ('sudo iptables -t nat --delete PREROUTING 1 && '
                           'sudo iptables -t nat --delete POSTROUTING 1')
            cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                   cleanup_cmd)
コード例 #8
0
def test_experiment_run_tcpprobe(experiment):
    """End-to-end check of Experiment._run_tcpprobe: the tcp_probe_ray kernel
    module and the probe process must exist while the ExitStack is open, and
    both must be gone (and the log written) once it closes."""

    def _module_grep_status():
        # grep exits 0 when tcp_probe_ray appears in /proc/modules, 1 otherwise.
        client = mut.get_ssh_client(experiment.client.ip_wan)
        try:
            _, out, _ = client.exec_command(
                'cat /proc/modules | grep tcp_probe_ray')
            return out.channel.recv_exit_status()
        finally:
            client.close()

    with ExitStack() as stack:
        pid = experiment._run_tcpprobe(stack)
        # module loaded and probe process alive while the context is open
        assert (_module_grep_status() == 0)
        assert (is_remote_process_running(experiment.client.ip_wan, pid))
    # after the stack unwinds: process dead, module unloaded, log on disk
    assert (not is_remote_process_running(experiment.client.ip_wan, pid))
    assert (_module_grep_status() == 1)
    assert (os.path.isfile(experiment.logs['tcpprobe_log']))
    os.remove(experiment.logs['tcpprobe_log'])
コード例 #9
0
def add_dnat_rule(exp, url_ip):
    """Context-manager generator: DNAT traffic arriving from *url_ip* to the
    server's LAN address on the NAT machine; delete the rule on exit."""
    print("Server NAT IP:Username:keyfilename>>", exp.server_nat_ip,
          exp.server.username, exp.server.key_filename)
    rule = ('sudo iptables -t nat -A PREROUTING -i enp1s0f0 '
            '--source {} -j DNAT --to-destination {}').format(
                url_ip, exp.server.ip_lan)
    with cctestbed.get_ssh_client(exp.server_nat_ip, exp.server.username,
                                  exp.server.key_filename) as ssh_client:
        print('dnat_rule_cmd:', rule)
        cctestbed.exec_command(ssh_client, exp.server_nat_ip, rule)
    try:
        yield
    finally:
        # remove DNAT rule once done with this context
        with cctestbed.get_ssh_client(exp.server_nat_ip, exp.server.username,
                                      exp.server.key_filename) as ssh_client:
            # TODO: remove hard coding of the rule position here
            cctestbed.exec_command(
                ssh_client, exp.server.ip_wan,
                'sudo iptables -t nat --delete PREROUTING 1')
コード例 #10
0
def start_apache_server(flow):
    """Start the Apache server running on the cctestbed client host."""
    start_cmd = "source /etc/environment ; sudo /tmp/ruk/loc/bin/apachectl -k start"
    client_host = flow.client
    with cctestbed.get_ssh_client(
            client_host.ip_wan,
            client_host.username,
            key_filename=client_host.key_filename) as ssh_client:
        cctestbed.exec_command(ssh_client, client_host.ip_wan, start_cmd)
コード例 #11
0
def update_apache_config(host_client, server_port):
    """Append a 'Listen <lan-ip>:<port>' directive to the Apache httpd.conf
    on the client host."""
    print("update httpd.conf with listener port > ", server_port)
    listen_cmd = 'echo Listen {}:{} | sudo tee -a /tmp/ruk/loc/conf/httpd.conf'.format(
        host_client.ip_lan, server_port)
    with cctestbed.get_ssh_client(
            host_client.ip_wan,
            host_client.username,
            key_filename=host_client.key_filename) as ssh_client:
        cctestbed.exec_command(ssh_client, host_client.ip_wan, listen_cmd)
コード例 #12
0
def stop_local_server_and_cleanup(exp):
    """Stop the local Apache server on the client host and undo the
    per-experiment config: unset APACHE_CCA_PORTS and strip the Listen
    directives that were appended to httpd.conf."""
    print("stop_local_server_and_cleanup")
    flow = exp.flows[0]  #TODO:Pick a local website or local video service flow
    cleanup_cmds = (
        "sudo /tmp/ruk/loc/bin/apachectl -k stop",
        "unset APACHE_CCA_PORTS",
        "cd /tmp/ruk/loc/conf && sed -i.bak '/^Listen/d' httpd.conf",
    )
    with cctestbed.get_ssh_client(
            flow.client.ip_wan,
            flow.client.username,
            key_filename=flow.client.key_filename) as ssh_client:
        for cmd in cleanup_cmds:
            cctestbed.exec_command(ssh_client, flow.client.ip_wan, cmd)
コード例 #13
0
def set_env_with_congestion(host_client, env_ccas_with_ports):
    """Persist the APACHE_CCA_PORTS mapping (ports -> congestion-control
    algorithms) into /etc/environment on the client host, then source and
    export it in the same shell."""
    full_cmd = ('echo APACHE_CCA_PORTS={} | sudo tee -a /etc/environment'
                ' ; source /etc/environment ; export APACHE_CCA_PORTS').format(
                    env_ccas_with_ports)
    print("running cmd >", full_cmd)
    with cctestbed.get_ssh_client(
            host_client.ip_wan,
            host_client.username,
            key_filename=host_client.key_filename) as ssh_client:
        cctestbed.exec_command(ssh_client, host_client.ip_wan, full_cmd)
コード例 #14
0
ファイル: flow_impl.py プロジェクト: arukshani/tmp-cctestbed
def start_single_web_video_flow(exp, stack, url, duration):
    """Run one headless-Chrome video flow from the experiment's server node.

    Opens an SSH session to the server's WAN address, records the SSL key
    log path in exp.logs, then runs Chrome headless against *url* under a
    `timeout` of duration+5 seconds (QUIC disabled so the flow uses TCP).

    Returns (stdout, flow_start_time): the stdout channel of the blocking
    Chrome command and the wall-clock time just before the commands were
    issued.

    NOTE(review): each exec_command presumably runs in its own shell, so the
    `export SSLKEYLOGFILE=...` likely does not affect the later Chrome
    invocation -- Chrome's own --ssl-key-log-file flag is what writes the
    key log. Confirm against cctestbed.exec_command semantics.
    """
    with cctestbed.get_ssh_client(exp.server.ip_wan, exp.server.username, key_filename=exp.server.key_filename) as ssh_client:
        # start_flow_cmd = 'timeout {}s /tmp/chrome-linux/chrome --headless --ssl-key-log-file=/users/rukshani/SSLkeylogs/sslkeylog-{}-{}.log --remote-debugging-port=9222 --autoplay-policy=no-user-gesture-required --window-size={},{} --start-maximized {}'.format(duration+5, exp.name, exp.exp_time, 1920, 1080, url)
        start_flow_cmd = 'timeout {}s google-chrome  --headless --flag-switches-begin --disable-quic --flag-switches-end --ssl-key-log-file=/users/rukshani/SSLkeylogs/sslkeylog-{}-{}.log --remote-debugging-port=9222 --autoplay-policy=no-user-gesture-required --window-size={},{} --start-maximized {}'.format(duration+5, exp.name, exp.exp_time, 3840, 2160, url)

        print("start_flow_cmd>>",start_flow_cmd)
        # won't return until flow is done
        flow_start_time = time.time()
        # Remember where the TLS key log lands so post-processing can decrypt
        # the captured traffic.
        exp.logs['ssl_key_log'] = '/users/rukshani/SSLkeylogs/sslkeylog-{}-{}.log'.format(exp.name, exp.exp_time)
        ssl_key_log_cmd = 'export SSLKEYLOGFILE=/users/rukshani/SSLkeylogs/sslkeylog-{}-{}.log'.format(exp.name, exp.exp_time)
        _, stdout, _ = cctestbed.exec_command(ssh_client, exp.server.ip_wan, ssl_key_log_cmd) #This is to decrypt traffic
        _, stdout, _ = cctestbed.exec_command(ssh_client, exp.server.ip_wan, 'source /etc/environment')
        _, stdout, _ = cctestbed.exec_command(ssh_client, exp.server.ip_wan, start_flow_cmd)
        # exit_status = stdout.channel.recv_exit_status()
        return (stdout, flow_start_time)
コード例 #15
0
ファイル: flow_impl.py プロジェクト: arukshani/tmp-cctestbed
def start_bess_for_website(exp, duration, web_data):
    """Run a website experiment through the BESS middlebox pipeline.

    Sets up (in order) the DNAT rule, the route, and the /etc/hosts DNS
    entry for the target website, starts tcpdump on the server, restarts
    BESS with the fairness middlebox config, starts the BESS and RTT
    monitors, runs the website flows for duration+5 seconds, and finally
    dumps the website metadata to exp.logs['website_log'].

    The ExitStack unwinds every context (rules, route, DNS entry, tcpdump,
    BESS, monitors) in reverse order when this function returns or raises,
    so the setup/teardown ordering here is load-bearing.
    """
    with ExitStack() as stack:
        print(web_data)
        # Network plumbing for the target website: NAT, routing, DNS pinning.
        stack.enter_context(util.add_dnat_rule(exp, web_data['url_ip']))
        stack.enter_context(util.add_route(exp, web_data['url_ip']))
        stack.enter_context(util.add_dns_rule(exp, web_data['website'], web_data['url_ip']))
        exp._run_tcpdump('server', stack)
        # run the flow
        # turns out there is a bug when using subprocess and Popen in Python 3.5
        # so skip ping needs to be true
        # https://bugs.python.org/issue27122
        cctestbed.stop_bess()
        stack.enter_context(exp._run_bess(ping_source='server', skip_ping=False, bess_config_name='active-middlebox-pmd-fairness'))
        # give bess some time to start
        time.sleep(5)
        exp._show_bess_pipeline()
        stack.enter_context(exp._run_bess_monitor())
        stack.enter_context(exp._run_rtt_monitor())
        ssh_client = cctestbed.get_ssh_client(exp.server.ip_wan, exp.server.username, key_filename=exp.server.key_filename)

        with ssh_client as ssh_client:
            # Kick off the website flows, then sleep for the flow duration
            # (flows run remotely; this process just waits them out).
            start_website_flows(ssh_client, exp, stack)
            # exit_status = stdout.channel.recv_exit_status()
            time.sleep(duration+5)
        # flow_end_time = time.time()
        logging.info('Flow ran for {} seconds'.format(duration+5))

        exp._show_bess_pipeline()
        # Query the bottleneck queue status for diagnostics.
        cmd = '/opt/bess/bessctl/bessctl command module queue0 get_status EmptyArg'
        print(cctestbed.run_local_command(cmd))

        # Persist the experiment's website metadata alongside the logs.
        logging.info('Dumping website data to log: {}'.format(exp.logs['website_log']))
        with open(exp.logs['website_log'], 'w') as f:
            website_info = {}
            website_info['website'] = web_data['website']
            website_info['url'] = web_data['url']
            website_info['website_rtt'] = web_data['website_rtt']
            website_info['experiment_rtt'] = web_data['experiment_rtt']
            website_info['delay'] = web_data['delay']
            website_info['url_ip'] = web_data['url_ip']
            website_info['flow_runtime'] = duration+5
            json.dump(website_info, f)
コード例 #16
0
ファイル: experiment.py プロジェクト: arukshani/tmp-cctestbed
def load_experiments(experiment_name_patterns, remote=True, force_local=False,
                        remote_username=REMOTE_USERNAME, remote_ip=REMOTE_IP_ADDR,
                        load_queue=False, clean=False, parallel=True,
                        min_num_files=0, min_date=None, remove_duplicates=True):
    """Load all experiments into experiment analyzers
    experiment_name_pattern : list of str
        Should be a pattern that will be called
        with '{}.tar.gz'.format(experiment_name_pattern)
    remote : bool, (default: True)
        If True, look for experiments remotely.
        If False, don't look for experiments remotely,
        only locally.
    force_local : bool, (default: False)
        If True, always look for local experiments.
        If False, only look for local experiments,
        if no remote experiments are found.
    clean: bool
        If True, delete all local files matching this exp_name_pattern
        before downloading again.
    parallel: bool
        If True, run download for experiments in parallel
    min_num_files: int
        If greater than 0, then expected to get atleast this number of files
    min_date: string
        Only return experiments with equal to or large than the expected date
    remove_duplicates: bool
        Remove experiments with the same name, keeping the most recent one

    Returns an ExperimentAnalyzers mapping keyed by
    '<experiment-name>-<experiment-time>'.
    """
    assert(type(experiment_name_patterns) is list)
    tarfile_remotepaths = []
    # i feel like this code is too dangerous since there is a rm command ...
    if clean:
        for experiment_name_pattern in experiment_name_patterns:
            print('Deleting local files matching experiment pattern: {}'.format(experiment_name_pattern))
            run_local_command('rm {}.h5'.format(os.path.join(DATAPATH_PROCESSED, experiment_name_pattern)))
    if remote:
        print('Searching for experiments on remote machine: {}'.format(remote_ip))
        with get_ssh_client(ip_addr=remote_ip, username=remote_username) as ssh_client:
            '''
            for experiment_name_pattern in experiment_name_patterns:
                _, stdout, _ = ssh_client.exec_command(
                    'ls -1 /tmp/{}.tar.gz'.format(experiment_name_pattern))
                tarfile_remotepaths += [filename.strip()
                                        for filename in stdout.readlines()]
            '''
            # Single `ls` invocation covering every pattern at once.
            cmd = 'ls -1 ' + ' '.join(['/tmp/{}.tar.gz']*len(experiment_name_patterns)).format(*experiment_name_patterns)
            print(cmd)
            _, stdout, _ = ssh_client.exec_command(cmd)
            tarfile_remotepaths += [filename.strip() for filename in stdout.readlines()]
        print('Found {} experiment(s) on remote machine: {}'.format(
            len(tarfile_remotepaths), tarfile_remotepaths))
    else:
        print('Not searching remote machine for experiments.')

    # Fall back to (or additionally include) experiments cached on disk.
    if force_local or len(tarfile_remotepaths) == 0:
        num_local_files = 0
        for experiment_name_pattern in experiment_name_patterns:
            local_filepaths = glob.glob(os.path.join(DATAPATH_RAW,
                                                     experiment_name_pattern +'.tar.gz'))
            tarfile_remotepaths += local_filepaths
            num_local_files += len(local_filepaths)
        if len(tarfile_remotepaths) == 0:
            raise ValueError(('Found no experiments on remote or local machine '
                            '{} with name pattern {}').format(
                                remote_ip, experiment_name_pattern))
        if num_local_files > 0:
            print('Found {} experiment(s) on local machine: {}'.format(num_local_files,
                                                                        tarfile_remotepaths[-num_local_files:]))
        else:
            print('Found 0 experiment(s) on local machines.')

    # Filter out experiments older than min_date (date is the last '-' field
    # of the filename; comparison is lexicographic on that string).
    if min_date is not None:
        # copy file so we iterate over list and modify it
        remotepaths = tarfile_remotepaths[:]
        num_wrong_date = 0
        for remotepath in remotepaths:
            date = os.path.basename(remotepath).split('-')[-1]
            if date < min_date:
                num_wrong_date += 1
                tarfile_remotepaths.remove(remotepath)
        if num_wrong_date > 0:
            print('Found {} experiment(s) with date smaller than {}.'.format(num_wrong_date, min_date))

    if remove_duplicates:
        # keep only most recent experiments with the same name
        # (group by the name prefix before the trailing '-<date>' field and
        # keep the lexicographically last path in each group)
        tmp = pd.DataFrame(tarfile_remotepaths)
        num_duplicates = len(tmp)
        tarfile_remotepaths = tmp.loc[tmp[0].sort_values().apply(lambda x: '-'.join(x.split('-')[:-1])).drop_duplicates(keep='last').index][0].tolist()
        num_duplicates = num_duplicates - len(tarfile_remotepaths)
        if num_duplicates > 0:
            print('Found {} experiment(s) with duplicate prefixes.'.format(num_duplicates))


    # If fewer than the required number of experiments survived, return none.
    if min_num_files > 0:
        if len(tarfile_remotepaths) < min_num_files:
            print('Wanted min number of {} experiment(s), but only found {}.'.format(min_num_files, len(tarfile_remotepaths)))
            tarfile_remotepaths = []

    #experiments = {}
    # Download/parse experiments, in a process pool when parallel is set.
    num_proc = 10
    num_tarfiles = len(tarfile_remotepaths)
    num_tarfiles_per_process = int(num_tarfiles / num_proc) + 1
    if parallel and num_tarfiles > 1:
            with mp.Pool(num_proc) as pool:
                analyzers = pool.starmap(get_experiment, zip(tarfile_remotepaths,
                                                            it.repeat(remote_ip, num_tarfiles),
                                                            it.repeat(remote_username, num_tarfiles),
                                                            it.repeat(load_queue, num_tarfiles)),
                                                            chunksize=num_tarfiles_per_process)
    else:
        analyzers = [get_experiment(tarfile_remotepath, remote_ip, remote_username, load_queue) for tarfile_remotepath in tarfile_remotepaths]
    experiment_analyzers = ExperimentAnalyzers()
    for analyzer in analyzers:
        experiment_analyzers['{}-{}'.format(analyzer.experiment.name,
                                            analyzer.experiment.exp_time)] = analyzer
    return experiment_analyzers
コード例 #17
0
def run_experiment(website, url, btlbw=10, queue_size=128, rtt=35, force=False):
    """Run one website-download experiment through the BESS testbed.

    Measures the website's real RTT, skips the experiment if it already ran
    (unless *force*) or if the real RTT exceeds the target *rtt*, then sets
    up NAT/route/DNS plumbing, the BESS pipeline and monitors, downloads
    *url* from the server with wget for at most 65 seconds, and records
    website metadata to the experiment's website log.

    Returns (proc, experiment_id) on success, (None, '') if already
    completed, or (-1, '') when the website RTT is too large.
    Raises RuntimeError if the wget flow fails with a non-timeout error.
    """
    experiment_name = '{}bw-{}rtt-{}q-{}'.format(btlbw, rtt, queue_size, website)
    if not force and is_completed_experiment(experiment_name):
        return (None, '')
    logging.info('Creating experiment for website: {}'.format(website))
    url_ip = get_website_ip(url)
    logging.info('Got website IP: {}'.format(url_ip))
    website_rtt = int(float(get_nping_rtt(url_ip)))
    logging.info('Got website RTT: {}'.format(website_rtt))

    # The emulated RTT is rtt - website_rtt, so the real RTT must be smaller.
    if website_rtt >= rtt:
        logging.warning('Skipping experiment with website RTT {} >= {}'.format(
            website_rtt, rtt))
        return (-1, '')

    # NOTE(review): HOST_CLIENT_TEMPLATE is mutated in place here -- if it is
    # a shared module-level dict, ip_wan leaks between calls; confirm.
    client = HOST_CLIENT_TEMPLATE
    client['ip_wan'] = url_ip
    client = cctestbed.Host(**client)
    server = HOST_SERVER

    server_nat_ip = HOST_CLIENT.ip_wan #'128.104.222.182'  taro
    server_port = 5201
    client_port = 5555

    flow = {'ccalg': 'reno',
            'end_time': 60,
            'rtt': rtt - website_rtt,
            'start_time': 0}
    flows = [cctestbed.Flow(ccalg=flow['ccalg'], start_time=flow['start_time'],
                            end_time=flow['end_time'], rtt=flow['rtt'],
                            server_port=server_port, client_port=client_port,
                            client_log=None, server_log=None, kind='website',
                            client=client)]

    exp = cctestbed.Experiment(name=experiment_name,
                     btlbw=btlbw,
                     queue_size=queue_size,
                     flows=flows, server=server, client=client,
                     config_filename='experiments-all-ccalgs-aws.yaml',
                     server_nat_ip=server_nat_ip)

    logging.info('Running experiment: {}'.format(exp.name))

    # make sure tcpdump cleaned up
    logging.info('Making sure tcpdump is cleaned up')
    with cctestbed.get_ssh_client(
            exp.server.ip_wan,
            username=exp.server.username,
            key_filename=exp.server.key_filename) as ssh_client:
        # NOTE(review): SSH session is to the server but exec_command is given
        # exp.client.ip_wan -- presumably that arg is only used for logging.
        cctestbed.exec_command(
            ssh_client,
            exp.client.ip_wan,
            'sudo pkill -9 tcpdump')

    with ExitStack() as stack:
        # add DNAT rule
        stack.enter_context(add_dnat_rule(exp, url_ip))
        # add route to URL
        stack.enter_context(add_route(exp, url_ip))
        # add dns entry
        stack.enter_context(add_dns_rule(exp, website, url_ip))
        exp._run_tcpdump('server', stack)
        # run the flow
        # turns out there is a bug when using subprocess and Popen in Python 3.5
        # so skip ping needs to be true
        # https://bugs.python.org/issue27122
        cctestbed.stop_bess()
        stack.enter_context(exp._run_bess(ping_source='server', skip_ping=False))
        # give bess some time to start
        time.sleep(5)
        exp._show_bess_pipeline()
        stack.enter_context(exp._run_bess_monitor())
        stack.enter_context(exp._run_rtt_monitor())
        with cctestbed.get_ssh_client(exp.server.ip_wan,
                                      exp.server.username,
                                      key_filename=exp.server.key_filename) as ssh_client:
            filename = os.path.basename(url)
            if filename.strip() == '':
                logging.warning('Could not get filename from URL')
            # wget from the server's LAN address so traffic goes through BESS;
            # capped at 65s by `timeout`, temp files removed on failure.
            start_flow_cmd = 'timeout 65s wget --no-check-certificate --no-cache --delete-after --connect-timeout=10 --tries=3 --bind-address {}  -P /tmp/ "{}" || rm -f /tmp/{}.tmp*'.format(exp.server.ip_lan, url, filename)
            # won't return until flow is done
            flow_start_time = time.time()
            _, stdout, _ = cctestbed.exec_command(ssh_client, exp.server.ip_wan, start_flow_cmd)
            exit_status = stdout.channel.recv_exit_status()
            flow_end_time = time.time()
            logging.info('Flow ran for {} seconds'.format(flow_end_time - flow_start_time))
        exp._show_bess_pipeline()
        cmd = '/opt/bess/bessctl/bessctl command module queue0 get_status EmptyArg'
        print(cctestbed.run_local_command(cmd))

        # Persist the per-experiment website metadata.
        logging.info('Dumping website data to log: {}'.format(exp.logs['website_log']))
        with open(exp.logs['website_log'], 'w') as f:
            website_info = {}
            website_info['website'] = website
            website_info['url'] = url
            website_info['website_rtt'] = website_rtt
            website_info['url_ip'] = url_ip
            website_info['flow_runtime'] = flow_end_time - flow_start_time
            json.dump(website_info, f)

        if exit_status != 0:
            if exit_status == 124: # timeout exit status
                print('Timeout. Flow longer than 65s.')
                logging.warning('Timeout. Flow longer than 65s.')
            else:
                logging.error(stdout.read())
                raise RuntimeError('Error running flow.')

    proc = exp._compress_logs_url()
    return (proc, '{}-{}'.format(experiment_name, exp.exp_time))
コード例 #18
0
def get_ssh_client_for_server_node(exp):
    """Open an SSH client to the experiment's server node (WAN address)."""
    server = exp.server
    return cctestbed.get_ssh_client(server.ip_wan,
                                    server.username,
                                    key_filename=server.key_filename)
コード例 #19
0
def load_experiments(experiment_name_patterns,
                     remote=True,
                     force_local=False,
                     remote_username=REMOTE_USERNAME,
                     remote_ip=REMOTE_IP_ADDR):
    """Find experiment tarballs and wrap each in an ExperimentAnalyzer.

    experiment_name_pattern : list of str
        Should be a pattern that will be called with '{}.tar.gz'.format(experiment_name_pattern)
    remote : bool, (default: True)
        If True, look for experiments remotely. If False, don't look for experiments remotely,
        only locally.
    force_local : bool, (default: False)
        If True, always look for local experiments. If False, only look for local experiments,
        if no remote experiments are found.

    Returns a dict mapping experiment name -> ExperimentAnalyzer.
    Raises ValueError when no experiments match anywhere.
    """
    assert (type(experiment_name_patterns) is list)
    tarfile_remotepaths = []
    if remote:
        print('Searching for experiments on remote machine: {}'.format(
            remote_ip))
        with cctestbed.get_ssh_client(ip_addr=remote_ip,
                                      username=remote_username) as ssh_client:
            # One `ls` per pattern; each matching /tmp/*.tar.gz path is kept.
            for experiment_name_pattern in experiment_name_patterns:
                _, stdout, _ = ssh_client.exec_command(
                    'ls -1 /tmp/{}.tar.gz'.format(experiment_name_pattern))
                tarfile_remotepaths += [
                    filename.strip() for filename in stdout.readlines()
                ]
        print('Found {} experiment(s) on remote machine: {}'.format(
            len(tarfile_remotepaths), tarfile_remotepaths))
    else:
        print('Not searching remote machine for experiments.')

    # Fall back to (or additionally include) tarballs cached on local disk.
    if force_local or len(tarfile_remotepaths) == 0:
        num_local_files = 0
        for experiment_name_pattern in experiment_name_patterns:
            # NOTE(review): unlike the remote branch, no '.tar.gz' suffix is
            # appended here -- presumably the caller's pattern must include
            # it for local matching; confirm.
            local_filepaths = glob.glob(
                os.path.join(DATAPATH_RAW, experiment_name_pattern))
            tarfile_remotepaths += local_filepaths
            num_local_files += len(local_filepaths)
        if len(tarfile_remotepaths) == 0:
            raise ValueError(
                ('Found no experiments on remote or local machine '
                 '{} with name pattern {}').format(REMOTE_IP_ADDR,
                                                   experiment_name_pattern))
        if num_local_files > 0:
            print('Found {} experiment(s) on local machine: {}'.format(
                num_local_files, tarfile_remotepaths[-num_local_files:]))
        else:
            print('Found 0 experiment(s) on local machines.')

    #experiments = {}
    # get_experiment returns (experiment_name, Experiment); run downloads in
    # a pool of 10 worker processes.
    with mp.Pool(10) as pool:
        experiments = pool.map(get_experiment, tarfile_remotepaths)
    #for tarfile_remotepath in tarfile_remotepaths:
    #    experiment_name, exp = get_experiment(tarfile_remotepath)
    #    experiments[experiment_name] = exp
    experiment_analyzers = {
        experiment_name: ExperimentAnalyzer(experiment)
        for experiment_name, experiment in experiments
    }  #experiments.items()}
    return experiment_analyzers
コード例 #20
0
def run_experiment(website1,
                   url1,
                   website2,
                   url2,
                   btlbw=10,
                   queue_size=128,
                   rtt=35,
                   force=False):
    """Run a two-website competing-flow experiment through the BESS testbed.

    Downloads ``url1`` (in the background) and ``url2`` (in the foreground)
    through the emulated bottleneck with the given bandwidth, queue size,
    and target RTT, while capturing tcpdump, queue, and RTT monitor logs.

    Args:
        website1: label for the first website (used in the experiment name
            and DNS rule).
        url1: URL fetched for the first flow.
        website2: label for the second website.
        url2: URL fetched for the second flow.
        btlbw: bottleneck bandwidth (Mbps).
        queue_size: bottleneck queue size.
        rtt: target round-trip time (ms); each website's measured RTT must
            be strictly less than this so the testbed can add the difference.
        force: when True, rerun even if a completed experiment with this
            name exists (a same-day run is still skipped).

    Returns:
        None when the experiment is skipped (already completed or already
        ran today); -1 when a website's measured RTT is >= ``rtt``;
        otherwise a tuple ``(compress_proc, tar_filename, experiment_name)``.

    Raises:
        RuntimeError: if the foreground wget flow exits with a non-zero,
            non-timeout exit status.
    """
    experiment_name = '{}bw-{}rtt-{}q-{}-{}'.format(btlbw, rtt, queue_size,
                                                    website1, website2)
    # Skip work that is already done; even with force=True we do not rerun
    # an experiment that already ran today.
    if not force and is_completed_experiment(experiment_name):
        return
    elif ran_experiment_today(experiment_name):
        return
    logging.info('Creating experiment for website1: {} website2: {}'.format(
        website1, website2))
    url_ip1 = get_website_ip(url1)
    url_ip2 = get_website_ip(url2)
    logging.info('Got website1 IP: {} website2 IP: {}'.format(
        url_ip1, url_ip2))
    website_rtt1 = int(float(get_nping_rtt(url_ip1)))
    website_rtt2 = int(float(get_nping_rtt(url_ip2)))
    logging.info('Got website1 RTT: {} website2 RTT: {}'.format(
        website_rtt1, website_rtt2))

    # The testbed can only ADD delay, so each website's real RTT must be
    # below the target RTT we want to emulate.
    if website_rtt1 >= rtt:
        logging.warning(
            'Skipping experiment with website1 RTT {} >= {}'.format(
                website_rtt1, rtt))
        return -1
    elif website_rtt2 >= rtt:
        logging.warning(
            'Skipping experiment with website2 RTT {} >= {}'.format(
                website_rtt2, rtt))
        return -1

    # BUGFIX: copy the template instead of binding it directly — the
    # original mutated the shared module-level HOST_CLIENT_TEMPLATE dict,
    # leaking ip_wan from one run into the next.
    client = dict(HOST_CLIENT_TEMPLATE)
    # TODO: Which IP should be used for client?
    client['ip_wan'] = url_ip1
    client = cctestbed.Host(**client)
    server = HOST_SERVER

    server_nat_ip = HOST_CLIENT.ip_wan  #'128.104.222.182'  taro
    server_port = 5201
    client_port = 5555

    # The two flows are identical except for the extra delay each needs to
    # reach the target RTT.
    flows = [
        cctestbed.Flow(ccalg='reno',
                       start_time=0,
                       end_time=60,
                       rtt=rtt - website_rtt,
                       server_port=server_port,
                       client_port=client_port,
                       client_log=None,
                       server_log=None)
        for website_rtt in (website_rtt1, website_rtt2)
    ]

    exp = cctestbed.Experiment(
        name=experiment_name,
        btlbw=btlbw,
        queue_size=queue_size,
        flows=flows,
        server=server,
        client=client,
        config_filename='experiments-all-ccalgs-aws.yaml',
        server_nat_ip=server_nat_ip)

    logging.info('Running experiment: {}'.format(exp.name))

    # Make sure no stale tcpdump from a previous run is still capturing.
    logging.info('Making sure tcpdump is cleaned up')
    with cctestbed.get_ssh_client(
            exp.server.ip_wan,
            username=exp.server.username,
            key_filename=exp.server.key_filename) as ssh_client:
        # NOTE(review): ssh connects to the server but the command is
        # labeled with exp.client.ip_wan — confirm which host this pkill
        # is meant to target.
        cctestbed.exec_command(ssh_client, exp.client.ip_wan,
                               'sudo pkill -9 tcpdump')

    with ExitStack() as stack:
        # Add DNAT rules so traffic to/from both website IPs is rewritten.
        stack.enter_context(add_dnat_rule(exp, url_ip1))
        stack.enter_context(add_dnat_rule(exp, url_ip2))
        # Route both website IPs through the testbed gateway.
        stack.enter_context(add_route(exp, url_ip1))
        stack.enter_context(add_route(exp, url_ip2))
        # DNS entries so the websites resolve to the routed IPs.
        stack.enter_context(add_dns_rule(exp, website1, url_ip1))
        stack.enter_context(add_dns_rule(exp, website2, url_ip2))
        exp._run_tcpdump('server', stack)
        # There is a bug when using subprocess and Popen in Python 3.5,
        # so skip ping needs to be true: https://bugs.python.org/issue27122
        # NOTE(review): the call below passes skip_ping=False, contradicting
        # that comment — confirm whether this is intentional.
        cctestbed.stop_bess()
        stack.enter_context(
            exp._run_bess(ping_source='server', skip_ping=False))
        # Give BESS some time to start before inspecting its pipeline.
        time.sleep(5)
        exp._show_bess_pipeline()
        stack.enter_context(exp._run_bess_monitor())
        stack.enter_context(exp._run_rtt_monitor())
        with cctestbed.get_ssh_client(
                exp.server.ip_wan,
                exp.server.username,
                key_filename=exp.server.key_filename) as ssh_client:
            filename1 = os.path.basename(url1)
            filename2 = os.path.basename(url2)
            if filename1.strip() == '':
                logging.warning('Could not get filename from URL 1')
            if filename2.strip() == '':
                logging.warning('Could not get filename from URL 2')
            # Start first flow in background (trailing '&') and second in
            # foreground; each wget is capped at 65s and cleans up partial
            # downloads on failure.
            start_flow_cmd1 = 'timeout 65s wget --no-cache --delete-after --connect-timeout=10 --tries=3 --bind-address {}  -P /tmp/ {} || rm -f /tmp/{}.tmp* &'.format(
                exp.server.ip_lan, url1, filename1)
            start_flow_cmd2 = 'timeout 65s wget --no-cache --delete-after --connect-timeout=10 --tries=3 --bind-address {}  -P /tmp/ {} || rm -f /tmp/{}.tmp*'.format(
                exp.server.ip_lan, url2, filename2)
            # Won't return until the foreground flow is done.
            flow_start_time = time.time()
            _, _, _ = cctestbed.exec_command(ssh_client, exp.server.ip_wan,
                                             start_flow_cmd1)
            _, stdout, _ = cctestbed.exec_command(ssh_client,
                                                  exp.server.ip_wan,
                                                  start_flow_cmd2)
            exit_status = stdout.channel.recv_exit_status()
            flow_end_time = time.time()
            logging.info('Flow ran for {} seconds'.format(flow_end_time -
                                                          flow_start_time))
        exp._show_bess_pipeline()
        cmd = '/opt/bess/bessctl/bessctl command module queue0 get_status EmptyArg'
        print(cctestbed.run_local_command(cmd))
        if exit_status != 0:
            if exit_status == 124:  # timeout(1) exit status
                print('Timeout. Flow longer than 65s.')
                logging.warning('Timeout. Flow longer than 65s.')
            else:
                logging.error(stdout.read())
                raise RuntimeError('Error running flow.')
    proc = exp._compress_logs_url()
    return (proc, exp.tar_filename, experiment_name)