Ejemplo n.º 1
0
    def _run_tcpdump(self, host, stack, capture_http=False):
        """Start a tcpdump capture on the given host and register it with stack.

        Args:
            host: Either 'server' or 'client' — which machine runs tcpdump.
            stack: contextlib.ExitStack that owns the capture's lifetime.
            capture_http: If True, capture only HTTP GET packets on the
                server (host must be 'server') into the http log.

        Returns:
            The value produced by entering the RemoteCommand context.

        Raises:
            ValueError: If host is not 'server'/'client', or capture_http
                is requested for a non-server host.
        """
        if capture_http:
            # Was `assert(host == 'server')`; raise explicitly so the check
            # survives `python -O`.
            if host != 'server':
                raise ValueError('capture_http requires host == server')
            # BPF filter matches packets whose TCP payload starts with the
            # ASCII bytes 'GET ' (0x47455420), i.e. HTTP GET requests.
            start_tcpdump_cmd = (
                "tcpdump -n --packet-buffered "
                "--interface={} "
                "-w {} "
                "-s 0 -A 'tcp[((tcp[12:1] & 0xf0) >> 2):4] = 0x47455420' "
            ).format(self.server.ifname_remote, self.logs['http_log'])
            start_tcpdump = RemoteCommand(start_tcpdump_cmd,
                                          self.server.ip_wan,
                                          logs=[self.logs['http_log']],
                                          sudo=True,
                                          username=self.server.username,
                                          key_filename=self.server.key_filename,
                                          pgrep_string=self.logs['http_log'])
            return stack.enter_context(start_tcpdump())
        # Full-packet capture; the template is filled per host below.
        start_tcpdump_cmd = ('tcpdump -n --packet-buffered '
                             '--snapshot-length=65535 '
                             '--interface={} '
                             '-w {}')
        if host == 'server':
            machine = self.server
            pcap_log = self.logs['server_tcpdump_log']
        elif host == 'client':
            machine = self.client
            pcap_log = self.logs['client_tcpdump_log']
        else:
            raise ValueError('Expected either server or client to host')
        start_tcpdump_cmd = start_tcpdump_cmd.format(machine.ifname_remote,
                                                     pcap_log)
        start_tcpdump = RemoteCommand(start_tcpdump_cmd,
                                      machine.ip_wan,
                                      logs=[pcap_log],
                                      sudo=True,
                                      username=machine.username,
                                      key_filename=machine.key_filename)
        return stack.enter_context(start_tcpdump())
Ejemplo n.º 2
0
 def _run_tcpprobe(self, stack):
     """Start tcpprobe monitoring on the iperf client.

     Loads the tcp_probe_ray kernel module (assumed installed at
     /opt/cctestbed/tcp_bbr_measure on the iperf client), then streams
     /proc/net/tcpprobe into the tcpprobe log via a RemoteCommand whose
     cleanup command removes the module again.

     Args:
         stack: contextlib.ExitStack that owns the running capture.

     Returns:
         The value produced by entering the RemoteCommand context.

     Raises:
         RuntimeError: If insmod/chmod exits non-zero on the client.
     """
     insmod_cmd = ('sudo insmod '
                   '/opt/cctestbed/tcp_bbr_measure/tcp_probe_ray.ko port=0 full=1 '
                   '&& sudo chmod 444 /proc/net/tcpprobe ')
     with get_ssh_client(self.client.ip_wan,
                         username=self.client.username,
                         key_filename=self.client.key_filename) as ssh_client:
         logging.info('Running cmd ({}): {}'.format(self.client.ip_wan,
                                                    insmod_cmd))
         _, stdout, stderr = ssh_client.exec_command(insmod_cmd)
         exit_status = stdout.channel.recv_exit_status()
         if exit_status != 0:
             raise RuntimeError(
                 'Got a non-zero exit status running cmd: {}.\n{}'.format(
                     insmod_cmd, stderr.read()))
     try:
         start_tcpprobe_cmd = 'cat /proc/net/tcpprobe'
         start_tcpprobe = RemoteCommand(start_tcpprobe_cmd,
                                        self.client.ip_wan,
                                        stdout=self.logs['tcpprobe_log'],
                                        stderr=self.logs['tcpprobe_log'],
                                        logs=[self.logs['tcpprobe_log']],
                                        cleanup_cmd='sudo rmmod tcp_probe_ray',
                                        username=self.client.username,
                                        key_filename=self.client.key_filename)
     except Exception:
         # The module is already loaded; unload it before propagating so a
         # failed RemoteCommand construction does not leak tcp_probe_ray
         # in the client kernel.
         with get_ssh_client(self.client.ip_wan,
                             username=self.client.username,
                             key_filename=self.client.key_filename) as ssh_client:
             ssh_client.exec_command('sudo rmmod tcp_probe_ray')
         # BUGFIX: re-raise here. The original bare `except:` swallowed the
         # error and then fell through to the return below, where
         # `start_tcpprobe` was unbound — raising NameError and masking
         # the real failure.
         raise
     return stack.enter_context(start_tcpprobe())
Ejemplo n.º 3
0
    def _start_video_flow(self, flow, stack):
        """Start a headless-Chrome video download flow and register it.

        Starts Apache on the cctestbed client, sets the client's default
        congestion control algorithm to flow.ccalg, then launches headless
        Chrome on the server against the client's Apache page.

        Args:
            flow: Flow description; uses flow.client, flow.ccalg and
                flow.end_time.
            stack: contextlib.ExitStack that owns the running download.

        Returns:
            The RemoteCommand wrapping the Chrome download (already entered
            into stack).
        """
        # start apache server which is running on the cctestbed-client
        with get_ssh_client(
                flow.client.ip_wan,
                flow.client.username,
                key_filename=flow.client.key_filename) as ssh_client:
            start_apache_cmd = "sudo service apache2 start"
            exec_command(ssh_client, flow.client.ip_wan, start_apache_cmd)
        # change default ccalg for client
        with get_ssh_client(
                flow.client.ip_wan,
                flow.client.username,
                key_filename=flow.client.key_filename) as ssh_client:
            change_ccalg = 'echo {} | sudo tee /proc/sys/net/ipv4/tcp_congestion_control'.format(
                flow.ccalg)
            exec_command(ssh_client, flow.client.ip_wan, change_ccalg)

        #TODO: should change ccalg back to default after running flow

        # `timeout` caps the download at flow.end_time seconds; Chrome runs
        # on the server and fetches from the client's Apache on port 1234.
        web_download_cmd = 'timeout {}s google-chrome --disable-gpu --headless --remote-debugging-port=9222 --autoplay-policy=no-user-gesture-required "http://{}:1234/"'.format(
            flow.end_time, self.client.ip_lan)
        # BUGFIX: pgrep_string was "'google-chrome'.format(self.client.ip_lan)"
        # — a no-op .format with leftover arguments; use the plain string
        # (identical runtime value, dead code removed).
        start_download = RemoteCommand(web_download_cmd,
                                       self.server.ip_wan,
                                       username=self.server.username,
                                       key_filename=self.server.key_filename,
                                       pgrep_string='google-chrome')
        stack.enter_context(start_download())
        return start_download
Ejemplo n.º 4
0
    def _run_all_flows(self, stack, bess_config_name='active-middlebox-pmd'):
        """Run every configured iperf flow through the BESS pipeline.

        Starts BESS and its monitor, launches one iperf3 server per flow on
        the server machine, then starts each client in order — sleeping the
        pre-computed wait time before each launch — and finally sleeps until
        the last flow should have finished.

        Args:
            stack: contextlib.ExitStack that owns all remote processes.
            bess_config_name: Name of the BESS configuration to load.
        """
        # get wait times for each flow
        if len(self.flows) > 1:
            wait_times = self.get_wait_times()
        else:
            wait_times = [0]
        # run bess and monitor
        stack.enter_context(self._run_bess(bess_config_name=bess_config_name))
        # give bess some time to start
        time.sleep(3)
        self._show_bess_pipeline()
        stack.enter_context(self._run_bess_monitor())
        # One iperf3 server per flow; --one-off makes each server exit after
        # serving a single connection.
        for flow in self.flows:
            start_server_cmd = ('iperf3 --server '
                                '--bind {} '
                                '--port {} '
                                '--one-off '
                                '--affinity {} '
                                '--logfile {} ').format(
                                    self.server.ip_lan,
                                    flow.server_port,
                                    1,  # NOTE(review): all servers pinned to CPU 1 — confirm intended
                                    flow.server_log)
            start_server = RemoteCommand(start_server_cmd,
                                         self.server.ip_wan,
                                         username=self.server.username,
                                         logs=[flow.server_log],
                                         key_filename=self.server.key_filename)
            stack.enter_context(start_server())

        for idx, flow in enumerate(self.flows):
            # make sure first flow runs for the whole time regardless of start time
            # note this assumes self.flows is sorted by start time
            flow_duration = flow.end_time - flow.start_time
            if idx == 0:
                flow_duration = flow.end_time
            start_client_cmd = ('iperf3 --client {} '
                                '--port {} '
                                '--verbose '
                                '--bind {} '
                                '--cport {} '
                                '--linux-congestion {} '
                                '--interval 0.5 '
                                '--time {} '
                                #'--length 1024K '#1024K '
                                '--affinity {} '
                                #'--set-mss 500 ' # default is 1448
                                #'--window 100K '
                                '--zerocopy '
                                '--json '
                                '--logfile {} ').format(self.server_nat_ip,
                                                        flow.server_port,
                                                        self.client.ip_lan,
                                                        flow.client_port,
                                                        flow.ccalg,
                                                        flow_duration,
                                                        idx % 32,  # NOTE(review): wraps client CPU affinity at 32 — assumes <=32 cores, confirm
                                                        flow.client_log)
            start_client = RemoteCommand(start_client_cmd,
                                         self.client.ip_wan,
                                         username=self.client.username,
                                         logs=[flow.client_log],
                                         key_filename=self.client.key_filename)
            
            logging.info('Sleep for {}s before starting flow with start time {}'.format(
                    wait_times[idx], flow.start_time))
            time.sleep(wait_times[idx])
            stack.enter_context(start_client())
        # last flow should be the last one
        # (`flow` is the final flow from the loop above; +1s buffer)
        sleep_time = flow.end_time - flow.start_time + 1
        logging.info('Sleep for {}s'.format(sleep_time))
        time.sleep(sleep_time) 
        self._show_bess_pipeline()
Ejemplo n.º 5
0
    def _run_all_flows(self, stack, bess_config_name='active-middlebox-pmd'):
        """Run all configured flows (iperf and video) through BESS.

        Starts BESS and its monitor, then walks the flows twice: the first
        loop starts one iperf3 server per iperf flow and runs each video
        flow; the second loop launches the iperf clients in start-time
        order. Finally sleeps until the last flow should have finished.

        NOTE(review): video flows are started *and fully waited on* inside
        the first loop, before any iperf client runs — confirm that
        serialization is intended.

        Args:
            stack: contextlib.ExitStack that owns all remote processes.
            bess_config_name: Name of the BESS configuration to load.
        """
        #TODO: If multiple video flows are among the flows, skip them and give a warning.
        #TODO: Enforce only 1 video flow to be run with a given CCA.
        # get wait times for each flow
        if len(self.flows) > 1:
            wait_times = self.get_wait_times()
        else:
            wait_times = [0]
        # run bess and monitor
        stack.enter_context(self._run_bess(bess_config_name=bess_config_name))
        # give bess some time to start
        time.sleep(3)
        self._show_bess_pipeline()
        stack.enter_context(self._run_bess_monitor())
        for flow in self.flows:
            if flow.kind == 'iperf':
                # --one-off: server exits after serving a single connection
                start_server_cmd = ('iperf3 --server '
                                    '--bind {} '
                                    '--port {} '
                                    '--one-off '
                                    '--affinity {} '
                                    '--logfile {} ').format(
                                        self.server.ip_lan, flow.server_port,
                                        1, flow.server_log)
                start_server = RemoteCommand(
                    start_server_cmd,
                    self.server.ip_wan,
                    username=self.server.username,
                    logs=[flow.server_log],
                    key_filename=self.server.key_filename)
                stack.enter_context(start_server())
            elif flow.kind == 'video':
                #logging.warning("Video experiments are yet to be implemented")
                video_flow = self._start_video_flow(flow, stack)
                logging.info('Waiting for flow to finish')
                # wait (blocking) for the video flow to finish
                video_flow._wait()
                # add a time buffer before finishing up experiment
                logging.info('Local video flow finished')
            else:
                logging.warning("Only iperf and video flows are supported")
                #TODO:throw an exception here
        for idx, flow in enumerate(self.flows):
            if flow.kind == 'iperf':
                # make sure first flow runs for the whole time regardless of start time
                # note this assumes self.flows is sorted by start time
                flow_duration = flow.end_time - flow.start_time
                if idx == 0:
                    flow_duration = flow.end_time
                start_client_cmd = (
                    'iperf3 --client {} '
                    '--port {} '
                    '--verbose '
                    '--bind {} '
                    '--cport {} '
                    '--linux-congestion {} '
                    '--interval 0.5 '
                    '--time {} '
                    #'--length 1024K '#1024K '
                    '--affinity {} '
                    #'--set-mss 500 ' # default is 1448
                    #'--window 100K '
                    '--zerocopy '
                    '--json '
                    '--logfile {} ').format(self.server_nat_ip,
                                            flow.server_port,
                                            self.client.ip_lan,
                                            flow.client_port, flow.ccalg,
                                            flow_duration, idx % 32,
                                            flow.client_log)
                start_client = RemoteCommand(
                    start_client_cmd,
                    self.client.ip_wan,
                    username=self.client.username,
                    logs=[flow.client_log],
                    key_filename=self.client.key_filename)

                logging.info(
                    'Sleep for {}s before starting flow with start time {}'.
                    format(wait_times[idx], flow.start_time))
                time.sleep(wait_times[idx])
                stack.enter_context(start_client())
        # last flow should be the last one
        # (`flow` is the final flow from the loop above; +1s buffer)
        sleep_time = flow.end_time - flow.start_time + 1
        logging.info('Sleep for {}s'.format(sleep_time))
        time.sleep(sleep_time)
        self._show_bess_pipeline()