Example #1
0
def run_tests(args):
    """Run each congestion control scheme args.run_times times.

    Before every test the kernel's default qdisc and receive socket
    buffer sizes are set to the values the scheme needs (see config.yml)
    and restored afterwards, even if the test fails.

    Raises:
        ValueError: if neither args.all nor args.schemes selects schemes.
    """
    git_summary = get_git_summary(
        args.mode, getattr(args, 'remote_path', None))

    config = parse_config()
    schemes_config = config['schemes']

    if args.all:
        cc_schemes = schemes_config.keys()
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()
    else:
        # BUG FIX: cc_schemes used to be left unbound here, producing a
        # confusing NameError further down; fail fast with a clear message.
        raise ValueError('must pass either --all or --schemes')

    if args.random_order:
        random.shuffle(cc_schemes)

    ssh_cmd = None
    if args.mode == 'remote':
        r = parse_remote_path(args.remote_path)
        ssh_cmd = r['ssh_cmd']

    # For each run of each scheme, change the queueing discipline and
    # receiving socket buffer sizes before and after the test.
    # Check config.yml for values.
    for run_id in xrange(args.start_run_id,
                         args.start_run_id + args.run_times):
        # clean the contents in /tmp/pantheon-tmp
        clean_tmp_cmd = 'rm -rf /tmp/pantheon-tmp/*'
        if args.mode == 'remote':
            call(ssh_cmd + [clean_tmp_cmd])
        call(clean_tmp_cmd, shell=True)

        for cc in cc_schemes:
            # remember the current kernel settings so they can be restored
            default_qdisc = get_default_qdisc(ssh_cmd)
            old_recv_bufsizes = get_recv_sock_bufsizes(ssh_cmd)

            if 'qdisc' in schemes_config[cc]:
                test_qdisc = schemes_config[cc]['qdisc']
            else:
                test_qdisc = config['kernel_attrs']['default_qdisc']

            test_recv_sock_bufs = config['kernel_attrs']['sock_recv_bufs']

            try:
                if default_qdisc != test_qdisc:
                    set_default_qdisc(test_qdisc, ssh_cmd)

                set_recv_sock_bufsizes(test_recv_sock_bufs, ssh_cmd)

                Test(args, run_id, cc).run()
            finally:
                # always restore the machine's original settings
                set_default_qdisc(default_qdisc, ssh_cmd)
                set_recv_sock_bufsizes(old_recv_bufsizes, ssh_cmd)

    if not args.no_metadata:
        meta = vars(args).copy()
        meta['cc_schemes'] = sorted(cc_schemes)
        save_test_metadata(meta, path.abspath(args.data_dir), git_summary)
Example #2
0
def pkill(args):
    """Best-effort cleanup of leftover test processes via helpers/pkill.py.

    In remote mode the remote host is cleaned first over SSH; the local
    side is always cleaned afterwards.
    """
    sys.stderr.write('Cleaning up using pkill...'
                     '(enabled by --pkill-cleanup)\n')

    if args.mode == 'remote':
        remote = parse_remote_path(args.remote_path)
        remote_script = path.join(
            remote['pantheon_dir'], 'helpers', 'pkill.py')
        call(remote['ssh_cmd'] +
             ['python', remote_script, '--kill-dir', remote['pantheon_dir']])

    local_script = path.join(project_root.DIR, 'helpers', 'pkill.py')
    call(['python', local_script, '--kill-dir', project_root.DIR])
def main():
    h.call(['echo', '1'])
    h.check_call('echo 2', shell=True)

    ret = h.check_output(['echo', '3']).strip()
    print ret
    assert ret == '3'

    proc = h.Popen(['echo', '4'], stdout=h.PIPE)
    ret = proc.communicate()[0].strip()
    print ret
    assert ret == '4'

    print h.get_open_port()
    h.make_sure_path_exists(h.TMPDIR)
    print h.parse_config()
Example #4
0
def install_deps(cc_src):
    """Install the apt packages a scheme's wrapper script depends on.

    Queries the wrapper (cc_src) for its dependency list, then installs
    the packages with apt-get.  Installation failures are reported but
    deliberately tolerated.
    """
    cmd = ['python', cc_src, 'deps']
    deps = check_output(cmd).strip()

    if deps:
        # Use an argument list instead of a concatenated shell string:
        # avoids shell word-splitting/injection issues if the dependency
        # list ever contains unexpected characters.
        cmd = ['sudo', 'apt-get', '-y', 'install'] + deps.split()
        if call(cmd) != 0:
            sys.stderr.write('Some dependencies failed to install '
                             'but assuming things okay.\n')
Example #5
0
def install_deps(cc_src):
    """Install the apt packages a scheme's wrapper script depends on.

    Queries the wrapper (cc_src) for its dependency list, then installs
    the packages with apt-get.  Installation failures are reported but
    deliberately tolerated.
    """
    cmd = ['python', cc_src, 'deps']
    deps = check_output(cmd).strip()

    if deps:
        # Use an argument list instead of a concatenated shell string:
        # avoids shell word-splitting/injection issues if the dependency
        # list ever contains unexpected characters.
        cmd = ['sudo', 'apt-get', '-y', 'install'] + deps.split()
        if call(cmd) != 0:
            sys.stderr.write('Some dependencies failed to install '
                             'but assuming things okay.\n')
Example #6
0
def setup(args):
    """Prepare the machine and the selected congestion control schemes.

    Without --install-deps: updates submodules, enables IP forwarding and
    (when an interface is given) disables reverse path filtering, then runs
    each scheme's per-boot setup (plus persistent setup if --setup).
    With --install-deps: only installs each scheme's dependencies.
    A failure while setting up one scheme is reported and the remaining
    schemes are still attempted.
    """
    if not args.install_deps:
        # update submodules
        update_submodules()

        # enable IP forwarding
        sh_cmd = 'sudo sysctl -w net.ipv4.ip_forward=1'
        check_call(sh_cmd, shell=True)

        if args.interface is not None:
            # disable reverse path filtering
            rpf = 'net.ipv4.conf.%s.rp_filter'

            sh_cmd = 'sudo sysctl -w %s=0' % (rpf % 'all')
            check_call(sh_cmd, shell=True)

            sh_cmd = 'sudo sysctl -w %s=0' % (rpf % args.interface)
            check_call(sh_cmd, shell=True)

    # setup specified schemes
    cc_schemes = None

    if args.all:
        cc_schemes = parse_config()['schemes'].keys()
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()

    if cc_schemes is not None:
        for cc in cc_schemes:
            try:
                cc_src = path.join(project_root.DIR, 'src', cc + '.py')

                # install dependencies
                if args.install_deps:
                    install_deps(cc_src)
                else:
                    # persistent setup across reboots
                    if args.setup:
                        check_call(['python', cc_src, 'setup'])

                    # setup required every time after reboot
                    if call(['python', cc_src, 'setup_after_reboot']) != 0:
                        sys.stderr.write('Warning: "%s.py setup_after_reboot"'
                                         ' failed but continuing\n' % cc)
            except Exception as e:
                # BUG FIX: the old print('=========', e) printed a tuple
                # repr under Python 2 and hid which scheme failed; report
                # the failure clearly on stderr and continue best-effort.
                sys.stderr.write('Warning: setup of "%s" failed: %s\n'
                                 % (cc, e))
Example #7
0
def setup(args):
    """Prepare the machine and the selected congestion control schemes.

    Without --install-deps: updates submodules, enables IP forwarding and
    (when an interface is given) disables reverse path filtering, then runs
    each scheme's per-boot setup (plus persistent setup if --setup).
    With --install-deps: only installs each scheme's dependencies.
    """
    if not args.install_deps:
        # make sure submodules are current
        update_submodules()

        # turn on IP forwarding
        check_call('sudo sysctl -w net.ipv4.ip_forward=1', shell=True)

        if args.interface is not None:
            # turn off reverse path filtering globally and on the interface
            rpf = 'net.ipv4.conf.%s.rp_filter'
            for target in ['all', args.interface]:
                check_call('sudo sysctl -w %s=0' % (rpf % target),
                           shell=True)

    # figure out which schemes to set up
    cc_schemes = None
    if args.all:
        cc_schemes = parse_config()['schemes'].keys()
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()

    if cc_schemes is None:
        return

    for cc in cc_schemes:
        cc_src = path.join(project_root.DIR, 'src', cc + '.py')

        if args.install_deps:
            install_deps(cc_src)
            continue

        # persistent setup across reboots
        if args.setup:
            check_call(['python', cc_src, 'setup'])

        # setup required every time after reboot
        if call(['python', cc_src, 'setup_after_reboot']) != 0:
            sys.stderr.write('Warning: "%s.py setup_after_reboot"'
                             ' failed but continuing\n' % cc)
Example #8
0
    def process_tunnel_logs(self):
        """Merge per-flow tunnel logs into the final datalink/acklink logs.

        For every flow, merges the tunnel's ingress and egress logs into a
        single per-flow log (applying measured clock offsets when both
        sides reported one), then merges all per-flow logs into
        self.datalink_log and self.acklink_log.  In remote mode the remote
        side's half of each flow's logs is first downloaded via scp.
        """
        datalink_tun_logs = []
        acklink_tun_logs = []

        # Offsets are applied only when both sides reported one; which
        # offset belongs to ingress vs egress of each direction depends on
        # which side was the sender.
        apply_ofst = False
        if self.mode == 'remote':
            if self.remote_ofst is not None and self.local_ofst is not None:
                apply_ofst = True

                if self.sender_side == 'remote':
                    data_e_ofst = self.remote_ofst
                    ack_i_ofst = self.remote_ofst
                    data_i_ofst = self.local_ofst
                    ack_e_ofst = self.local_ofst
                else:
                    data_i_ofst = self.remote_ofst
                    ack_e_ofst = self.remote_ofst
                    data_e_ofst = self.local_ofst
                    ack_i_ofst = self.local_ofst

        for i in xrange(self.flows):
            tun_id = i + 1

            if self.mode == 'remote':
                # download logs from remote side
                # scp 'host:LOG LOG' copies the remote file onto the same
                # local path (the %(log)s placeholder is used twice)
                cmd = 'scp -C %s:' % self.r['host_addr']
                cmd += '%(log)s %(log)s'

                if self.sender_side == 'remote':
                    call(cmd % {'log': self.datalink_egress_logs[i]},
                         shell=True)
                    call(cmd % {'log': self.acklink_ingress_logs[i]},
                         shell=True)
                else:
                    call(cmd % {'log': self.datalink_ingress_logs[i]},
                         shell=True)
                    call(cmd % {'log': self.acklink_egress_logs[i]},
                         shell=True)

            # unique per-flow output names to avoid collisions in TMPDIR
            uid = uuid.uuid4()
            datalink_tun_log = path.join(
                TMPDIR, '%s_flow%s_uid%s.log.merged' %
                (self.datalink_name, tun_id, uid))
            acklink_tun_log = path.join(
                TMPDIR, '%s_flow%s_uid%s.log.merged' %
                (self.acklink_name, tun_id, uid))

            # merge this flow's datalink ingress/egress logs
            cmd = [
                'merge-tunnel-logs', 'single', '-i',
                self.datalink_ingress_logs[i], '-e',
                self.datalink_egress_logs[i], '-o', datalink_tun_log
            ]
            if apply_ofst:
                cmd += [
                    '-i-clock-offset', data_i_ofst, '-e-clock-offset',
                    data_e_ofst
                ]
            call(cmd)

            # merge this flow's acklink ingress/egress logs
            cmd = [
                'merge-tunnel-logs', 'single', '-i',
                self.acklink_ingress_logs[i], '-e',
                self.acklink_egress_logs[i], '-o', acklink_tun_log
            ]
            if apply_ofst:
                cmd += [
                    '-i-clock-offset', ack_i_ofst, '-e-clock-offset',
                    ack_e_ofst
                ]
            call(cmd)

            datalink_tun_logs.append(datalink_tun_log)
            acklink_tun_logs.append(acklink_tun_log)

        # combine all flows into the final logs; in local mode also pass
        # the mahimahi link log so link events are interleaved
        cmd = ['merge-tunnel-logs', 'multiple', '-o', self.datalink_log]
        if self.mode == 'local':
            cmd += ['--link-log', self.mm_datalink_log]
        cmd += datalink_tun_logs
        call(cmd)

        cmd = ['merge-tunnel-logs', 'multiple', '-o', self.acklink_log]
        if self.mode == 'local':
            cmd += ['--link-log', self.mm_acklink_log]
        cmd += acklink_tun_logs
        call(cmd)
Example #9
0
def cleanup():
    """Kill any leftover test processes under the project directory."""
    call(['python',
          path.join(project_root.DIR, 'helpers', 'pkill.py'),
          '--kill-dir', project_root.DIR])
Example #10
0
def _apply_kernel_attrs_and_run(args, run_id, cc, test_qdisc,
                                test_recv_sock_bufs, ssh_cmd):
    # Helper: save the current default qdisc and receive socket buffer
    # sizes, apply the requested ones, run one test, and always restore
    # the originals afterwards -- even if the test raises.
    default_qdisc = get_default_qdisc(ssh_cmd)
    old_recv_bufsizes = get_recv_sock_bufsizes(ssh_cmd)

    try:
        if default_qdisc != test_qdisc:
            set_default_qdisc(test_qdisc, ssh_cmd)

        set_recv_sock_bufsizes(test_recv_sock_bufs, ssh_cmd)

        Test(args, run_id, cc).run()
    finally:
        set_default_qdisc(default_qdisc, ssh_cmd)
        set_recv_sock_bufsizes(old_recv_bufsizes, ssh_cmd)


def run_tests(args):
    """Run the selected schemes (or a test config) run_times times each.

    For each run of each scheme, the queueing discipline and receiving
    socket buffer sizes are changed before the test and restored after;
    check config.yml for values.
    """
    git_summary = get_git_summary(args.mode, getattr(args, 'remote_path',
                                                     None))

    config = parse_config()
    schemes_config = config['schemes']

    if args.all:
        cc_schemes = schemes_config.keys()
        if args.random_order:
            random.shuffle(cc_schemes)
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()
        if args.random_order:
            random.shuffle(cc_schemes)
    else:
        assert (args.test_config is not None)
        if args.random_order:
            random.shuffle(args.test_config['flows'])
        cc_schemes = [flow['scheme'] for flow in args.test_config['flows']]

    ssh_cmd = None
    if args.mode == 'remote':
        r = parse_remote_path(args.remote_path)
        ssh_cmd = r['ssh_cmd']

    for run_id in xrange(args.start_run_id,
                         args.start_run_id + args.run_times):
        # clean the contents in /tmp/pantheon-tmp
        clean_tmp_cmd = 'rm -rf /tmp/pantheon-tmp/*'
        if args.mode == 'remote':
            call(ssh_cmd + [clean_tmp_cmd])
        call(clean_tmp_cmd, shell=True)

        # ISSUE (ranysha): no support for multiple schemes where each uses diff
        # qdisc. since version 4.13 of the kernel, TCP supports packet pacing
        # so you don't need to specify qdisc for BBR. when running with config
        # file, going to ignore qdisc setting for now.

        if args.test_config is None:
            for cc in cc_schemes:
                # per-scheme qdisc override, falling back to the default
                if 'qdisc' in schemes_config[cc]:
                    test_qdisc = schemes_config[cc]['qdisc']
                else:
                    test_qdisc = config['kernel_attrs']['default_qdisc']

                _apply_kernel_attrs_and_run(
                    args, run_id, cc, test_qdisc,
                    config['kernel_attrs']['sock_recv_bufs'], ssh_cmd)
        else:
            # test-config mode: per-scheme qdisc settings are ignored
            # (see ISSUE above); use the configured defaults
            _apply_kernel_attrs_and_run(
                args, run_id, None, config['kernel_attrs']['default_qdisc'],
                config['kernel_attrs']['sock_recv_bufs'], ssh_cmd)

    if not args.no_metadata:
        meta = vars(args).copy()
        meta['cc_schemes'] = sorted(cc_schemes)
        save_test_metadata(meta, path.abspath(args.data_dir), git_summary)
Example #11
0
    def process_tunnel_logs(self):
        """Merge per-flow tunnel logs into the final datalink/acklink logs.

        For every flow, merges the tunnel's ingress and egress logs into a
        single per-flow log (applying measured clock offsets when both
        sides reported one), then merges all per-flow logs into
        self.datalink_log and self.acklink_log.  In remote mode the remote
        side's half of each flow's logs is first downloaded via scp.
        """
        datalink_tun_logs = []
        acklink_tun_logs = []

        # Offsets are applied only when both sides reported one; which
        # offset belongs to ingress vs egress of each direction depends on
        # which side was the sender.
        apply_ofst = False
        if self.mode == 'remote':
            if self.remote_ofst is not None and self.local_ofst is not None:
                apply_ofst = True

                if self.sender_side == 'remote':
                    data_e_ofst = self.remote_ofst
                    ack_i_ofst = self.remote_ofst
                    data_i_ofst = self.local_ofst
                    ack_e_ofst = self.local_ofst
                else:
                    data_i_ofst = self.remote_ofst
                    ack_e_ofst = self.remote_ofst
                    data_e_ofst = self.local_ofst
                    ack_i_ofst = self.local_ofst

        for i in xrange(self.flows):
            tun_id = i + 1

            if self.mode == 'remote':
                # download logs from remote side
                # scp 'host:LOG LOG' copies the remote file onto the same
                # local path (the %(log)s placeholder is used twice)
                cmd = 'scp -C %s:' % self.r['host_addr']
                cmd += '%(log)s %(log)s'

                if self.sender_side == 'remote':
                    call(cmd % {'log': self.datalink_egress_logs[i]},
                         shell=True)
                    call(cmd % {'log': self.acklink_ingress_logs[i]},
                         shell=True)
                else:
                    call(cmd % {'log': self.datalink_ingress_logs[i]},
                         shell=True)
                    call(cmd % {'log': self.acklink_egress_logs[i]},
                         shell=True)

            # unique per-flow output names to avoid collisions in TMPDIR
            uid = uuid.uuid4()
            datalink_tun_log = path.join(
                TMPDIR, '%s_flow%s_uid%s.log.merged'
                % (self.datalink_name, tun_id, uid))
            acklink_tun_log = path.join(
                TMPDIR, '%s_flow%s_uid%s.log.merged'
                % (self.acklink_name, tun_id, uid))

            # merge this flow's datalink ingress/egress logs
            cmd = ['merge-tunnel-logs', 'single',
                   '-i', self.datalink_ingress_logs[i],
                   '-e', self.datalink_egress_logs[i],
                   '-o', datalink_tun_log]
            if apply_ofst:
                cmd += ['-i-clock-offset', data_i_ofst,
                        '-e-clock-offset', data_e_ofst]
            call(cmd)

            # merge this flow's acklink ingress/egress logs
            cmd = ['merge-tunnel-logs', 'single',
                   '-i', self.acklink_ingress_logs[i],
                   '-e', self.acklink_egress_logs[i],
                   '-o', acklink_tun_log]
            if apply_ofst:
                cmd += ['-i-clock-offset', ack_i_ofst,
                        '-e-clock-offset', ack_e_ofst]
            call(cmd)

            datalink_tun_logs.append(datalink_tun_log)
            acklink_tun_logs.append(acklink_tun_log)

        # combine all flows into the final logs; in local mode also pass
        # the mahimahi link log so link events are interleaved
        cmd = ['merge-tunnel-logs', 'multiple', '-o', self.datalink_log]
        if self.mode == 'local':
            cmd += ['--link-log', self.mm_datalink_log]
        cmd += datalink_tun_logs
        call(cmd)

        cmd = ['merge-tunnel-logs', 'multiple', '-o', self.acklink_log]
        if self.mode == 'local':
            cmd += ['--link-log', self.mm_acklink_log]
        cmd += acklink_tun_logs
        call(cmd)
Example #12
0
def cleanup():
    """Run helpers/pkill.py to kill stray processes in the project tree."""
    helper = path.join(project_root.DIR, 'helpers', 'pkill.py')
    call(['python', helper, '--kill-dir', project_root.DIR])