Example #1
def verify_schemes(schemes):
    schemes = map(utils.get_base_scheme, utils.parse_schemes(schemes))
    all_schemes = utils.parse_config()['schemes'].keys()

    for cc in schemes:
        if cc not in all_schemes:
            sys.exit('%s is not a scheme included in src/config.yml' % cc)
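Every example on this page leans on utils.parse_config() to load the scheme registry. The helper itself is not shown; a minimal sketch, assuming the registry is the YAML file src/config.yml named in the error message above:

import yaml
from os import path

def parse_config():
    # hypothetical sketch: load the scheme registry from src/config.yml;
    # the real helper presumably resolves the path against the repo root
    with open(path.join('src', 'config.yml')) as config_file:
        return yaml.safe_load(config_file)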
Example #2
def setup(args):
    # update submodules
    utils.update_submodules()

    # setup specified schemes
    cc_schemes = None

    if args.all:
        cc_schemes = utils.parse_config()['schemes'].keys()
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()

    if cc_schemes is None:
        return

    for cc in cc_schemes:
        cc_src = path.join(context.src_dir, 'wrappers', cc + '.py')

        # install dependencies
        if args.install_deps:
            install_deps(cc_src)
        else:
            # persistent setup across reboots
            if args.setup:
                check_call([cc_src, 'setup'])

            # setup required every time after reboot
            if call([cc_src, 'setup_after_reboot']) != 0:
                sys.stderr.write('Warning: "%s.py setup_after_reboot"'
                                 ' failed but continuing\n' % cc)
Example #3
def verify_schemes(schemes):
    schemes = schemes.split()
    all_schemes = utils.parse_config()['schemes'].keys()

    for cc in schemes:
        if cc not in all_schemes:
            sys.exit('%s is not a scheme included in src/config.yml' % cc)
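A hypothetical call, assuming the scheme list arrives as a space-separated string the way args.schemes does elsewhere on this page:

verify_schemes('cubic bbr vegas')  # exits via sys.exit() on any unknown scheme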
Example #4
def main():
    parser = argparse.ArgumentParser()

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--all', action='store_true',
                       help='test all the schemes specified in src/config.yml')
    group.add_argument('--schemes', metavar='"SCHEME1 SCHEME2..."',
                       help='test a space-separated list of schemes')

    args = parser.parse_args()

    if args.all:
        schemes = utils.parse_config()['schemes'].keys()
    elif args.schemes is not None:
        schemes = args.schemes.split()

    data_dir = path.join(utils.tmp_dir, 'test_analyze_output')
    shutil.rmtree(data_dir, ignore_errors=True)
    utils.make_sure_dir_exists(data_dir)

    test_py = path.join(context.src_dir, 'experiments', 'test.py')
    analyze_py = path.join(context.src_dir, 'analysis', 'analyze.py')

    cmd = ['python', test_py, 'local', '--schemes', ' '.join(schemes),
           '-t', '10', '--data-dir', data_dir, '--pkill-cleanup',
           '--prepend-mm-cmds', 'mm-delay 20', '--extra-mm-link-args',
           '--uplink-queue=droptail --uplink-queue-args=packets=200']
    check_call(cmd)

    cmd = ['python', analyze_py, '--data-dir', data_dir]
    check_call(cmd)
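Because --all and --schemes form a required mutually exclusive group, exactly one of the two must be supplied. Hypothetical invocations (the script name is assumed):

# python test_analyze.py --all
# python test_analyze.py --schemes "cubic bbr vegas"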
Example #5
    def __init__(self, args):
        self.data_dir = path.abspath(args.data_dir)
        self.include_acklink = args.include_acklink

        metadata_path = path.join(args.data_dir, 'pantheon_metadata.json')
        self.meta = utils.load_test_metadata(metadata_path)
        self.cc_schemes = utils.verify_schemes_with_meta(args.schemes, self.meta)

        self.run_times = self.meta['run_times']
        self.flows = self.meta['flows']
        self.config = utils.parse_config()
Example #6
def test_schemes(args):
    wrappers_dir = path.join(context.src_dir, 'wrappers')

    if args.all:
        schemes = utils.parse_config()['schemes'].keys()
    elif args.schemes is not None:
        schemes = args.schemes.split()

    for scheme in schemes:
        sys.stderr.write('Testing %s...\n' % scheme)
        src = path.join(wrappers_dir, scheme + '.py')

        run_first = check_output([src, 'run_first']).strip()
        run_second = 'receiver' if run_first == 'sender' else 'sender'

        port = utils.get_open_port()

        # start the side that must run first
        cmd = [src, run_first, port]
        first_proc = Popen(cmd, preexec_fn=os.setsid)

        # wait for 'run_first' to be ready
        time.sleep(3)

        # start the other side
        cmd = [src, run_second, '127.0.0.1', port]
        second_proc = Popen(cmd, preexec_fn=os.setsid)

        # test lasts for 3 seconds
        signal.signal(signal.SIGALRM, utils.timeout_handler)
        signal.alarm(3)

        try:
            for proc in [first_proc, second_proc]:
                proc.wait()
                if proc.returncode != 0:
                    sys.exit('%s failed in tests' % scheme)
        except utils.TimeoutError:
            pass
        except Exception as exception:
            sys.exit('test_schemes.py: %s\n' % exception)
        else:
            signal.alarm(0)
            sys.exit('test exited before time limit')
        finally:
            # cleanup
            utils.kill_proc_group(first_proc)
            utils.kill_proc_group(second_proc)
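The test above arms SIGALRM with utils.timeout_handler and treats utils.TimeoutError as the expected outcome. Neither helper is shown here; a minimal sketch consistent with that usage:

class TimeoutError(Exception):
    # hypothetical: raised when the alarm set by signal.alarm() fires
    pass

def timeout_handler(signum, frame):
    # standard signal-handler signature: signal number and current frame
    raise TimeoutError()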
Example #7
def run_tests(args):
    # check and get git summary
    git_summary = utils.get_git_summary(args.mode,
                                        getattr(args, 'remote_path', None))

    # get cc_schemes
    cc_schemes = OrderedDict()
    if args.all:
        config = utils.parse_config()
        schemes_config = config['schemes']

        for scheme in schemes_config.keys():
            cc_schemes[scheme] = {}
        if args.random_order:
            utils.shuffle_keys(cc_schemes)
    elif args.schemes is not None:
        cc_schemes = utils.parse_schemes(args.schemes)
        if args.random_order:
            utils.shuffle_keys(cc_schemes)
    else:
        assert (args.test_config is not None)
        if args.random_order:
            random.shuffle(args.test_config['flows'])
        for flow in args.test_config['flows']:
            cc_schemes[flow['scheme']] = {}

    # save metadata
    meta = vars(args).copy()
    meta['cc_schemes'] = sorted(cc_schemes)
    meta['git_summary'] = git_summary

    metadata_path = path.join(args.data_dir, 'pantheon_metadata.json')
    utils.save_test_metadata(meta, metadata_path)

    # run tests
    for run_id in xrange(args.start_run_id,
                         args.start_run_id + args.run_times):
        if not hasattr(args, 'test_config') or args.test_config is None:
            for cc, params in cc_schemes.iteritems():
                test_args = get_cc_args(args, params)
                Test(test_args, run_id, cc).run()
        else:
            Test(args, run_id, None).run()
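utils.shuffle_keys randomizes the order in which schemes are tested while keeping their per-scheme parameters attached. A minimal sketch for an OrderedDict, assuming in-place semantics as the call site implies (Python 2, where items() returns a list):

import random

def shuffle_keys(ordered_dict):
    # hypothetical sketch: shuffle the key order of an OrderedDict in place
    items = ordered_dict.items()
    random.shuffle(items)
    ordered_dict.clear()
    ordered_dict.update(items)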
Example #8
def run_tests(args):
    # check and get git summary
    git_summary = utils.get_git_summary(args.mode,
                                        getattr(args, 'remote_path', None))

    # get cc_schemes
    if args.all:
        config = utils.parse_config()
        schemes_config = config['schemes']

        cc_schemes = schemes_config.keys()
        if args.random_order:
            random.shuffle(cc_schemes)
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()
        if args.random_order:
            random.shuffle(cc_schemes)
    else:
        assert (args.test_config is not None)
        if args.random_order:
            random.shuffle(args.test_config['flows'])
        cc_schemes = [flow['scheme'] for flow in args.test_config['flows']]

    # save metadata
    meta = vars(args).copy()
    meta['cc_schemes'] = sorted(cc_schemes)
    meta['git_summary'] = git_summary

    metadata_path = path.join(args.data_dir, 'pantheon_metadata.json')
    utils.save_test_metadata(meta, metadata_path)

    # run tests
    for run_id in xrange(args.start_run_id,
                         args.start_run_id + args.run_times):
        if not hasattr(args, 'test_config') or args.test_config is None:
            for cc in cc_schemes:
                Test(args, run_id, cc).run()
        else:
            Test(args, run_id, None).run()
Example #9
    def run(self):
        perf_data, stats_logs = self.eval_performance()

        all_perf_path = path.join(self.data_dir, 'all_perf.log')
        with open(all_perf_path, 'w') as all_perf_log:
            all_perf_log.write(
                'Scheme(all runs)\tAvg throughput (Mbit/s)\t'
                'Avg 95th delay (ms)\tAvg 99th delay (ms)\t'
                'Avg mean delay (ms)\tAvg loss rate\n')

        data_for_plot = {}
        data_for_json = {}
        capacity = {}

        for cc in perf_data:
            data_for_plot[cc] = {}
            data_for_plot[cc]['95th'] = []
            data_for_plot[cc]['99th'] = []
            data_for_plot[cc]['mean'] = []
            data_for_json[cc] = {}
            capacity[cc] = {}
            sum_tput = 0
            sum_delay_95th = 0
            sum_delay_99th = 0
            sum_delay_mean = 0
            sum_loss = 0
            valid_run_times = 0

            for run_id in perf_data[cc]:
                if perf_data[cc][run_id] is None:
                    continue

                tput = perf_data[cc][run_id]['throughput']
                delay_95th = perf_data[cc][run_id]['delay_95th']
                delay_99th = perf_data[cc][run_id]['delay_99th']
                delay_mean = perf_data[cc][run_id]['delay_mean']
                loss = perf_data[cc][run_id]['loss']

                if tput is None or delay_95th is None:
                    continue
                data_for_plot[cc]['95th'].append((tput, delay_95th))

                if tput is None or delay_99th is None:
                    continue
                data_for_plot[cc]['99th'].append((tput, delay_99th))

                if tput is None or delay_mean is None:
                    continue
                data_for_plot[cc]['mean'].append((tput, delay_mean))

                flow_data = perf_data[cc][run_id]['flow_data']
                if flow_data is not None:
                    data_for_json[cc][run_id] = flow_data

                # calculate the sum performance of all runs for every cc
                valid_run_times += 1
                sum_tput += tput
                sum_delay_95th += delay_95th
                sum_delay_99th += delay_99th
                sum_delay_mean += delay_mean
                sum_loss += loss

                # gather cc performance data into one file
                # with open(all_perf_path, 'a') as all_perf_log:
                #     all_perf_log.write('%s\t%.2f\t%.2f\t%.2f\t%.2f\t%.6f\n' %
                #                        (cc, run_id, tput, delay_95th, delay_99th, delay_mean, loss))

            # calculate the average performance of all runs for every cc
            if valid_run_times > 0:
                avg_tput = float(sum_tput) / valid_run_times
                avg_delay_95th = float(sum_delay_95th) / valid_run_times
                avg_delay_99th = float(sum_delay_99th) / valid_run_times
                avg_delay_mean = float(sum_delay_mean) / valid_run_times
                avg_loss = float(sum_loss) / valid_run_times
            else:
                avg_tput = 0
                avg_delay_95th = 0
                avg_delay_99th = 0
                avg_delay_mean = 0
                avg_loss = 0

            # gather avg cc performance data of all run into one file
            with open(all_perf_path, 'a') as all_perf_log:
                all_perf_log.write(
                    '%s\t%.2f\t%.2f\t%.2f\t%.2f\t%.6f\n' %
                    (utils.parse_config()['schemes'][cc]['name'], avg_tput,
                     avg_delay_95th, avg_delay_99th, avg_delay_mean, avg_loss))

        if not self.no_graphs:
            self.plot_all_perf_graph(perf_data)
            self.plot_throughput_delay('95th', data_for_plot)
            self.plot_throughput_delay('99th', data_for_plot)
            self.plot_throughput_delay('mean', data_for_plot)

        plt.close('all')

        perf_path = path.join(self.data_dir, 'pantheon_perf.json')
        with open(perf_path, 'w') as fh:
            json.dump(data_for_json, fh)
Example #10
    def plot_throughput_delay(self, measure, data):
        min_raw_delay = sys.maxint
        min_mean_delay = sys.maxint
        max_raw_delay = -sys.maxint
        max_mean_delay = -sys.maxint

        fig_raw, ax_raw = plt.subplots()
        fig_mean, ax_mean = plt.subplots()

        schemes_config = utils.parse_config()['schemes']
        for cc in data:
            if not data[cc]:
                sys.stderr.write('No performance data for scheme %s\n' % cc)
                continue

            # measure is among 95th percentile, 99th percentile, mean
            value = data[cc][measure]
            cc_name = schemes_config[cc]['name']
            color = schemes_config[cc]['color']
            marker = schemes_config[cc]['marker']
            y_data, x_data = zip(*value)

            # update min and max raw delay
            min_raw_delay = min(min(x_data), min_raw_delay)
            max_raw_delay = max(max(x_data), max_raw_delay)

            # plot raw values
            ax_raw.scatter(x_data,
                           y_data,
                           color=color,
                           marker=marker,
                           label=cc_name,
                           clip_on=False)

            # plot the average of raw values
            x_mean = np.mean(x_data)
            y_mean = np.mean(y_data)

            # update min and max mean delay
            min_mean_delay = min(x_mean, min_mean_delay)
            max_mean_delay = max(x_mean, max_mean_delay)

            ax_mean.scatter(x_mean,
                            y_mean,
                            color=color,
                            marker=marker,
                            clip_on=False)
            ax_mean.annotate(cc_name, (x_mean, y_mean))

        for fig_type, fig, ax in [('raw', fig_raw, ax_raw),
                                  ('mean', fig_mean, ax_mean)]:
            if fig_type == 'raw':
                self.xaxis_log_scale(ax, min_raw_delay, max_raw_delay)
            else:
                self.xaxis_log_scale(ax, min_mean_delay, max_mean_delay)
            ax.invert_xaxis()

            yticks = ax.get_yticks()
            if yticks[0] < 0:
                ax.set_ylim(bottom=0)

            if measure == '95th':
                tag = '95th percentile'
            elif measure == '99th':
                tag = '99th percentile'
            elif measure == 'mean':
                tag = 'Average'
            xlabel = tag + ' one-way delay (ms)'
            ax.set_xlabel(xlabel, fontsize=18)
            ax.set_ylabel('Average throughput (Mbit/s)', fontsize=18)
            ax.grid()

        # save pantheon_summary.svg and .pdf
        ax_raw.set_title(self.expt_title.strip(), y=1.02, fontsize=18)
        lgd = ax_raw.legend(scatterpoints=1,
                            bbox_to_anchor=(1, 0.5),
                            loc='center left',
                            fontsize=18)
        plt.rc('font', size=FONT_SIZE)
        for graph_format in ['svg', 'pdf']:
            raw_summary = path.join(
                self.data_dir,
                'pantheon_summary_delay_%s.%s' % (measure, graph_format))
            fig_raw.savefig(raw_summary,
                            dpi=300,
                            bbox_extra_artists=(lgd, ),
                            bbox_inches='tight',
                            pad_inches=0.2)

        # save pantheon_summary_mean.svg and .pdf
        ax_mean.set_title(self.expt_title + ' (mean of all runs by scheme)',
                          fontsize=18)

        for graph_format in ['svg', 'pdf']:
            mean_summary = path.join(
                self.data_dir,
                'pantheon_summary_mean_delay_%s.%s' % (measure, graph_format))
            fig_mean.savefig(mean_summary,
                             dpi=300,
                             bbox_inches='tight',
                             pad_inches=0.2)

        sys.stderr.write('Saved throughput graphs, delay graphs, and summary '
                         'graphs in %s\n' % self.data_dir)
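Both summary-plotting examples call self.xaxis_log_scale(ax, min_delay, max_delay), which is not reproduced on this page. A minimal sketch, assuming it merely puts the delay axis on a log scale padded around the observed range:

    def xaxis_log_scale(self, ax, min_delay, max_delay):
        # hypothetical sketch: log-scale the one-way delay axis and pad
        # the limits slightly so extreme points are not clipped
        ax.set_xscale('log')
        ax.set_xlim(min_delay * 0.9, max_delay * 1.1)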
Example #11
    def run(self):
        fig, ax = plt.subplots()
        total_min_time = None
        total_max_time = None

        if self.flows > 0:
            datalink_fmt_str = '%s_datalink_run%s.log'
        else:
            datalink_fmt_str = '%s_mm_datalink_run%s.log'

        schemes_config = utils.parse_config()['schemes']
        for cc in self.cc_schemes:
            cc_name = schemes_config[cc]['name']

            for run_id in xrange(1, self.run_times + 1):
                tunnel_log_path = path.join(self.data_dir,
                                            datalink_fmt_str % (cc, run_id))
                clock_time, throughput = self.parse_tunnel_log(tunnel_log_path)

                min_time = None
                max_time = None
                max_tput = None

                for flow_id in clock_time:
                    ax.plot(clock_time[flow_id], throughput[flow_id])

                    if min_time is None or clock_time[flow_id][0] < min_time:
                        min_time = clock_time[flow_id][0]
                    if max_time is None or clock_time[flow_id][-1] > max_time:
                        max_time = clock_time[flow_id][-1]
                    flow_max_tput = max(throughput[flow_id])
                    if max_tput is None or flow_max_tput > max_tput:
                        max_tput = flow_max_tput

                ax.annotate(cc_name, (min_time, max_tput))

                if total_min_time is None or min_time < total_min_time:
                    total_min_time = min_time
                if total_max_time is None or max_time > total_max_time:
                    total_max_time = max_time

        xmin = int(math.floor(total_min_time))
        xmax = int(math.ceil(total_max_time))
        ax.set_xlim(xmin, xmax)

        new_xticks = range(xmin, xmax, 10)
        ax.set_xticks(new_xticks)
        formatter = ticker.FuncFormatter(lambda x, pos: x - xmin)
        ax.xaxis.set_major_formatter(formatter)

        fig_w, fig_h = fig.get_size_inches()
        fig.set_size_inches(self.amplify * len(new_xticks), fig_h)

        start_datetime = time.strftime('%a, %d %b %Y %H:%M:%S',
                                       time.localtime(total_min_time))
        start_datetime += ' ' + time.strftime('%z')
        ax.set_xlabel('Time (s) since ' + start_datetime, fontsize=12)
        ax.set_ylabel('Throughput (Mbit/s)', fontsize=12)

        for graph_format in ['svg', 'pdf']:
            fig_path = path.join(self.data_dir,
                                 'pantheon_throughput_time.%s' % graph_format)
            fig.savefig(fig_path, bbox_inches='tight', pad_inches=0.2)

        sys.stderr.write('Saved pantheon_throughput_time in %s\n' %
                         self.data_dir)

        plt.close('all')
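Here self.parse_tunnel_log returns per-flow clock times and throughput series, unlike the statistics-oriented parser in the last example on this page. A rough sketch, assuming the same mahimahi log format (timestamp in milliseconds, event type, packet size in bytes, optional flow id) and that timestamps are Unix epoch milliseconds, as the time.localtime(total_min_time) call above implies:

    def parse_tunnel_log(self, tunnel_log_path):
        # hypothetical sketch: bin departure ('-') events into one-second
        # buckets per flow and return {flow: [t in s]}, {flow: [Mbit/s]}
        bins = {}
        with open(tunnel_log_path) as tunnel_log:
            for line in tunnel_log:
                if line.startswith('#'):
                    continue
                items = line.split()
                if items[1] != '-':  # keep egress (departure) events only
                    continue
                flow_id = int(items[-1]) if len(items) == 5 else 0
                sec = int(float(items[0]) / 1000)
                flow_bins = bins.setdefault(flow_id, {})
                flow_bins[sec] = flow_bins.get(sec, 0) + int(items[2]) * 8

        clock_time = {}
        throughput = {}
        for flow_id in bins:
            clock_time[flow_id] = sorted(bins[flow_id])
            throughput[flow_id] = [bins[flow_id][s] / 1e6  # bits per 1-s bin -> Mbit/s
                                   for s in clock_time[flow_id]]
        return clock_time, throughput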
Example #12
    def plot_throughput_delay(self, data):
        min_raw_delay = sys.maxint
        min_mean_delay = sys.maxint
        max_raw_delay = -sys.maxint
        max_mean_delay = -sys.maxint

        fig_raw, ax_raw = plt.subplots()
        fig_mean, ax_mean = plt.subplots()

        schemes_config = utils.parse_config()['schemes']
        for cc in data:
            if not data[cc]:
                sys.stderr.write('No performance data for scheme %s\n' % cc)
                continue

            value = data[cc]
            cc_name = schemes_config[cc]['name']
            color = schemes_config[cc]['color']
            marker = schemes_config[cc]['marker']
            y_data, x_data = zip(*value)

            # update min and max raw delay
            min_raw_delay = min(min(x_data), min_raw_delay)
            max_raw_delay = max(max(x_data), max_raw_delay)

            # plot raw values
            ax_raw.scatter(x_data, y_data, color=color, marker=marker,
                           label=cc_name, clip_on=False)

            # plot the average of raw values
            x_mean = np.mean(x_data)
            y_mean = np.mean(y_data)

            # update min and max mean delay
            min_mean_delay = min(x_mean, min_mean_delay)
            max_mean_delay = max(x_mean, max_mean_delay)

            ax_mean.scatter(x_mean, y_mean, color=color, marker=marker,
                            clip_on=False)
            ax_mean.annotate(cc_name, (x_mean, y_mean))

        for fig_type, fig, ax in [('raw', fig_raw, ax_raw),
                                  ('mean', fig_mean, ax_mean)]:
            if fig_type == 'raw':
                self.xaxis_log_scale(ax, min_raw_delay, max_raw_delay)
            else:
                self.xaxis_log_scale(ax, min_mean_delay, max_mean_delay)
            ax.invert_xaxis()

            yticks = ax.get_yticks()
            if yticks[0] < 0:
                ax.set_ylim(bottom=0)

            xlabel = '95th percentile one-way delay (ms)'
            ax.set_xlabel(xlabel, fontsize=12)
            ax.set_ylabel('Average throughput (Mbit/s)', fontsize=12)
            ax.grid()

        # save pantheon_summary.svg and .pdf
        ax_raw.set_title(self.expt_title.strip(), y=1.02, fontsize=12)
        lgd = ax_raw.legend(scatterpoints=1, bbox_to_anchor=(1, 0.5),
                            loc='center left', fontsize=12)

        for graph_format in ['svg', 'pdf']:
            raw_summary = path.join(
                self.data_dir, 'pantheon_summary.%s' % graph_format)
            fig_raw.savefig(raw_summary, dpi=300, bbox_extra_artists=(lgd,),
                            bbox_inches='tight', pad_inches=0.2)

        # save pantheon_summary_mean.svg and .pdf
        ax_mean.set_title(self.expt_title +
                          ' (mean of all runs by scheme)', fontsize=12)

        for graph_format in ['svg', 'pdf']:
            mean_summary = path.join(
                self.data_dir, 'pantheon_summary_mean.%s' % graph_format)
            fig_mean.savefig(mean_summary, dpi=300,
                             bbox_inches='tight', pad_inches=0.2)

        sys.stderr.write(
            'Saved throughput graphs, delay graphs, and summary '
            'graphs in %s\n' % self.data_dir)
Example #13
    def parse_tunnel_log(self):
        tunlog = open(self.tunnel_log)

        self.flows = {}
        first_ts = None
        capacities = {}

        arrivals = {}
        departures = {}
        self.delays_t = {}
        self.delays = {}

        first_capacity = None
        last_capacity = None
        first_arrival = {}
        last_arrival = {}
        first_departure = {}
        last_departure = {}

        total_first_departure = None
        total_last_departure = None
        total_arrivals = 0
        total_departures = 0

        while True:
            line = tunlog.readline()
            if not line:
                break

            if line.startswith('#'):
                continue

            items = line.split()
            ts = float(items[0])
            event_type = items[1]
            num_bits = int(items[2]) * 8

            if first_ts is None:
                first_ts = ts

            bin_id = self.ms_to_bin(ts, first_ts)

            if event_type == '#':
                capacities[bin_id] = capacities.get(bin_id, 0) + num_bits

                if first_capacity is None:
                    first_capacity = ts

                if last_capacity is None or ts > last_capacity:
                    last_capacity = ts
            elif event_type == '+':
                if len(items) == 4:
                    flow_id = int(items[-1])
                else:
                    flow_id = 0

                self.flows[flow_id] = True

                if flow_id not in arrivals:
                    arrivals[flow_id] = {}
                    first_arrival[flow_id] = ts

                if flow_id not in last_arrival:
                    last_arrival[flow_id] = ts
                else:
                    if ts > last_arrival[flow_id]:
                        last_arrival[flow_id] = ts

                old_value = arrivals[flow_id].get(bin_id, 0)
                arrivals[flow_id][bin_id] = old_value + num_bits

                total_arrivals += num_bits
            elif event_type == '-':
                if len(items) == 5:
                    flow_id = int(items[-1])
                else:
                    flow_id = 0

                self.flows[flow_id] = True

                if flow_id not in departures:
                    departures[flow_id] = {}
                    first_departure[flow_id] = ts

                if flow_id not in last_departure:
                    last_departure[flow_id] = ts
                else:
                    if ts > last_departure[flow_id]:
                        last_departure[flow_id] = ts

                old_value = departures[flow_id].get(bin_id, 0)
                departures[flow_id][bin_id] = old_value + num_bits

                total_departures += num_bits

                # update total variables
                if total_first_departure is None:
                    total_first_departure = ts
                if (total_last_departure is None or ts > total_last_departure):
                    total_last_departure = ts

                # store delays in a list for each flow and sort later
                delay = float(items[3])
                if flow_id not in self.delays:
                    self.delays[flow_id] = []
                    self.delays_t[flow_id] = []
                self.delays[flow_id].append(delay)
                self.delays_t[flow_id].append((ts - first_ts) / 1000.0)

        tunlog.close()

        us_per_bin = 1000.0 * self.ms_per_bin

        self.avg_capacity = None
        self.link_capacity = []
        self.link_capacity_t = []
        if capacities:
            # calculate average capacity
            if last_capacity == first_capacity:
                self.avg_capacity = 0
            else:
                delta = 1000.0 * (last_capacity - first_capacity)
                self.avg_capacity = sum(capacities.values()) / delta

            # transform capacities into a list
            capacity_bins = capacities.keys()
            for bin_id in xrange(min(capacity_bins), max(capacity_bins) + 1):
                self.link_capacity.append(
                    capacities.get(bin_id, 0) / us_per_bin)
                self.link_capacity_t.append(self.bin_to_s(bin_id))

        # calculate ingress and egress throughput for each flow
        self.ingress_tput = {}
        self.egress_tput = {}
        self.ingress_t = {}
        self.egress_t = {}
        self.avg_ingress = {}
        self.avg_egress = {}
        self.delay_95th = {}
        self.delay_99th = {}
        self.delay_mean = {}
        self.loss_rate = {}

        total_delays = []

        for flow_id in self.flows:
            self.ingress_tput[flow_id] = []
            self.egress_tput[flow_id] = []
            self.ingress_t[flow_id] = []
            self.egress_t[flow_id] = []
            self.avg_ingress[flow_id] = 0
            self.avg_egress[flow_id] = 0

            if flow_id in arrivals:
                # calculate average ingress and egress throughput
                first_arrival_ts = first_arrival[flow_id]
                last_arrival_ts = last_arrival[flow_id]

                if last_arrival_ts == first_arrival_ts:
                    self.avg_ingress[flow_id] = 0
                else:
                    delta = 1000.0 * (last_arrival_ts - first_arrival_ts)
                    flow_arrivals = sum(arrivals[flow_id].values())
                    self.avg_ingress[flow_id] = flow_arrivals / delta

                ingress_bins = arrivals[flow_id].keys()
                for bin_id in xrange(min(ingress_bins), max(ingress_bins) + 1):
                    self.ingress_tput[flow_id].append(
                        arrivals[flow_id].get(bin_id, 0) / us_per_bin)
                    self.ingress_t[flow_id].append(self.bin_to_s(bin_id))

            if flow_id in departures:
                first_departure_ts = first_departure[flow_id]
                last_departure_ts = last_departure[flow_id]

                if last_departure_ts == first_departure_ts:
                    self.avg_egress[flow_id] = 0
                else:
                    delta = 1000.0 * (last_departure_ts - first_departure_ts)
                    flow_departures = sum(departures[flow_id].values())
                    self.avg_egress[flow_id] = flow_departures / delta

                egress_bins = departures[flow_id].keys()

                self.egress_tput[flow_id].append(0.0)
                self.egress_t[flow_id].append(self.bin_to_s(min(egress_bins)))

                for bin_id in xrange(min(egress_bins), max(egress_bins) + 1):
                    self.egress_tput[flow_id].append(
                        departures[flow_id].get(bin_id, 0) / us_per_bin)
                    self.egress_t[flow_id].append(self.bin_to_s(bin_id + 1))

            # calculate 95th, 99th percentile, mean per-packet one-way delay
            self.delay_95th[flow_id] = None
            self.delay_99th[flow_id] = None
            self.delay_mean[flow_id] = None
            if flow_id in self.delays:
                self.delay_95th[flow_id] = np.percentile(
                    self.delays[flow_id], 95, interpolation='nearest')
                self.delay_99th[flow_id] = np.percentile(
                    self.delays[flow_id], 99, interpolation='nearest')
                self.delay_mean[flow_id] = np.mean(self.delays[flow_id])
                total_delays += self.delays[flow_id]

            # calculate loss rate for each flow
            if flow_id in arrivals and flow_id in departures:
                flow_arrivals = sum(arrivals[flow_id].values())
                flow_departures = sum(departures[flow_id].values())

                self.loss_rate[flow_id] = None
                if flow_arrivals > 0:
                    self.loss_rate[flow_id] = (
                        1 - 1.0 * flow_departures / flow_arrivals)

        self.total_loss_rate = None
        if total_arrivals > 0:
            self.total_loss_rate = 1 - 1.0 * total_departures / total_arrivals

        # calculate total average throughput and 95th, 99th percentile, mean delay
        self.total_avg_egress = None
        if total_last_departure == total_first_departure:
            self.total_duration = 0
            self.total_avg_egress = 0
        else:
            self.total_duration = total_last_departure - total_first_departure
            self.total_avg_egress = total_departures / (1000.0 *
                                                        self.total_duration)

        self.total_delay_95th = None
        self.total_delay_99th = None
        self.total_delay_mean = None
        if total_delays:
            self.total_delay_95th = np.percentile(total_delays,
                                                  95,
                                                  interpolation='nearest')
            self.total_delay_99th = np.percentile(total_delays,
                                                  99,
                                                  interpolation='nearest')
            self.total_delay_mean = np.mean(total_delays)

        self.lock.acquire()
        schemes_config = utils.parse_config()['schemes']
        # gather the time-varying tput and delay of each run into one
        # shared file for all schemes (assumes a single flow with id 1)
        with open(self.all_tput_log, 'a') as all_tput_log:
            for i in range(len(self.egress_t[1])):
                all_tput_log.write(
                    '%s\tegress\t%.2f\t%.2f\n' %
                    (schemes_config[self.cc]['name'], self.egress_t[1][i],
                     self.egress_tput[1][i]))
            for i in range(len(self.ingress_t[1])):
                all_tput_log.write(
                    '%s\tingress\t%.2f\t%.2f\n' %
                    (schemes_config[self.cc]['name'], self.ingress_t[1][i],
                     self.ingress_tput[1][i]))
        with open(self.all_delay_log, 'a') as all_delay_log:
            for i in range(len(self.delays_t[1])):
                all_delay_log.write('%s\t%f\t%.2f\n' %
                                    (schemes_config[self.cc]['name'],
                                     self.delays_t[1][i], self.delays[1][i]))
        self.lock.release()
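The parser above buckets events with self.ms_to_bin and maps buckets back to seconds with self.bin_to_s, both driven by self.ms_per_bin. Minimal sketches consistent with that usage (assumed; the helpers are not shown):

    def ms_to_bin(self, ts, first_ts):
        # map a millisecond timestamp to a bucket index relative to the
        # first timestamp seen in the log
        return int((ts - first_ts) / self.ms_per_bin)

    def bin_to_s(self, bin_id):
        # convert a bucket index back to seconds since the log started
        return bin_id * self.ms_per_bin / 1000.0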