Example #1
def main():
    parser = argparse.ArgumentParser()

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--all', action='store_true',
                       help='test all the schemes specified in src/config.yml')
    group.add_argument('--schemes', metavar='"SCHEME1 SCHEME2..."',
                       help='test a space-separated list of schemes')

    args = parser.parse_args()

    if args.all:
        schemes = parse_config()['schemes'].keys()
    elif args.schemes is not None:
        schemes = args.schemes.split()

    curr_dir = path.abspath(path.dirname(__file__))
    data_dir = path.join(curr_dir, 'data')
    shutil.rmtree(data_dir, ignore_errors=True)
    make_sure_path_exists(data_dir)

    test_py = path.join(project_root.DIR, 'test', 'test.py')
    analyze_py = path.join(project_root.DIR, 'analysis', 'analyze.py')

    cmd = ['python', test_py, 'local', '--schemes', ' '.join(schemes),
           '-t', '10', '--data-dir', data_dir, '--pkill-cleanup',
           '--prepend-mm-cmds', 'mm-delay 20', '--extra-mm-link-args',
           '--uplink-queue=droptail --uplink-queue-args=packets=200']
    check_call(cmd)

    cmd = ['python', analyze_py, '--data-dir', data_dir]
    check_call(cmd)
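
All of the Pantheon examples in this listing call a parse_config() helper that reads src/config.yml; neither is shown here. Below is a minimal sketch, assuming a PyYAML-based helper and inferring the config layout from the keys the examples access ('schemes', 'friendly_name', 'color', 'marker', 'qdisc', 'kernel_attrs'); all field values are hypothetical.

import yaml  # PyYAML
from os import path

def parse_config():
    # assumed location: config.yml next to this helper in src/
    config_path = path.join(path.dirname(path.abspath(__file__)), 'config.yml')
    with open(config_path) as config_file:
        return yaml.safe_load(config_file)

# Inferred config.yml shape (values are hypothetical):
#
# schemes:
#   cubic:
#     friendly_name: TCP Cubic
#     color: blue
#     marker: o
#   bbr:
#     friendly_name: TCP BBR
#     qdisc: fq          # a scheme may override the default qdisc
#     color: red
#     marker: x
# kernel_attrs:
#   default_qdisc: pfifo_fast
#   sock_recv_bufs: '4096 87380 6291456'   # hypothetical buffer sizes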
Example #2
def main():
    # smoke-test the subprocess wrappers exposed by the helpers module 'h'
    h.call(['echo', '1'])
    h.check_call('echo 2', shell=True)

    ret = h.check_output(['echo', '3']).strip()
    print ret
    assert ret == '3'

    proc = h.Popen(['echo', '4'], stdout=h.PIPE)
    ret = proc.communicate()[0].strip()
    print ret
    assert ret == '4'

    # utility helpers: free-port lookup, directory creation, config parsing
    print h.get_open_port()
    h.make_sure_path_exists(h.TMPDIR)
    print h.parse_config()
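
get_open_port() above is not defined in this listing. A minimal sketch, assuming the common trick of binding to port 0 so the kernel picks a free ephemeral port:

import socket

def get_open_port():
    # bind to port 0: the OS assigns an unused ephemeral port
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('', 0))
    port = sock.getsockname()[1]
    sock.close()
    # return a string so the port can be appended to argv lists directly
    return str(port)

Note the inherent race: the port is free at the moment of the check, but another process could claim it before the caller binds to it.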
Example #3
def verify_schemes(schemes):
    schemes = schemes.split()
    all_schemes = parse_config()['schemes'].keys()

    for cc in schemes:
        if cc not in all_schemes:
            sys.exit('%s is not a scheme included in src/config.yml' % cc)
Example #4
def run_tests(args):
    git_summary = get_git_summary(
        args.mode, getattr(args, 'remote_path', None))

    config = parse_config()
    schemes_config = config['schemes']

    if args.all:
        cc_schemes = schemes_config.keys()
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()

    if args.random_order:
        random.shuffle(cc_schemes)

    ssh_cmd = None
    if args.mode == 'remote':
        r = parse_remote_path(args.remote_path)
        ssh_cmd = r['ssh_cmd']

    # For each run of each scheme, change the queueing discipline and
    # receiving socket buffer sizes before and after the test.
    # Check config.yml for values.
    for run_id in xrange(args.start_run_id,
                         args.start_run_id + args.run_times):
        # clean the contents in /tmp/pantheon-tmp
        clean_tmp_cmd = 'rm -rf /tmp/pantheon-tmp/*'
        if args.mode == 'remote':
            call(ssh_cmd + [clean_tmp_cmd])
        call(clean_tmp_cmd, shell=True)

        for cc in cc_schemes:
            default_qdisc = get_default_qdisc(ssh_cmd)
            old_recv_bufsizes = get_recv_sock_bufsizes(ssh_cmd)

            if 'qdisc' in schemes_config[cc]:
                test_qdisc = schemes_config[cc]['qdisc']
            else:
                test_qdisc = config['kernel_attrs']['default_qdisc']

            test_recv_sock_bufs = config['kernel_attrs']['sock_recv_bufs']

            try:
                if default_qdisc != test_qdisc:
                    set_default_qdisc(test_qdisc, ssh_cmd)

                set_recv_sock_bufsizes(test_recv_sock_bufs, ssh_cmd)

                Test(args, run_id, cc).run()
            finally:
                set_default_qdisc(default_qdisc, ssh_cmd)
                set_recv_sock_bufsizes(old_recv_bufsizes, ssh_cmd)

    if not args.no_metadata:
        meta = vars(args).copy()
        meta['cc_schemes'] = sorted(cc_schemes)
        save_test_metadata(meta, path.abspath(args.data_dir), git_summary)
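
The get_default_qdisc()/set_default_qdisc() pair and the socket-buffer helpers are not shown in this listing. A plausible sketch of the qdisc pair using sysctl, mirroring the ssh_cmd convention above; the bodies are assumptions, not the project's actual code:

from subprocess import check_call, check_output

def get_default_qdisc(ssh_cmd=None):
    sh_cmd = 'sysctl -n net.core.default_qdisc'
    if ssh_cmd is not None:
        # run remotely through the prebuilt ssh command list
        return check_output(ssh_cmd + [sh_cmd]).strip()
    return check_output(sh_cmd, shell=True).strip()

def set_default_qdisc(qdisc, ssh_cmd=None):
    sh_cmd = 'sudo sysctl -w net.core.default_qdisc=%s' % qdisc
    if ssh_cmd is not None:
        check_call(ssh_cmd + [sh_cmd])
    else:
        check_call(sh_cmd, shell=True)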
Example #5
    def __init__(self, args):
        self.data_dir = path.abspath(args.data_dir)
        self.include_acklink = args.include_acklink

        metadata_path = path.join(args.data_dir, 'pantheon_metadata.json')
        self.meta = load_test_metadata(metadata_path)
        self.cc_schemes = verify_schemes_with_meta(args.schemes, self.meta)

        self.run_times = self.meta['run_times']
        self.flows = self.meta['flows']
        self.config = parse_config()
Example #6
def test_schemes(args):
    src_dir = path.join(project_root.DIR, 'src')

    if args.all:
        schemes = parse_config()['schemes'].keys()
    elif args.schemes is not None:
        schemes = args.schemes.split()

    for scheme in schemes:
        sys.stderr.write('Testing %s...\n' % scheme)
        src = path.join(src_dir, scheme + '.py')

        run_first = check_output([src, 'run_first']).strip()
        run_second = 'receiver' if run_first == 'sender' else 'sender'

        port = get_open_port()

        # start the side that must run first
        cmd = [src, run_first, port]
        first_proc = Popen(cmd, preexec_fn=os.setsid)

        # wait for 'run_first' to be ready
        time.sleep(3)

        # start the side that runs second
        cmd = [src, run_second, '127.0.0.1', port]
        second_proc = Popen(cmd, preexec_fn=os.setsid)

        # test lasts for 3 seconds
        signal.signal(signal.SIGALRM, timeout_handler)
        signal.alarm(3)

        try:
            for proc in [first_proc, second_proc]:
                proc.wait()
                if proc.returncode != 0:
                    sys.exit('%s failed in tests' % scheme)
        except TimeoutError:
            pass
        except Exception as exception:
            sys.exit('test_schemes.py: %s\n' % exception)
        else:
            signal.alarm(0)
            sys.exit('test exited before time limit')
        finally:
            # cleanup
            kill_proc_group(first_proc)
            kill_proc_group(second_proc)
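
timeout_handler, TimeoutError, and kill_proc_group come from elsewhere in the project. A minimal sketch consistent with how they are used above; the kill_proc_group body assumes preexec_fn=os.setsid, as in the Popen calls:

import os
import signal

class TimeoutError(Exception):
    # raised by the SIGALRM handler to abort proc.wait()
    pass

def timeout_handler(signum, frame):
    raise TimeoutError()

def kill_proc_group(proc):
    # each child was started in its own session (preexec_fn=os.setsid),
    # so signaling the process group also cleans up its descendants
    try:
        os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
    except OSError:
        pass  # the group may already have exited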
Example #7
def check_default_qdisc(cc):
    config = parse_config()
    cc_config = config['schemes'][cc]
    kernel_qdisc = get_default_qdisc(debug=False)

    if 'qdisc' in cc_config:
        required_qdisc = cc_config['qdisc']
    else:
        required_qdisc = config['kernel_attrs']['default_qdisc']

    if kernel_qdisc != required_qdisc:
        sys.exit('Your default packet scheduler is "%s" currently. Please run '
                 '"sudo sysctl -w net.core.default_qdisc=%s" to use the '
                 'appropriate queueing discipline for %s to work, and change '
                 'it back after testing.'
                 % (kernel_qdisc, required_qdisc, cc_config['friendly_name']))
Example #8
def verify_schemes_with_meta(schemes, meta):
    schemes_config = parse_config()['schemes']

    all_schemes = meta['cc_schemes']
    if schemes is None:
        cc_schemes = all_schemes
    else:
        cc_schemes = schemes.split()

    for cc in cc_schemes:
        if cc not in all_schemes:
            sys.exit('%s is not a scheme included in '
                     'pantheon_metadata.json' % cc)
        if cc not in schemes_config:
            sys.exit('%s is not a scheme included in src/config.yml' % cc)

    return cc_schemes
Example #9
    def run(self):
        stats_logs, data = self.eval_performance()

        if not self.no_graphs:
            self.plot_throughput_delay(data)

        # map internal scheme names to their display-friendly names
        schemes_config = parse_config()['schemes']
        stats_logs_display = {}
        for cc in stats_logs:
            cc_name = schemes_config[cc]['friendly_name']
            stats_logs_display[cc_name] = stats_logs[cc]

        perf_data_path = path.join(self.data_dir, 'perf_data.pkl')
        with open(perf_data_path, 'wb') as f:
            pickle.dump(stats_logs_display, f)

        plt.close('all')
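
To consume perf_data.pkl later, the dictionary can simply be unpickled; a short read-back sketch, with data_dir as in the example:

import pickle
from os import path

perf_data_path = path.join(data_dir, 'perf_data.pkl')  # data_dir as above
with open(perf_data_path, 'rb') as f:
    stats_logs_display = pickle.load(f)  # {friendly_name: stats log}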
Example #10
def setup(args):
    if not args.install_deps:
        # update submodules
        update_submodules()

        # enable IP forwarding
        sh_cmd = 'sudo sysctl -w net.ipv4.ip_forward=1'
        check_call(sh_cmd, shell=True)

        if args.interface is not None:
            # disable reverse path filtering
            rpf = 'net.ipv4.conf.%s.rp_filter'

            sh_cmd = 'sudo sysctl -w %s=0' % (rpf % 'all')
            check_call(sh_cmd, shell=True)

            sh_cmd = 'sudo sysctl -w %s=0' % (rpf % args.interface)
            check_call(sh_cmd, shell=True)

    # setup specified schemes
    cc_schemes = None

    if args.all:
        cc_schemes = parse_config()['schemes'].keys()
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()

    if cc_schemes is not None:
        for cc in cc_schemes:
            cc_src = path.join(project_root.DIR, 'src', cc + '.py')

            # install dependencies
            if args.install_deps:
                install_deps(cc_src)
            else:
                # persistent setup across reboots
                if args.setup:
                    check_call(['python', cc_src, 'setup'])

                # setup required every time after reboot
                if call(['python', cc_src, 'setup_after_reboot']) != 0:
                    sys.stderr.write('Warning: "%s.py setup_after_reboot"'
                                     ' failed but continuing\n' % cc)
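
Examples #6 and #10 drive per-scheme wrapper scripts in src/ through a small subcommand interface ('run_first', 'setup', 'setup_after_reboot', plus the sender/receiver roles). A hypothetical skeleton of such a wrapper, showing only the subcommands the examples actually invoke; the real per-scheme scripts differ:

#!/usr/bin/env python
# hypothetical skeleton of a src/<scheme>.py wrapper
import sys

def main():
    cmd = sys.argv[1]

    if cmd == 'run_first':
        # print which side must start first ('sender' or 'receiver')
        print 'receiver'
    elif cmd == 'setup':
        pass  # one-time setup persisted across reboots
    elif cmd == 'setup_after_reboot':
        pass  # setup that must be redone after every reboot
    elif cmd == 'receiver':
        port = sys.argv[2]
        # start the receiver listening on port
    elif cmd == 'sender':
        ip, port = sys.argv[2], sys.argv[3]
        # connect the sender to ip:port

if __name__ == '__main__':
    main()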
Example #11
    def plot_throughput_delay(self, data):
        min_raw_delay = sys.maxint
        min_mean_delay = sys.maxint
        max_raw_delay = -sys.maxint
        max_mean_delay = -sys.maxint

        fig_raw, ax_raw = plt.subplots()
        fig_mean, ax_mean = plt.subplots()

        schemes_config = parse_config()['schemes']
        for cc in data:
            if not data[cc]:
                sys.stderr.write('No performance data for scheme %s\n' % cc)
                continue

            value = data[cc]
            cc_name = schemes_config[cc]['friendly_name']
            color = schemes_config[cc]['color']
            marker = schemes_config[cc]['marker']
            y_data, x_data, _ = zip(*value)

            # update min and max raw delay
            min_raw_delay = min(min(x_data), min_raw_delay)
            max_raw_delay = max(max(x_data), max_raw_delay)

            # plot raw values
            ax_raw.scatter(x_data, y_data, color=color, marker=marker,
                           label=cc_name, clip_on=False)

            # plot the average of raw values
            x_mean = np.mean(x_data)
            y_mean = np.mean(y_data)

            # update min and max mean delay
            min_mean_delay = min(x_mean, min_mean_delay)
            max_mean_delay = max(x_mean, max_mean_delay)

            ax_mean.scatter(x_mean, y_mean, color=color, marker=marker,
                            clip_on=False)
            ax_mean.annotate(cc_name, (x_mean, y_mean))

        for fig_type, fig, ax in [('raw', fig_raw, ax_raw),
                                  ('mean', fig_mean, ax_mean)]:
            if fig_type == 'raw':
                self.xaxis_log_scale(ax, min_raw_delay, max_raw_delay)
            else:
                self.xaxis_log_scale(ax, min_mean_delay, max_mean_delay)
            ax.invert_xaxis()

            yticks = ax.get_yticks()
            if yticks[0] < 0:
                ax.set_ylim(bottom=0)

            xlabel = '95th percentile one-way delay (ms)'
            ax.set_xlabel(xlabel, fontsize=12)
            ax.set_ylabel('Average throughput (Mbit/s)', fontsize=12)
            ax.grid()

        # save pantheon_summary.svg and .pdf
        ax_raw.set_title(self.expt_title.strip(), y=1.02, fontsize=12)
        lgd = ax_raw.legend(scatterpoints=1, bbox_to_anchor=(1, 0.5),
                            loc='center left', fontsize=12)

        for graph_format in ['svg', 'pdf']:
            raw_summary = path.join(
                self.data_dir, 'pantheon_summary.%s' % graph_format)
            fig_raw.savefig(raw_summary, dpi=300, bbox_extra_artists=(lgd,),
                            bbox_inches='tight', pad_inches=0.2)

        # save pantheon_summary_mean.svg and .pdf
        ax_mean.set_title(self.expt_title +
                          ' (mean of all runs by scheme)', fontsize=12)

        for graph_format in ['svg', 'pdf']:
            mean_summary = path.join(
                self.data_dir, 'pantheon_summary_mean.%s' % graph_format)
            fig_mean.savefig(mean_summary, dpi=300,
                             bbox_inches='tight', pad_inches=0.2)

        sys.stderr.write(
            'Saved throughput graphs, delay graphs, and summary '
            'graphs in %s\n' % self.data_dir)
Example #12
import sys
sys.path.append("..")
from flask import Flask, jsonify, request, send_file
import ast
from storage.mongodb_storage import MongoDBStorage as mdb
from helpers.helpers import parse_config
config = parse_config('server')

client = mdb().client
app = Flask(__name__)


def group_forecasts(forecasts):
    grouped = {}
    for forecast in forecasts:
        grouped.setdefault(forecast['forecast_type'], [])
        grouped[forecast['forecast_type']].append(forecast)
    return grouped


def get_file_link(resource, file_name):
    pattern = '{server_domain}/api/files/{resource}/{file_name}'
    file_link = pattern.format(server_domain=config['domain'],
                               resource=resource,
                               file_name=file_name)
    return file_link


def format_forecast_logos(forecast):
    if forecast['resource'] == 'vseprosport.ru':
        if forecast['forecast_type'] == 'Solo':
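
format_forecast_logos() is cut off in the source. Independent of it, a route tying the helpers above together might look like the following; fetch_forecasts() and the endpoint path are hypothetical, only group_forecasts() and jsonify come from the example:

@app.route('/api/forecasts')
def list_forecasts():
    # fetch_forecasts() is a hypothetical accessor over the MongoDB client;
    # the real storage API is not shown in this listing
    forecasts = fetch_forecasts()
    return jsonify(group_forecasts(forecasts))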
Example #13
    def run(self):
        fig, ax = plt.subplots()
        total_min_time = None
        total_max_time = None

        if self.flows > 0:
            datalink_fmt_str = '%s_datalink_run%s.log'
        else:
            datalink_fmt_str = '%s_mm_datalink_run%s.log'

        schemes_config = parse_config()['schemes']
        for cc in self.cc_schemes:
            cc_name = schemes_config[cc]['friendly_name']

            for run_id in xrange(1, self.run_times + 1):
                tunnel_log_path = path.join(
                    self.data_dir, datalink_fmt_str % (cc, run_id))
                clock_time, throughput = self.parse_tunnel_log(tunnel_log_path)

                min_time = None
                max_time = None
                max_tput = None

                for flow_id in clock_time:
                    ax.plot(clock_time[flow_id], throughput[flow_id])

                    if min_time is None or clock_time[flow_id][0] < min_time:
                        min_time = clock_time[flow_id][0]
                    if max_time is None or clock_time[flow_id][-1] > max_time:
                        max_time = clock_time[flow_id][-1]
                    flow_max_tput = max(throughput[flow_id])
                    if max_tput is None or flow_max_tput > max_tput:
                        max_tput = flow_max_tput

                ax.annotate(cc_name, (min_time, max_tput))

                if total_min_time is None or min_time < total_min_time:
                    total_min_time = min_time
                if total_max_time is None or max_time > total_max_time:
                    total_max_time = max_time

        xmin = int(math.floor(total_min_time))
        xmax = int(math.ceil(total_max_time))
        ax.set_xlim(xmin, xmax)

        new_xticks = range(xmin, xmax, 10)
        ax.set_xticks(new_xticks)
        formatter = ticker.FuncFormatter(lambda x, pos: x - xmin)
        ax.xaxis.set_major_formatter(formatter)

        fig_w, fig_h = fig.get_size_inches()
        fig.set_size_inches(self.amplify * len(new_xticks), fig_h)

        start_datetime = time.strftime('%a, %d %b %Y %H:%M:%S',
                                       time.localtime(total_min_time))
        start_datetime += ' ' + time.strftime('%z')
        ax.set_xlabel('Time (s) since ' + start_datetime, fontsize=12)
        ax.set_ylabel('Throughput (Mbit/s)', fontsize=12)

        for graph_format in ['svg', 'pdf']:
            fig_path = path.join(
                self.data_dir, 'pantheon_throughput_time.%s' % graph_format)
            fig.savefig(fig_path, bbox_inches='tight', pad_inches=0.2)

        sys.stderr.write(
            'Saved pantheon_throughput_time in %s\n' % self.data_dir)

        plt.close('all')
Example #14
def run_tests(args):
    git_summary = get_git_summary(args.mode, getattr(args, 'remote_path',
                                                     None))

    config = parse_config()
    schemes_config = config['schemes']

    if args.all:
        cc_schemes = schemes_config.keys()
        if args.random_order:
            random.shuffle(cc_schemes)
    elif args.schemes is not None:
        cc_schemes = args.schemes.split()
        if args.random_order:
            random.shuffle(cc_schemes)
    else:
        assert (args.test_config is not None)
        if args.random_order:
            random.shuffle(args.test_config['flows'])
        cc_schemes = [flow['scheme'] for flow in args.test_config['flows']]

    ssh_cmd = None
    if args.mode == 'remote':
        r = parse_remote_path(args.remote_path)
        ssh_cmd = r['ssh_cmd']

    # For each run of each scheme, change the queueing discipline and
    # receiving socket buffer sizes before and after the test.
    # Check config.yml for values.
    for run_id in xrange(args.start_run_id,
                         args.start_run_id + args.run_times):
        # clean the contents in /tmp/pantheon-tmp
        clean_tmp_cmd = 'rm -rf /tmp/pantheon-tmp/*'
        if args.mode == 'remote':
            call(ssh_cmd + [clean_tmp_cmd])
        call(clean_tmp_cmd, shell=True)

        # ISSUE (ranysha): no support for multiple schemes where each uses a
        # different qdisc. Since kernel 4.13, TCP supports packet pacing, so
        # there is no need to specify a qdisc for BBR. When running with a
        # config file, ignore the qdisc setting for now.

        if args.test_config is None:
            for cc in cc_schemes:
                default_qdisc = get_default_qdisc(ssh_cmd)
                old_recv_bufsizes = get_recv_sock_bufsizes(ssh_cmd)

                if 'qdisc' in schemes_config[cc]:
                    test_qdisc = schemes_config[cc]['qdisc']
                else:
                    test_qdisc = config['kernel_attrs']['default_qdisc']

                test_recv_sock_bufs = config['kernel_attrs']['sock_recv_bufs']

                try:
                    if default_qdisc != test_qdisc:
                        set_default_qdisc(test_qdisc, ssh_cmd)

                    set_recv_sock_bufsizes(test_recv_sock_bufs, ssh_cmd)

                    Test(args, run_id, cc).run()
                finally:
                    set_default_qdisc(default_qdisc, ssh_cmd)
                    set_recv_sock_bufsizes(old_recv_bufsizes, ssh_cmd)
        else:
            default_qdisc = get_default_qdisc(ssh_cmd)
            old_recv_bufsizes = get_recv_sock_bufsizes(ssh_cmd)
            test_qdisc = config['kernel_attrs']['default_qdisc']
            test_recv_sock_bufs = config['kernel_attrs']['sock_recv_bufs']

            try:
                if default_qdisc != test_qdisc:
                    set_default_qdisc(test_qdisc, ssh_cmd)

                set_recv_sock_bufsizes(test_recv_sock_bufs, ssh_cmd)

                Test(args, run_id, None).run()
            finally:
                set_default_qdisc(default_qdisc, ssh_cmd)
                set_recv_sock_bufsizes(old_recv_bufsizes, ssh_cmd)

    if not args.no_metadata:
        meta = vars(args).copy()
        meta['cc_schemes'] = sorted(cc_schemes)
        save_test_metadata(meta, path.abspath(args.data_dir), git_summary)
Example #15
    def __init__(self):
        self.config = parse_config('db')
        self.client = self.connect_to_db()
        self.product_collection = self.client[
            self.config['PRODUCTS_COLLECTION']]
Example #16
    def __init__(self, check_url, use_proxy=True):
        self.check_url = check_url
        self.config = parse_config('server')
        if use_proxy:
            self.get_valid_proxies()
        self.lock = Lock()
Example #17
    def plot_throughput_delay(self, data):
        min_raw_delay = sys.maxint
        min_mean_delay = sys.maxint
        max_raw_delay = -sys.maxint
        max_mean_delay = -sys.maxint

        fig_raw, ax_raw = plt.subplots()
        fig_mean, ax_mean = plt.subplots()

        schemes_config = parse_config()['schemes']
        for cc in data:
            if not data[cc]:
                sys.stderr.write('No performance data for scheme %s\n' % cc)
                continue

            value = data[cc]
            cc_name = schemes_config[cc]['friendly_name']
            color = schemes_config[cc]['color']
            marker = schemes_config[cc]['marker']
            y_data, x_data, _ = zip(*value)

            # update min and max raw delay
            min_raw_delay = min(min(x_data), min_raw_delay)
            max_raw_delay = max(max(x_data), max_raw_delay)

            # plot raw values
            ax_raw.scatter(x_data,
                           y_data,
                           color=color,
                           marker=marker,
                           label=cc_name,
                           clip_on=False)

            # plot the average of raw values
            x_mean = np.mean(x_data)
            y_mean = np.mean(y_data)

            # update min and max mean delay
            min_mean_delay = min(x_mean, min_mean_delay)
            max_mean_delay = max(x_mean, max_mean_delay)

            ax_mean.scatter(x_mean,
                            y_mean,
                            color=color,
                            marker=marker,
                            clip_on=False)
            ax_mean.annotate(cc_name, (x_mean, y_mean))

        for fig_type, fig, ax in [('raw', fig_raw, ax_raw),
                                  ('mean', fig_mean, ax_mean)]:
            if fig_type == 'raw':
                self.xaxis_log_scale(ax, min_raw_delay, max_raw_delay)
            else:
                self.xaxis_log_scale(ax, min_mean_delay, max_mean_delay)
            ax.invert_xaxis()

            yticks = ax.get_yticks()
            if yticks[0] < 0:
                ax.set_ylim(bottom=0)

            xlabel = '95th percentile one-way delay (ms)'
            ax.set_xlabel(xlabel, fontsize=12)
            ax.set_ylabel('Average throughput (Mbit/s)', fontsize=12)
            ax.grid()

        # save pantheon_summary.svg and .pdf
        ax_raw.set_title(self.expt_title.strip(), y=1.02, fontsize=12)
        lgd = ax_raw.legend(scatterpoints=1,
                            bbox_to_anchor=(1, 0.5),
                            loc='center left',
                            fontsize=12)

        for graph_format in ['svg', 'pdf']:
            raw_summary = path.join(self.data_dir,
                                    'pantheon_summary.%s' % graph_format)
            fig_raw.savefig(raw_summary,
                            dpi=300,
                            bbox_extra_artists=(lgd, ),
                            bbox_inches='tight',
                            pad_inches=0.2)

        # save pantheon_summary_mean.svg and .pdf
        ax_mean.set_title(self.expt_title + ' (mean of all runs by scheme)',
                          fontsize=12)

        for graph_format in ['svg', 'pdf']:
            mean_summary = path.join(self.data_dir,
                                     'pantheon_summary_mean.%s' % graph_format)
            fig_mean.savefig(mean_summary,
                             dpi=300,
                             bbox_inches='tight',
                             pad_inches=0.2)

        sys.stderr.write('Saved throughput graphs, delay graphs, and summary '
                         'graphs in %s\n' % self.data_dir)
Example #18
    def run(self):
        fig, ax = plt.subplots()
        total_min_time = None
        total_max_time = None

        if self.flows > 0:
            datalink_fmt_str = '%s_datalink_run%s.log'
        else:
            datalink_fmt_str = '%s_mm_datalink_run%s.log'

        schemes_config = parse_config()['schemes']
        for cc in self.cc_schemes:
            cc_name = schemes_config[cc]['friendly_name']

            for run_id in xrange(1, self.run_times + 1):
                tunnel_log_path = path.join(self.data_dir,
                                            datalink_fmt_str % (cc, run_id))
                clock_time, throughput = self.parse_tunnel_log(tunnel_log_path)

                min_time = None
                max_time = None
                max_tput = None

                for flow_id in clock_time:
                    ax.plot(clock_time[flow_id], throughput[flow_id])

                    if min_time is None or clock_time[flow_id][0] < min_time:
                        min_time = clock_time[flow_id][0]
                    if max_time is None or clock_time[flow_id][-1] > max_time:
                        max_time = clock_time[flow_id][-1]
                    flow_max_tput = max(throughput[flow_id])
                    if max_tput is None or flow_max_tput > max_tput:
                        max_tput = flow_max_tput

                ax.annotate(cc_name, (min_time, max_tput))

                if total_min_time is None or min_time < total_min_time:
                    total_min_time = min_time
                if total_max_time is None or max_time > total_max_time:
                    total_max_time = max_time

        xmin = int(math.floor(total_min_time))
        xmax = int(math.ceil(total_max_time))
        ax.set_xlim(xmin, xmax)

        new_xticks = range(xmin, xmax, 10)
        ax.set_xticks(new_xticks)
        formatter = ticker.FuncFormatter(lambda x, pos: x - xmin)
        ax.xaxis.set_major_formatter(formatter)

        fig_w, fig_h = fig.get_size_inches()
        fig.set_size_inches(self.amplify * len(new_xticks), fig_h)

        start_datetime = time.strftime('%a, %d %b %Y %H:%M:%S',
                                       time.localtime(total_min_time))
        start_datetime += ' ' + time.strftime('%z')
        ax.set_xlabel('Time (s) since ' + start_datetime, fontsize=12)
        ax.set_ylabel('Throughput (Mbit/s)', fontsize=12)

        for graph_format in ['svg', 'pdf']:
            fig_path = path.join(self.data_dir,
                                 'pantheon_throughput_time.%s' % graph_format)
            fig.savefig(fig_path, bbox_inches='tight', pad_inches=0.2)

        sys.stderr.write('Saved pantheon_throughput_time in %s\n' %
                         self.data_dir)

        plt.close('all')
Example #19
    def __init__(self):
        self.config = parse_config('db')
        self.client = self.connect_to_db()