Example #1
import seaborn


def plot_line(configs):
    raw_stats = []
    if not configs.split_host:
        assert len(configs.names) == len(configs.input)
        for dirname in configs.input:
            raw_stats.append(get_all_metrics(dirname))
    else:
        assert len(configs.input) == 1
        raw_stats = get_all_metrics(configs.input[0], True)
        assert len(configs.names) == len(raw_stats)
    # check the stats data
    for stat in raw_stats:
        assert len(stat) == len(raw_stats[0])
        # FIXME(wen): The name could be different if lines are different in
        # something other than cc algo.
        #for name in raw_stats[0]:
        #    assert name in stat
    data = []
    x_values = None
    for stat in raw_stats:
        v, x_values, _ = preprocess_line_data(configs, stat)
        data.append(v)

    df = get_line_dataframe(configs, data, x_values, configs.names)
    # scale axis
    if configs.x == "loss":
        df["loss"] *= 100
    if configs.y == "goodput":
        # convert bps to Mbps
        if configs.debug:
            print(df["goodput"])
        df["goodput"] /= 1e6
    if configs.logx:
        df[configs.x] *= configs.logx_scale
    ax = seaborn.lineplot(data=df, x=configs.x, y=configs.y, hue="name", style="name")

    if configs.x == "loss":
        ax.set_xlabel("Loss Percentage (%)")
    if configs.y == "goodput":
        ax.set_ylabel("Goodput (Mbps)")
    if configs.y == "retransmits":
        ax.set_ylabel("Retr Number")
    if configs.logx:
        ax.set_xscale("log")

    # remove the label title so it's consistent with the paper
    ax.get_legend().set_title(None)

    # matplotlib 3.5+ only accepts bottom/left (the ymin/xmin aliases were removed)
    ax.set_ylim(bottom=0)
    if configs.logx:
        ax.set_xlim(left=0.01 * configs.logx_scale)
    else:
        ax.set_xlim(left=0)
    return ax
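A minimal usage sketch for plot_line. All values below are hypothetical: the directory names, legend labels, and output path are placeholders, and the config fields simply mirror the attributes the function reads above.
import types
import matplotlib.pyplot as plt

# Hypothetical config object; every field is one plot_line actually reads.
configs = types.SimpleNamespace(
    input=["results/cubic", "results/bbr"],  # placeholder stat directories
    names=["cubic", "bbr"],                  # one legend label per input
    x="loss", y="goodput",
    split_host=False, logx=False, logx_scale=1,
    debug=False,
)
ax = plot_line(configs)
plt.savefig("line.png", bbox_inches="tight")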
Example #2
def runner(models, learn_options, GP_likelihoods=None, orders=None, WD_kernel_degrees=None, where='local', cluster_user='******', cluster='RR1-N13-09-H44', test=False, exp_name=None, **kwargs):

    if where == 'local':
        results, all_learn_options = run_models(models, orders=orders, GP_likelihoods=GP_likelihoods, learn_options_set=learn_options, WD_kernel_degrees=WD_kernel_degrees, test=test, **kwargs)
        all_metrics, gene_names = util.get_all_metrics(results, learn_options)
        util.plot_all_metrics(all_metrics, gene_names, all_learn_options, save=True)

        # for non-local (i.e. cluster), the comparable code is in cli_run_model.py
        pickle_runner_results(exp_name, results, all_learn_options)

        return results, all_learn_options, all_metrics, gene_names

    elif where == 'cluster':
        import cluster_job

        # create random cluster directory, dump learn options, and create cluster file
        tempdir, user, clust_filename = cluster_job.create(cluster_user, models, orders, WD_kernel_degrees, GP_likelihoods, exp_name=exp_name, learn_options=learn_options, **kwargs)

        # raw_input("Submit job to HPC and press any key when it's finished: ")
        # util.plot_cluster_results(directory=tempdir)

        #stdout = tempdir + r"/stdout"
        #stderr = tempdir + r"/stderr"
        #if not os.path.exists(stdout): os.makedirs(stdout)
        #if not os.path.exists(stderr): os.makedirs(stderr)

        return tempdir, clust_filename, user  # , stdout, stderr
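A hedged sketch of a local run. The model name and learn_options contents are placeholders, since their real schema is defined by run_models and util elsewhere in the project.
# Placeholder invocation: model name and option keys are illustrative only.
learn_options = {"num_proc": 1}           # placeholder option set
results, all_learn_options, all_metrics, gene_names = runner(
    models=["L1"],                        # placeholder model name
    learn_options=learn_options,
    where="local",
    exp_name="demo_run",
)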
Example #3
import numpy as np
import seaborn


def plot_heatmap(configs):
    # based on the number of inputs and target
    # load stats from two directories
    if configs.target == "retransmits":
        assert len(configs.input) == 1
        stats1 = get_all_metrics(configs.input[0])
        stats2 = None
    else:
        assert len(configs.input) == 2
        stats1 = get_all_metrics(configs.input[0])
        stats2 = get_all_metrics(configs.input[1])
    # make sure we have the same stuff
    assert len(stats1) > 0
    if stats2 is not None:
        assert len(stats1) == len(stats2)
        for name in stats1:
            assert name in stats2

    mat1, x_values, y_values, params = preprocess_heatmap_data(configs, stats1)
    if stats2 is not None:
        mat2, _, _, _ = preprocess_heatmap_data(configs, stats2)
    else:
        mat2 = None

    if configs.target == "retransmits":
        mat = np.array(mat1, dtype=int)
    elif configs.target == "rtt":
        mat = np.array(compute_dec(mat1, mat2) * 100, dtype=int)
    else:
        assert configs.target == "goodput"
        mat = np.array(compute_gain(mat1, mat2) * 100, dtype=int)
    # prepare pandas DataFrame
    df = get_heatmap_dataframe(mat, x_values, y_values, [configs.y, configs.x, "value"])
    # pandas 2.0+ requires keyword arguments for pivot
    df = df.pivot(index=configs.x, columns=configs.y, values="value")
    ax = seaborn.heatmap(df, annot=configs.target != "retransmits", fmt="d", cmap=seaborn.cm.rocket_r)
    ax.invert_yaxis()
    # set labels if necessary
    if configs.x == "rtt":
        ax.set_xlabel("RTT (ms)")
    if configs.y == "bw":
        ax.set_ylabel("Bandwidth (Mbps)")

    return ax
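compute_gain and compute_dec are project helpers not shown here. Judging only from how their outputs are scaled to integer percentages above, a plausible reading is an elementwise relative change between the two runs; the sketch below is an assumption, not the actual implementation.
import numpy as np

def compute_gain(mat1, mat2):
    # Assumed semantics: fractional goodput gain of run 1 over run 2,
    # elementwise; the caller multiplies by 100 to get percent.
    a = np.asarray(mat1, dtype=float)
    b = np.asarray(mat2, dtype=float)
    return (a - b) / b

def compute_dec(mat1, mat2):
    # Assumed semantics: fractional RTT decrease of run 1 relative to run 2.
    a = np.asarray(mat1, dtype=float)
    b = np.asarray(mat2, dtype=float)
    return (b - a) / b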
Example #4
import os
import pickle


def runner(models, learn_options, GP_likelihoods=None, orders=None, WD_kernel_degrees=None, where='local', cluster_user='******', cluster='RR1-N13-09-H44', test=False, exp_name=None, **kwargs):

    if where == 'local':
        results, all_learn_options = run_models(models, orders=orders, GP_likelihoods=GP_likelihoods, learn_options_set=learn_options, WD_kernel_degrees=WD_kernel_degrees, test=test, **kwargs)
        all_metrics, gene_names = util.get_all_metrics(results, learn_options)
        util.plot_all_metrics(all_metrics, gene_names, all_learn_options, save=True)

        # for non-local (i.e. cluster), the comparable code is in cli_run_model.py
        abspath = os.path.abspath(__file__)
        dname = os.path.join(os.path.dirname(abspath), "..", "results")
        if not os.path.exists(dname):
            os.makedirs(dname)
            print("Created directory: %s" % dname)
        if exp_name is None:
            # dict views are not indexable in Python 3
            exp_name = list(results.keys())[0]
        myfile = os.path.join(dname, exp_name + '.pickle')
        with open(myfile, 'wb') as f:
            print("writing results to %s" % myfile)
            pickle.dump((results, all_learn_options), f, -1)

        return results, all_learn_options, all_metrics, gene_names

    elif where == 'cluster':
        import cluster_job

        # create random cluster directory, dump learn options, and create cluster file
        tempdir, user, clust_filename = cluster_job.create(cluster_user, models, orders, WD_kernel_degrees, GP_likelihoods, exp_name=exp_name, learn_options=learn_options, **kwargs)

        # raw_input("Submit job to HPC and press any key when it's finished: ")
        # util.plot_cluster_results(directory=tempdir)

        #stdout = tempdir + r"/stdout"
        #stderr = tempdir + r"/stderr"
        #if not os.path.exists(stdout): os.makedirs(stdout)
        #if not os.path.exists(stderr): os.makedirs(stderr)

        return tempdir, clust_filename, user  # , stdout, stderr
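Reading a locally pickled result back is the inverse of the dump above; the file name depends on exp_name, so "demo_run" here is a placeholder.
import pickle

# Load the (results, all_learn_options) tuple written by the 'local' branch.
with open("results/demo_run.pickle", "rb") as f:
    results, all_learn_options = pickle.load(f)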