Code example #1
File: fet_hists.py  Project: marciopocebon/unfair
def main():
    """ This program's entrypoint. """
    # Parse command line arguments.
    psr = argparse.ArgumentParser(
        description="Visualize a simulation's features.")
    psr.add_argument("--training-data",
                     help="The path to the parsed training data.",
                     required=True,
                     type=str)
    psr, psr_verify = cl_args.add_out(psr)
    args = psr_verify(psr.parse_args())
    dat_flp = args.training_data
    assert path.exists(dat_flp), f"File does not exist: {dat_flp}"

    # Read data.
    dat = np.load(dat_flp)
    num_arrays = len(dat.files)
    assert num_arrays == 5, f"Expected 5 arrays, but found: {dat.files}"
    dat_in = dat["dat_in"]

    # Generate graphs.
    for fet in dat_in.dtype.names:
        print(f"Plotting feature: {fet}")
        pyplot.hist(dat_in[fet], bins=50, density=True)
        pyplot.xlabel(fet)
        pyplot.ylabel("histogram")
        pyplot.savefig(
            path.join(args.out_dir,
                      f"{fet.replace(' ', '_').replace('/', '-')}.pdf"))
        pyplot.close()
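Feature names in these arrays can contain spaces and slashes, so the loop above sanitizes each name before using it in a filename. A minimal sketch of that idiom on its own (the helper name safe_filename is ours, not the project's):

def safe_filename(feature_name):
    """ Makes a feature name safe to use as a filename component. """
    return feature_name.replace(" ", "_").replace("/", "-")

# e.g. "throughput p/s" becomes "throughput_p-s"
print(safe_filename("throughput p/s"))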
Code example #2
def main():
    """ This program's entrypoint. """
    # Parse command line arguments.
    psr = argparse.ArgumentParser(
        description="Parses the output of CloudLab experiments.")
    psr.add_argument(
        "--exp-dir",
        help=("The directory in which the experiment results are stored "
              "(required)."),
        required=True,
        type=str)
    psr.add_argument(
        "--untar-dir",
        help=("The directory in which the untarred experiment intermediate "
              "files are stored (required)."),
        required=True,
        type=str)
    psr.add_argument("--random-order",
                     action="store_true",
                     help="Parse experiments in a random order.")
    psr.add_argument("--skip-smoothed-features",
                     action="store_true",
                     help="Do not calculate EWMA and windowed features.")
    psr.add_argument("--parallel",
                     default=multiprocessing.cpu_count(),
                     help="The number of files to parse in parallel.",
                     type=int)
    psr, psr_verify = cl_args.add_out(psr)
    args = psr_verify(psr.parse_args())
    exp_dir = args.exp_dir
    untar_dir = args.untar_dir
    out_dir = args.out_dir
    skip_smoothed = args.skip_smoothed_features

    # Find all experiments.
    pcaps = [(path.join(exp_dir, exp), untar_dir, out_dir, skip_smoothed)
             for exp in sorted(os.listdir(exp_dir)) if exp.endswith(".tar.gz")]
    if args.random_order:
        random.shuffle(pcaps)

    print(f"Num files: {len(pcaps)}")
    tim_srt_s = time.time()
    if defaults.SYNC:
        smallest_safe_wins = {parse_exp(*pcap) for pcap in pcaps}
    else:
        with multiprocessing.Pool(processes=args.parallel) as pol:
            smallest_safe_wins = set(pol.starmap(parse_exp, pcaps))
    print(f"Done parsing - time: {time.time() - tim_srt_s:.2f} seconds")

    # Remove return values from experiments that were not parsed.
    smallest_safe_wins = [win for win in smallest_safe_wins if win != -1]
    if 0 in smallest_safe_wins:
        print("Some experiments had no safe window sizes.")
    print(
        "Smallest globally-safe window size:",
        max(smallest_safe_wins)
        if smallest_safe_wins else "No experiments parsed!")
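The final print is easy to misread: each parse_exp call returns the smallest window size that is safe for that one experiment, so the window that is safe across all experiments is the maximum of those per-experiment minima. A self-contained illustration with made-up values, using -1 for skipped experiments as above:

# Hypothetical per-experiment results: the smallest safe window that each
# experiment reported.
results = {8, 16, -1, 4}
# Drop skipped experiments, then take the max: a window of 16 is the
# smallest size that satisfies every remaining experiment at once.
parsed = [win for win in results if win != -1]
print(max(parsed) if parsed else "No experiments parsed!")  # -> 16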
Code example #3
def main():
    """ This program's entrypoint. """
    utils.set_rand_seed()

    psr = argparse.ArgumentParser(
        description=(
            "Merges parsed experiment files into unified training, validation, "
            "and test data."))
    psr.add_argument(
        "--data-dir",
        help="The path to a directory containing the experiment files.",
        required=True, type=str)
    psr.add_argument(
        "--train-split", default=50, help="Training data percentage",
        required=False, type=float)
    psr.add_argument(
        "--val-split", default=20, help="Validation data percentage",
        required=False, type=float)
    psr.add_argument(
        "--test-split", default=30, help="Test data percentage",
        required=False, type=float)
    psr, psr_verify = cl_args.add_sample_percent(*cl_args.add_out(
        *cl_args.add_warmup(*cl_args.add_num_exps(psr))))
    args = psr_verify(psr.parse_args())

    split_fracs = {
        "train": args.train_split / 100, "val": args.val_split / 100,
        "test": args.test_split / 100}
    tot_split = sum(split_fracs.values())
    # Compare with a tolerance: a valid split such as 70/20/10 does not sum
    # to exactly 1 in floating point, so a strict == 1 check is too brittle.
    assert math.isclose(tot_split, 1), \
        ("The sum of the training, validation, and test splits must equal 100, "
         f"not {tot_split * 100}")

    tim_srt_s = time.time()
    # Determine the experiment filepaths.
    exps_dir = args.data_dir
    exp_flps = [
        path.join(exps_dir, fln) for fln in os.listdir(exps_dir)
        if not fln.startswith(defaults.DATA_PREFIX) and fln.endswith(".npz")]
    random.shuffle(exp_flps)
    num_exps = len(exp_flps) if args.num_exps is None else args.num_exps
    exp_flps = exp_flps[:num_exps]
    print(f"Selected {num_exps} experiments")
    warmup_frac = args.warmup_percent / 100
    sample_frac = args.sample_percent / 100
    exp_flps, num_pkts, dtype = survey(exp_flps, warmup_frac)
    print(
        f"Total packets: {num_pkts}\nFeatures ({len(dtype.names)}):\n\t" +
        "\n\t".join(sorted(dtype.names)))

    # Create the merged training, validation, and test files.
    merge(
        exp_flps, args.out_dir, num_pkts, dtype, split_fracs, warmup_frac,
        sample_frac)
    print(f"Finished - time: {time.time() - tim_srt_s:.2f} seconds")
    return 0
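Note the tolerance in the assertion above: strict equality on floats is brittle because a valid split does not always sum to exactly 1, e.g. 70/100 + 20/100 + 10/100 evaluates to 0.9999999999999999 under IEEE-754 doubles. A minimal demonstration:

import math

total = 70 / 100 + 20 / 100 + 10 / 100
print(total)                   # 0.9999999999999999 on IEEE-754 doubles
print(total == 1)              # False: strict equality rejects a valid split
print(math.isclose(total, 1))  # True: compare with a tolerance instead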
Code example #4
File: graph_one.py  Project: marciopocebon/unfair
def main():
    """ This program's entrypoint. """
    # Parse command line arguments.
    psr = argparse.ArgumentParser(
        description="Visualize a simulation's features.")
    psr.add_argument("--parsed-data",
                     help="The path to the parsed simulation data.",
                     required=True,
                     type=str)
    psr, psr_verify = cl_args.add_out(psr)
    args = psr_verify(psr.parse_args())
    dat_flp = args.parsed_data
    out_dir = args.out_dir
    assert path.exists(dat_flp), f"File does not exist: {dat_flp}"
    if not path.exists(out_dir):
        os.makedirs(out_dir)
    with np.load(dat_flp) as fil:
        num_unfair = len(fil.files)
        assert num_unfair == 1, \
            ("This script supports simulations with a single unfair flow only, "
             f"but the provided simulation contains {num_unfair} unfair flows!")
        dat = fil[fil.files[0]]

    sim = utils.Sim(dat_flp)
    queue_fair_occupancy = 1 / (sim.unfair_flws + sim.fair_flws)

    for fet in dat.dtype.names:
        if fet == "arrival time us":
            continue
        print(f"Plotting feature: {fet}")
        pyplot.plot(dat["arrival time us"],
                    np.where(dat[fet] == -1, np.nan, dat[fet]))
        pyplot.xlabel("arrival time (us)")
        pyplot.ylabel(fet)

        # Adjust plot limits.
        pyplot.ylim(bottom=0)
        if "queue" in fet:
            pyplot.ylim(top=1.1)
            pyplot.hlines(queue_fair_occupancy,
                          0,
                          dat["arrival time us"][-1],
                          colors="k",
                          linestyles="dashdot")
        if ("mathis model label" in fet
                or "loss" in fet) and "sqrt" not in fet:
            pyplot.ylim(top=1.1)

        pyplot.tight_layout()
        pyplot.savefig(
            path.join(out_dir,
                      ("arrival_time_us_vs_"
                       f"{fet.replace(' ', '_').replace('/', '-')}.pdf")))
        pyplot.close()
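One detail worth noting: np.where(dat[fet] == -1, np.nan, dat[fet]) maps the -1 sentinel (our reading: a feature value that is undefined for a packet) to NaN, which matplotlib skips, so the line shows a gap instead of a spike down to -1. A standalone sketch of the idiom:

import numpy as np
from matplotlib import pyplot

xs = np.arange(6)
ys = np.array([1.0, 2.0, -1.0, 4.0, -1.0, 6.0])  # -1 marks missing samples
# Replace the sentinel with NaN so matplotlib draws a gap rather than a
# spike down to -1.
pyplot.plot(xs, np.where(ys == -1, np.nan, ys))
pyplot.savefig("masked.pdf")
pyplot.close()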
Code example #5
def main():
    """ This program's entrypoint. """
    # Parse command line arguments.
    psr = argparse.ArgumentParser(description="Generates training data.")
    psr.add_argument(
        "--log-dst", default=EMAIL_DST,
        help="The email address to which updates will be sent.", type=str)
    psr, psr_verify = cl_args.add_out(psr)
    args = psr_verify(psr.parse_args())
    # The ID of the experiment.
    eid = str(round(time.time()))
    # Create a new output directory based on the current time.
    out_dir = path.join(args.out_dir, eid)
    # For organization purposes, store the pcap files in a subdirectory.
    sim_dir = path.join(out_dir, "sim")
    # This also creates out_dir.
    os.makedirs(sim_dir)

    # Set up logging.
    numeric_level = getattr(logging, LOG_LVL.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError(f"Invalid log level: {LOG_LVL}")
    logging.basicConfig(level=numeric_level)
    log = logging.getLogger(LOGGER)

    # Assemble the configurations.
    cnfs = [{"bottleneck_bandwidth_Mbps": bw_Mbps,
             "bottleneck_delay_us": dly_us,
             # Calculate queue capacity as a multiple of the BDP. If the BDP is
             # less than a single packet, then use 1 packet as the BDP anyway.
             "bottleneck_queue_p": int(round(
                 que_p *
                 max(1, bdp_bps(bw_Mbps, dly_us * 6) / float(PACKET_SIZE_B)))),
             "unfair_flows": UNFAIR_FLOWS,
             "other_flows": flws,
             "other_proto": OTHER_PROTO,
             "unfair_edge_delays_us": f"[{dly_us}]",
             "other_edge_delays_us": f"[{dly_us}]",
             "payload_B": PACKET_SIZE_B,
             "enable_mitigation": "false",
             "duration_s": DUR_s,
             "pcap": "true" if PCAP else "false",
             "out_dir": sim_dir}
            for bw_Mbps, dly_us, que_p, flws in itertools.product(
                BWS_Mbps, DELAYS_us, QUEUE_p, OTHER_FLOWS)]
    sim.sim(eid, cnfs, out_dir, log_par=LOGGER, log_dst=args.log_dst,
            dry_run=DRY_RUN, sync=defaults.SYNC)

    log.info("Results in: %s", out_dir)
    log.critical("Finished.")
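The queue capacity above is a multiple of the bandwidth-delay product, computed by a bdp_bps helper that this excerpt does not show; the dly_us * 6 factor presumably scales a per-edge delay up to a round-trip delay for this topology. A plausible sketch of such a helper, purely as an assumption about what it computes (the name bdp_bytes is ours):

def bdp_bytes(bw_Mbps, dly_us):
    """ Bandwidth-delay product in bytes: bw_Mbps * 1e6 is bits per second,
    dly_us * 1e-6 is seconds, and dividing by 8 converts bits to bytes. """
    return bw_Mbps * 1e6 * dly_us * 1e-6 / 8

# e.g. 10 Mbps with a 50 ms round-trip time keeps 62500 bytes in flight.
print(bdp_bytes(10, 50000))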
Code example #6
File: graph_one.py  Project: cmu-snap/unfair
def main():
    """ This program's entrypoint. """
    # Parse command line arguments.
    psr = argparse.ArgumentParser(
        description="Visualize a experiment's features.")
    psr.add_argument(
        "--parsed-data",
        help=("The path to the parsed experiment data generated by "
              "gen_features.py."),
        required=True,
        type=str)
    psr, psr_verify = cl_args.add_out(psr)
    args = psr_verify(psr.parse_args())
    dat_flp = args.parsed_data
    out_dir = args.out_dir
    assert path.exists(dat_flp), f"File does not exist: {dat_flp}"
    if not path.exists(out_dir):
        os.makedirs(out_dir)
    with np.load(dat_flp) as fil:
        dat = [fil[flw] for flw in sorted(fil.files, key=int)]

    exp = utils.Exp(dat_flp)
    num_flws = exp.tot_flws
    found_flws = len(dat)
    assert num_flws == found_flws, \
        (f"Experiment has {num_flws} flows, parsed data has {found_flws} "
         f"flows: {dat_flp}")
    if found_flws == 0:
        print("No flows to graph.")
        return

    bw_share_fair = 1 / num_flws
    bw_fair = exp.bw_bps * bw_share_fair
    labels = [f"Flow {flw}" for flw in range(num_flws)]
    x_min = min(flw_dat[0][features.ARRIVAL_TIME_FET] for flw_dat in dat)
    x_max = max(flw_dat[-1][features.ARRIVAL_TIME_FET] for flw_dat in dat)
    fets = dat[0].dtype.names

    print(f"Plotting {len(fets)} features...")
    with multiprocessing.Pool() as pol:
        pol.starmap(
            graph_fet,
            ((out_dir, dat, fet, bw_share_fair, bw_fair, x_min, x_max, labels)
             for fet in fets))
Code example #7
def main():
    """ This program's entrypoint. """
    # Parse command line arguments.
    psr = argparse.ArgumentParser(
        description="Visualize a simulation's features.")
    psr.add_argument(
        "--f1b",
        help=("The path to a directory contained parsed data files for figure "
              "1b."),
        required=True,
        type=str)
    psr.add_argument(
        "--f1c",
        help=(
            "The path to a directory containing a parsed data file for figure "
            "1c."),
        required=True,
        type=str)
    psr.add_argument("--variant",
                     help="The TCP variant competing with BBR.",
                     required=True,
                     type=str)
    psr, psr_verify = cl_args.add_out(psr)
    args = psr_verify(psr.parse_args())
    f1b = args.f1b
    f1c = args.f1c
    var = args.variant
    out_dir = args.out_dir
    assert path.exists(f1b), f"Directory does not exist: {f1b}"
    assert path.exists(f1c), f"Directory does not exist: {f1c}"
    if not path.exists(out_dir):
        os.makedirs(out_dir)

    plot_f1b([path.join(f1b, fln) for fln in os.listdir(f1b)], var, out_dir)
    if var == "Cubic":
        plot_f1c([path.join(f1c, fln) for fln in os.listdir(f1c)], var,
                 out_dir)
Code example #8
def main():
    """ This program's entrypoint. """
    # Parse command line arguments.
    psr = argparse.ArgumentParser(
        description="Parses the output of gen_training_data.py.")
    psr.add_argument(
        "--exp-dir",
        help=("The directory in which the experiment results are stored "
              "(required)."),
        required=True,
        type=str)
    psr.add_argument("--random-order",
                     action="store_true",
                     help="Parse the simulations in a random order.")
    psr, psr_verify = cl_args.add_out(psr)
    args = psr_verify(psr.parse_args())
    exp_dir = args.exp_dir
    out_dir = args.out_dir

    # Find all simulations.
    pcaps = [(path.join(exp_dir, sim), out_dir)
             for sim in sorted(os.listdir(exp_dir))]
    if args.random_order:
        # Set the random seed so that multiple instances of this
        # script see the same random order.
        utils.set_rand_seed()
        random.shuffle(pcaps)

    print(f"Num files: {len(pcaps)}")
    tim_srt_s = time.time()
    if defaults.SYNC:
        for pcap in pcaps:
            parse_pcap(*pcap)
    else:
        with multiprocessing.Pool() as pol:
            pol.starmap(parse_pcap, pcaps)
    print(f"Done parsing - time: {time.time() - tim_srt_s:.2f} seconds")
Code example #9
File: prepare_data.py  Project: marciopocebon/unfair
def main():
    """ This program's entrypoint. """
    utils.set_rand_seed()

    psr = argparse.ArgumentParser(description=(
        "Merges parsed simulation files into unified training, validation, "
        "and test data."))
    psr.add_argument(
        "--data-dir",
        help="The path to a directory containing the simulation files.",
        required=True,
        type=str)
    psr.add_argument("--train-split",
                     default=50,
                     help="Training data fraction",
                     required=False,
                     type=float)
    psr.add_argument("--val-split",
                     default=20,
                     help="Validation data fraction",
                     required=False,
                     type=float)
    psr.add_argument("--test-split",
                     default=30,
                     help="Test data fraction",
                     required=False,
                     type=float)
    psr, psr_verify = cl_args.add_out(*cl_args.add_warmup(
        *cl_args.add_num_sims(psr)))
    args = psr_verify(psr.parse_args())

    split_prcs = {
        "train": args.train_split,
        "val": args.val_split,
        "test": args.test_split
    }
    tot_split = sum(split_prcs.values())
    assert tot_split == 100, \
        ("The sum of the training, validation, and test splits must equal 100, "
         f"not {tot_split}")

    tim_srt_s = time.time()
    # Determine the simulation filepaths.
    sims_dir = args.data_dir
    sim_flns = os.listdir(sims_dir)
    random.shuffle(sim_flns)
    num_sims = args.num_sims
    num_sims = len(sim_flns) if num_sims is None else num_sims
    print(f"Selected {num_sims} simulations")
    sim_flps = [
        path.join(sims_dir, sim_fln) for sim_fln in sim_flns[:num_sims]
    ]
    warmup_frac = args.warmup_percent / 100
    num_pkts, dtype = survey(sim_flps, warmup_frac)
    fets = dtype.names
    print(f"Total packets: {num_pkts}\nFeatures:\n    " +
          "\n    ".join(sorted(fets)))

    # Create the merged training, validation, and test files.
    merge(sim_flps, args.out_dir, num_pkts, dtype, split_prcs, warmup_frac)
    print(f"Finished - time: {time.time() - tim_srt_s:.2f} seconds")
    return 0