Example #1
    def setUp(self):

        test_args = '-s 3 -r 500 -g 50 -c config.json file.txt 2'
        argStr = str_to_args(test_args)
        self.config_short = Config('1.0', argStr)
        test_args = '''
            --start 3
            --rate 500
            --games 50
            --config config.json
            file.txt 2'''
        argStr = str_to_args(test_args)
        self.config_long = Config('1.0', argStr)
        self.config_no_args = Config('1.0', str_to_args("file.txt 2"))
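
This test only relies on str_to_args turning a flat command-line string into an argv-style list that Config can consume. A minimal sketch of such a helper, assuming plain whitespace tokenization is all that is needed (this is not the project's actual implementation), could be:

import shlex

def str_to_args(arg_str):
    """Split a command-line style string into an argv-style token list."""
    # shlex.split collapses whitespace (including the newlines in the
    # multi-line long-option string above) and respects quoting.
    return shlex.split(arg_str)
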
Example #2
File: mlp.py Project: nokutu/m3-project
def load_model_from_weights(weights_file):
    """Rebuild a model from the arguments encoded in its weights filename."""
    # Everything after the first '_' in the basename is the serialized
    # argument string that was used when the weights were saved.
    args_str = os.path.splitext(os.path.basename(weights_file))[0].split('_', 1)[1]
    # The loss name is stored with an underscore; switch it back to the
    # hyphenated form expected by the argument parser.
    args_str = args_str.replace('categorical_crossentropy', 'categorical-crossentropy')
    args = str_to_args(args_str)
    image_size = args.patch_size if args.patches else args.image_size
    model = build_model(image_size, args.units, args.activation, args.optimizer,
                        args.loss, args.metrics, test=True)
    model.load_weights(weights_file)
    return model
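
Here str_to_args is expected to hand back a namespace with attributes such as patch_size, units, and loss. One hypothetical way to get that behaviour is to route the string through argparse; the option names, the defaults, and the assumption that the serialized string uses --option tokens are all guesses based only on the attributes accessed above, not this project's actual encoding:

import argparse
import shlex

def str_to_args(args_str):
    # Hypothetical sketch: declare only the options that
    # load_model_from_weights reads, then parse the serialized string
    # back into a namespace.
    psr = argparse.ArgumentParser()
    psr.add_argument('--image-size', type=int, default=64)
    psr.add_argument('--patch-size', type=int, default=64)
    psr.add_argument('--patches', action='store_true')
    psr.add_argument('--units', type=int, default=2048)
    psr.add_argument('--activation', default='relu')
    psr.add_argument('--optimizer', default='sgd')
    psr.add_argument('--loss', default='categorical-crossentropy')
    psr.add_argument('--metrics', default='accuracy')
    return psr.parse_args(shlex.split(args_str))
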
Example #3
def main():
    """ This program's entrypoint. """
    # Parse command line arguments.
    psr = argparse.ArgumentParser(description="Analyzes full experiments.")
    psr, psr_verify = cl_args.add_running(psr)
    psr.add_argument("--model",
                     help="The path to a trained model file.",
                     required=True,
                     type=str)
    psr.add_argument(
        "--experiments",
        help=("The path to a directory of experiments to analyze, or the path "
              "to a single experiment file."),
        required=False,
        default=None,
        type=str)
    psr.add_argument(
        "--input-file",
        help=("The path to a file containing a list of experiments."),
        required=False,
        default=None,
        type=str)
    args = psr_verify(psr.parse_args())
    mdl_flp = args.model
    out_dir = args.out_dir
    standardize = args.standardize
    input_file = args.input_file
    exp_dir = args.experiments

    assert (exp_dir is None) != (input_file is None), \
        "Test takes in either a directory of experiments or " \
        "an input file containing all the experiments"
    assert path.exists(mdl_flp), f"Model file does not exist: {mdl_flp}"
    if args.experiments:
        assert path.exists(exp_dir), \
            f"Experiment dir/file does not exist: {exp_dir}"
        exp_flps = ([
            path.join(exp_dir, exp_fln) for exp_fln in os.listdir(exp_dir)
        ] if path.isdir(exp_dir) else [exp_dir])
    else:
        with open(input_file, "r") as fil:
            exp_flps = [line.rstrip("\n") for line in fil]

    # Parse the model filepath to determine the model type, and instantiate it.
    net = models.MODELS[
        # Convert the model filename to an arguments dictionary, and
        # extract the "model" key.
        utils.str_to_args(path.basename(mdl_flp),
                          order=sorted(defaults.DEFAULTS.keys()),
                          which="model")["model"]]()
    # # Manually remove the loss event rate sqrt feature.
    # net.in_spc.remove("loss event rate sqrt")
    # Load the model.
    if mdl_flp.endswith("pickle"):
        with open(mdl_flp, "rb") as fil:
            mdl = pickle.load(fil)
    elif mdl_flp.endswith("pth"):
        mdl = torch.jit.load(mdl_flp)
    else:
        raise Exception(f"Unknown model type: {mdl_flp}")
    net.net = mdl
    net.graph = True

    # manager = multiprocessing.Manager()
    # all_accuracy = manager.list()
    # all_bucketized_accuracy = manager.list()

    # (bw_dict, rtt_dict, queue_dict, bucketized_bw_dict,
    #  bucketized_rtt_dict, bucketized_queue_dict) = init_global(manager)

    # lock = manager.Lock()

    total = len(exp_flps)
    func_input = [(idx, total, exp_flp,
                   path.join(out_dir,
                             path.basename(exp_flp).split(".")[0]), net,
                   args.warmup_percent, args.scale_params, standardize)
                  for idx, exp_flp in enumerate(exp_flps)]

    print(f"Num files: {len(func_input)}")
    tim_srt_s = time.time()
    with multiprocessing.Pool() as pol:
        pol.starmap(process_one, func_input)

    print(f"Done Processing - time: {time.time() - tim_srt_s:.2f} seconds")
Example #4
def main():
    """ This program's entrypoint. """
    # Parse command line arguments.
    psr = argparse.ArgumentParser(description="Analyzes full simulations.")
    psr, psr_verify = cl_args.add_running(psr)
    psr.add_argument("--model",
                     help="The path to a trained model file.",
                     required=True,
                     type=str)
    psr.add_argument(
        "--simulations",
        help=("The path to a directory of simulations to analyze, or the path "
              "to a single simulation file."),
        required=True,
        type=str)
    args = psr_verify(psr.parse_args())
    mdl_flp = args.model
    out_dir = args.out_dir
    standardize = args.standardize
    assert path.exists(mdl_flp), f"Model file does not exist: {mdl_flp}"
    sim_dir = args.simulations
    assert path.exists(sim_dir), \
        f"Simulation dir/file does not exist: {sim_dir}"
    sim_flps = ([
        path.join(sim_dir, sim_fln) for sim_fln in os.listdir(sim_dir)
    ] if path.isdir(sim_dir) else [sim_dir])

    # Parse the model filepath to determine the model type, and instantiate it.
    net = models.MODELS[
        # Convert the model filename to an arguments dictionary, and
        # extract the "model" key.
        utils.str_to_args(path.basename(mdl_flp),
                          order=sorted(defaults.DEFAULTS.keys()))["model"]]()
    # # Manually remove the loss event rate sqrt feature.
    # net.in_spc.remove("loss event rate sqrt")
    # Load the model.
    if mdl_flp.endswith("pickle"):
        with open(mdl_flp, "rb") as fil:
            mdl = pickle.load(fil)
    elif mdl_flp.endswith("pth"):
        mdl = torch.jit.load(mdl_flp)
    else:
        raise Exception(f"Unknown model type: {mdl_flp}")
    net.net = mdl
    net.graph = True

    manager = multiprocessing.Manager()
    all_accuracy = manager.list()
    all_bucketized_accuracy = manager.list()

    # BW in Mbps.
    bw_dict = manager.dict({
        1: manager.list(),
        10: manager.list(),
        30: manager.list(),
        50: manager.list(),
        1000: manager.list()
    })

    # RTT in us
    rtt_dict = manager.dict({
        1000: manager.list(),
        10000: manager.list(),
        50000: manager.list(),
        100000: manager.list(),
        1000000: manager.list()
    })

    # Queue size in BDP
    queue_dict = manager.dict({
        1: manager.list(),
        2: manager.list(),
        4: manager.list(),
        8: manager.list(),
        16: manager.list(),
        32: manager.list(),
        64: manager.list()
    })

    total = len(sim_flps)
    func_input = [
        (idx, total, sim_flp,
         path.join(out_dir,
                   path.basename(sim_flp).split(".")[0]), net,
         args.warmup_percent, args.scale_params, standardize, all_accuracy,
         all_bucketized_accuracy, bw_dict, rtt_dict, queue_dict)
        for idx, sim_flp in enumerate(sim_flps)
    ]

    print(f"Num files: {len(func_input)}")
    tim_srt_s = time.time()
    with multiprocessing.Pool() as pol:
        pol.starmap(process_one, func_input)

    print(f"Done Processing - time: {time.time() - tim_srt_s:.2f} seconds")

    mean_accuracy = mean(all_accuracy)

    with open(path.join(out_dir, "results.txt"), "w") as fil:
        fil.write("Average accuracy for all the processed simulations: "
                  f"{mean_accuracy}\n")

        x_axis = []
        y_axis = []

        for bw_Mbps, values in bw_dict.items():
            if values:
                bw_accuracy = mean(values)
                fil.write(f"Bandwidth <= {bw_Mbps} Mbps, accuracy "
                          f"{bw_accuracy}\n")

                x_axis.append(f"{bw_Mbps}Mbps")
                y_axis.append(bw_accuracy)

        plot_bar(x_axis, y_axis, path.join(out_dir,
                                           "bandwidth_vs_accuracy.pdf"))

        x_axis.clear()
        y_axis.clear()

        for rtt_us, values in rtt_dict.items():
            if values:
                rtt_accuracy = mean(values)
                fil.write(f"Rtt <= {rtt_us} us, accuracy {rtt_accuracy}\n")

                x_axis.append(f"{rtt_us}us")
                y_axis.append(rtt_accuracy)

        plot_bar(x_axis, y_axis, path.join(out_dir, "rtt_vs_accuracy.pdf"))

        x_axis.clear()
        y_axis.clear()

        for queue_bdp, values in queue_dict.items():
            if values:
                queue_accuracy = mean(values)
                fil.write(f"Queue size <= {queue_bdp}x BDP, accuracy "
                          f"{queue_accuracy}\n")

                x_axis.append(f"{queue_bdp}bdp")
                y_axis.append(queue_accuracy)

        plot_bar(x_axis, y_axis, path.join(out_dir, "queue_vs_accuracy.pdf"))
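
plot_bar is project code that is not reproduced in this listing. A minimal matplotlib sketch with the same call signature (an assumption, not the project's implementation) would be enough to produce the PDFs referenced above:

import matplotlib
matplotlib.use("Agg")  # Render off-screen; the script only writes files.
from matplotlib import pyplot as plt

def plot_bar(x_axis, y_axis, out_flp):
    # One bar per bucket label, accuracy on the y-axis.
    fig, axes = plt.subplots()
    axes.bar(x_axis, y_axis)
    axes.set_ylabel("accuracy")
    fig.savefig(out_flp)
    plt.close(fig)
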
Example #5
File: test_all.py Project: cmu-snap/unfair
def main():
    """ This program's entrypoint. """
    # Parse command line arguments.
    psr = argparse.ArgumentParser(
        description="Hyper-parameter optimizer for train.py.")
    psr.add_argument("--model",
                     help="The path to a trained model file.",
                     required=True,
                     type=str)
    psr.add_argument("--simulations",
                     help="The path to a simulations to analyze.",
                     required=True,
                     type=str)
    psr.add_argument(
        "--warmup-percent",
        default=train.DEFAULTS["warmup_percent"],
        help=("The percent of each simulation's datapoint to drop from the "
              "beginning."),
        type=float)
    psr.add_argument("--scale-params",
                     help="The path to the input scaling parameters.",
                     required=True,
                     type=str)
    psr.add_argument(
        "--standardize",
        action="store_true",
        help=("Standardize the data so that it has a mean of 0 and a variance "
              "of 1. Otherwise, data will be rescaled to the range [0, 1]."))
    psr.add_argument("--out-dir",
                     default=".",
                     help="The directory in which to store output files.",
                     type=str)
    args = psr.parse_args()
    mdl_flp = args.model
    sim_dir = args.simulations
    warmup_prc = args.warmup_percent
    scl_prms_flp = args.scale_params
    out_dir = args.out_dir
    standardize = args.standardize
    assert path.exists(mdl_flp), f"Model file does not exist: {mdl_flp}"
    assert path.exists(sim_dir), f"Simulation directory does not exist: {sim_dir}"
    assert 0 <= warmup_prc < 100, \
        ("\"warmup_percent\" must be in the range [0, 100), but is: "
         f"{warmup_prc}")
    assert path.exists(scl_prms_flp), \
        f"Scaling parameters file does not exist: {scl_prms_flp}"
    if not path.exists(out_dir):
        os.makedirs(out_dir)

    # Parse the model filepath to determine the model type, and instantiate it.
    net = models.MODELS[
        # Convert the model filename to an arguments dictionary, and
        # extract the "model" key.
        utils.str_to_args(path.basename(mdl_flp),
                          order=sorted(train.DEFAULTS.keys()),
                          which="model")["model"]]()
    # # Manually remove the loss event rate sqrt feature.
    # net.in_spc.remove("loss event rate sqrt")
    # Load the model.
    if mdl_flp.endswith("pickle"):
        with open(mdl_flp, "rb") as fil:
            mdl = pickle.load(fil)
    elif mdl_flp.endswith("pth"):
        mdl = torch.jit.load(mdl_flp)
    else:
        raise Exception(f"Unknown model type: {mdl_flp}")
    net.net = mdl
    net.graph = True

    # Shared accumulators that process_one fills in and the report below
    # reads; assumed to be created the same way as in example #4.
    manager = multiprocessing.Manager()
    all_accuracy = manager.list()
    all_bucketized_accuracy = manager.list()
    # Buckets: bandwidth in Mbps, RTT in us, and queue size in BDP.
    bw_dict = manager.dict({bw: manager.list() for bw in (1, 10, 30, 50, 1000)})
    rtt_dict = manager.dict(
        {rtt: manager.list()
         for rtt in (1000, 10000, 50000, 100000, 1000000)})
    queue_dict = manager.dict(
        {q: manager.list() for q in (1, 2, 4, 8, 16, 32, 64)})

    func_input = [(path.join(sim_dir,
                             sim), path.join(out_dir,
                                             sim.split(".")[0]), net,
                   warmup_prc, scl_prms_flp, standardize, all_accuracy,
                   all_bucketized_accuracy, bw_dict, rtt_dict, queue_dict)
                  for sim in sorted(os.listdir(sim_dir))]

    print(f"Num files: {len(func_input)}")
    tim_srt_s = time.time()
    with multiprocessing.Pool() as pol:
        pol.starmap(process_one, func_input)

    print(f"Done Processing - time: {time.time() - tim_srt_s:.2f} seconds")

    mean_accuracy = mean(all_accuracy)

    with open("results.txt", "w") as f:
        f.write(
            f"Average accuracy for all the processed simulations: {mean_accuracy}\n"
        )

        x_axis = []
        y_axis = []

        for bw_Mbps, values in bw_dict.items():
            if values:
                bw_accuracy = mean(values)
                f.write(
                    f"Bandwidth less than {bw_Mbps}Mbps accuracy {bw_accuracy}\n"
                )

                x_axis.append(f"{bw_Mbps}Mbps")
                y_axis.append(bw_accuracy)

        plot_bar(x_axis, y_axis, "bandwidth_vs_accuracy.pdf")

        x_axis.clear()
        y_axis.clear()

        for rtt_us, values in rtt_dict.items():
            if values:
                rtt_accuracy = mean(values)
                f.write(f"Rtt less than {rtt_us}us accuracy {rtt_accuracy}\n")

                x_axis.append(f"{rtt_us}us")
                y_axis.append(rtt_accuracy)

        plot_bar(x_axis, y_axis, "rtt_vs_accuracy.pdf")

        x_axis.clear()
        y_axis.clear()

        for queue_bdp, values in queue_dict.items():
            if values:
                queue_accuracy = mean(values)
                f.write(
                    f"Queue size less than {queue_bdp} BDP accuracy {queue_accuracy}\n"
                )

                x_axis.append(f"{queue_bdp}bdp")
                y_axis.append(queue_accuracy)

        plot_bar(x_axis, y_axis, "queue_vs_accuracy.pdf")
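
The reporting loops above only make sense if process_one drops each simulation's accuracy into the first bucket whose key bounds the simulation's bandwidth, RTT, or queue size. A hypothetical helper capturing that bucketing rule (not taken from the project) could look like:

def bucketize(value, bucket_dict, accuracy):
    # Hypothetical helper: append the accuracy to the smallest bucket whose
    # upper bound covers the value, e.g. a 25 Mbps run lands in the 30 bucket.
    for bound in sorted(bucket_dict.keys()):
        if value <= bound:
            bucket_dict[bound].append(accuracy)
            return
    # Anything above every bound goes into the largest bucket.
    bucket_dict[max(bucket_dict.keys())].append(accuracy)
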
Example #6
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('model_file', type=str)
    parser.add_argument('-d',
                        '--dataset',
                        type=str,
                        default='/home/mcv/datasets/MIT_split')
    return parser.parse_args()


if __name__ == '__main__':

    args = parse_args()
    last = '_'.join(
        args.model_file.split('/')[-1].split('.')[0].split('_')[1:])
    last = last.replace('categorical_crossentropy', 'categorical-crossentropy')
    args2 = str_to_args(last)

    # Read the train and test files.
    train_filenames, train_labels = load_dataset(args.dataset + '/train')
    test_filenames, test_labels = load_dataset(args.dataset + '/test')

    train_ims = np.empty(
        (len(train_filenames), args2.image_size, args2.image_size, 3))
    test_ims = np.empty(
        (len(test_filenames), args2.image_size, args2.image_size, 3))
    for i, imname in enumerate(train_filenames):
        im = Image.open(imname)
        im = im.resize((args2.image_size, args2.image_size))
        train_ims[i, :, :, :] = np.array(im)
    for i, imname in enumerate(test_filenames):
        im = Image.open(imname)
        im = im.resize((args2.image_size, args2.image_size))
        test_ims[i, :, :, :] = np.array(im)
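
The chained splits near the top of this example are easier to follow with a concrete filename; the name below is made up purely to trace the slicing:

# Hypothetical model filename, used only to trace the slicing above.
model_file = '/path/to/models/mlp_64_2048_relu.h5'
last = '_'.join(model_file.split('/')[-1].split('.')[0].split('_')[1:])
print(last)  # -> '64_2048_relu'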