Code Example #1
def main(params):

    # load configuration of pre-trained models
    exp_path = os.path.join("results", params["dataset"], params["model_type"],
                            params["exp"])
    config_path = os.path.join(exp_path, "config.yml")
    config = io_utils.load_yaml(config_path)
    params["config_path"] = config_path
    config = M.override_config_from_params(config, params)
    config["exp_path"] = exp_path
    cmf.create_save_dirs(config["misc"])

    # create logger
    logger_path = os.path.join(config["exp_path"], "evaluation.log")
    logger = io_utils.get_logger("Evaluate", log_file_path=logger_path)
    """ Build data loader """
    loader_config = io_utils.load_yaml(params["loader_config_path"])
    dset, L = cmf.get_loader(dataset, ["test"], [loader_config],
                             num_workers=params["num_workers"])
    config = M.override_config_from_loader(config, dset["test"])
    """ Build network """
    ckpt_path = os.path.join(
        exp_path, "checkpoints",
        "checkpoint_epoch_{:03d}.pkl".format(params["epoch"]))
    net = cmf.factory_model(config, M, dset["test"], ckpt_path)
    """ Test networks """
    cmf.test_inference(config, L["test"], net)
Code Example #2
File: eval.py  Project: skynbe/JH-settings
def main(params):
    # Obtain configuration path
    exp_path = os.path.join("results", params["dataset"], params["model_type"],
                            params["experiment"])
    config_path = os.path.join(exp_path, "config.yml")
    params["config_path"] = config_path

    # prepare model and dataset
    M, dataset, config = cmf.prepare_experiment(params)

    # evaluate on GT
    config["evaluation"]["use_gt"] = params["evaluate_on_gt"]

    # evaluate on Top 1000 proposals
    if params["evaluate_on_top1000"]:
        config["evaluation"]["use_gt"] = False
        config["evaluation"]["apply_nms"] = False

    if len(params["proposal"]) > 0:
        config["evaluation"]["precomputed_proposal_sequence"] = params[
            "proposal"]

    # create logger
    epoch_logger = cmf.create_logger(config, "EPOCH", "test.log")
    """ Build data loader """
    loader_config = io_utils.load_yaml(params["loader_config_path"])
    if params["test_on_server"]:
        loader_config = loader_config["test_loader"]
        test_on = "Test_Server"
    else:
        loader_config = loader_config["val_loader"]
        test_on = "Test"
    dsets, L = cmf.get_loader(dataset,
                              split=["test"],
                              loader_configs=[loader_config],
                              num_workers=params["num_workers"])
    config = M.override_config_from_dataset(config, dsets["test"], mode="Test")
    config["model"]["resume"] = True
    tensorboard_path = config["misc"]["tensorboard_dir"]
    config["misc"]["tensorboard_dir"] = ""  #
    config["misc"]["debug"] = params["debug_mode"]
    """ Evaluating networks """
    e0 = params["start_epoch"]
    e1 = params["end_epoch"]
    es = params["epoch_stride"]
    io_utils.check_and_create_dir(tensorboard_path +
                                  "_test_s{}_e{}".format(e0, e1))
    summary = PytorchSummary(tensorboard_path + "_test_s{}_e{}".format(e0, e1))
    for epoch in range(e0, e1 + 1, es):
        """ Build network """
        config["model"]["checkpoint_path"] = \
            os.path.join(exp_path, "checkpoints", "epoch_{:03d}.pkl".format(epoch))
        net, _ = cmf.factory_model(config, M, dsets["test"], None)
        net.set_tensorboard_summary(summary)

        cmf.test(config, L["test"], net, epoch, None, epoch_logger, on=test_on)
Code Example #3
File: ensemble.py  Project: JonghwanMun/MCL-KD
def main(params):
    # loading configuration and setting environment
    config = io_utils.load_yaml(params["config_path"])
    config["debug_mode"] = params["debug_mode"]
    config["out"] = params["output_filename"]
    config["assignment_path"] = params["assignment_path"]
    config["save_logits"] = params["save_logits"]

    # ensemble networks
    ensemble(config)
Code Example #4
def prepare_experiment(params):
    M = get_model(params["model_type"])
    D = get_dataset(params["dataset"])

    # loading configuration and setting environment
    config = io_utils.load_yaml(params["config_path"])
    config = update_config_from_params(config, params)
    create_save_dirs(config["misc"])

    return M, D, config
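
Code Example #2 above calls this helper via the cmf module. A minimal usage sketch, assuming only that params carries the keys the function reads (model_type, dataset, config_path); the concrete values below are hypothetical:

params = {
    "model_type": "ensemble",                                # hypothetical value
    "dataset": "vqa",                                        # hypothetical value
    "config_path": "results/vqa/ensemble/exp1/config.yml",   # hypothetical path
}
# M is the model module, D the dataset module, config the merged configuration
M, D, config = cmf.prepare_experiment(params)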
Code Example #5
File: train.py  Project: JonghwanMun/MCL-KD
def main():
    # get parameters from cmd
    params = _get_argument_params()
    global M, dataset
    M = cmf.get_model(params["model_type"])
    dataset = cmf.get_dataset(params["dataset"])

    # loading configuration and setting environment
    config = io_utils.load_yaml(params["config_path"])
    config = M.override_config_from_params(config, params)
    cmf.create_save_dirs(config["misc"])

    # create loggers
    global logger
    logger = cmf.create_logger(config)

    # train network
    train(config)
Code Example #6
def main(params):
    config = io_utils.load_yaml(params["config"])

    # prepare dataset
    D = cmf.get_dataset(params["dataset"])
    dsets, L = cmf.get_loader(D,
                              split=["test"],
                              loader_configs=[config["test_loader"]],
                              num_workers=params["num_workers"])

    # Build network
    M = cmf.get_method(params["method"])
    net = M(config, logger=None)
    net.load_checkpoint(params["checkpoint"], True)
    if config["model"]["use_gpu"]: net.gpu_mode()

    # Evaluating networks
    cmf.test(config, L["test"], net, -1, None, mode="Test")
Code Example #7
File: ensemble.py  Project: JonghwanMun/MCL-KD
def ensemble(config):

    """ Build data loader """
    dset = dataset.DataSet(config["test_loader"])
    L = data.DataLoader(dset,
                        batch_size=config["test_loader"]["batch_size"],
                        num_workers=config["num_workers"],
                        shuffle=False, collate_fn=dataset.collate_fn)

    """ Load assignments if exists """
    with_assignment = False
    if config["assignment_path"] != "None":
        with_assignment = True
        assignment_file = io_utils.load_hdf5(config["assignment_path"], verbose=False)
        assignments = assignment_file["assignments"][:]
        cnt_mapping = np.zeros((3,3))

    """ Build network """
    nets = []
    net_configs = []
    for i in range(len(config["checkpoint_paths"])):
        net_configs.append(io_utils.load_yaml(config["config_paths"][i]))
        net_configs[i] = M.override_config_from_loader(net_configs[i], dset)
        nets.append(M(net_configs[i]))
        nets[i].bring_loader_info(dset)
        apply_cc_after = utils.get_value_from_dict(
                net_configs[i]["model"], "apply_curriculum_learning_after", -1)
        # load the checkpoint
        nets[i].load_checkpoint(config["checkpoint_paths"][i])
        start_epoch = int(utils.get_filename_from_path(
                config["checkpoint_paths"][i]).split("_")[-1])
        # if the checkpoint was trained with curriculum learning, re-apply it
        if (apply_cc_after > 0) and (start_epoch >= apply_cc_after):
            nets[i].apply_curriculum_learning()

    # ship network to use gpu
    if config["use_gpu"]:
        for i in range(len(nets)):
            nets[i].gpu_mode()
    for i in range(len(nets)):
        nets[i].eval_mode()

    # initialize counters for different tau
    metrics = ["top1-avg", "top1-max", "oracle"]
    for i in range(len(nets)):
        modelname = "M{}".format(i)
        metrics.append(modelname)
    tau = [1.0, 1.2, 1.5, 2.0, 5.0, 10.0, 50.0, 100.0]
    counters = OrderedDict()
    for T in tau:
        tau_name = "tau-"+str(T)
        counters[tau_name] = OrderedDict()
        for mt in metrics:
            counters[tau_name][mt] = accumulator.Accumulator(mt)

    """ Run training network """
    ii = 0
    itoa = dset.get_itoa()
    predictions = []
    for batch in tqdm(L):
        # forward all networks
        B = batch[0][0].size(0)
        # the ground truth may be wrapped in a list
        if isinstance(batch[0][-1], list):
            gt = batch[0][-1][0]
        else:
            gt = batch[0][-1]

        prob_list = []
        for i in range(len(nets)):
            outputs = nets[i].evaluate(batch)
            prob_list.append(outputs[1]) # m*[B,A]

        if config["save_logits"]:
            TODO = True

        for T in tau:
            tau_name = "tau-"+str(T)
            probs = [net_utils.get_data(F.softmax(logits/T, dim=1)) \
                     for logits in prob_list] # m*[B,A]

            # count correct numbers for each model
            for i in range(len(nets)):
                val, idx = probs[i].max(dim=1)
                correct = torch.eq(idx, gt)
                num_correct = torch.sum(correct)
                modelname = "M{}".format(i)
                counters[tau_name][modelname].add(num_correct, B)

                # accumulate per-model correctness for the oracle metric
                if i == 0:
                    oracle_correct = correct
                else:
                    oracle_correct = oracle_correct + correct

            # top1-max accuracy for ensemble
            ens_probs, ens_idx = torch.stack(probs,0).max(0) # [B,A]
            max_val, max_idx = ens_probs.max(dim=1)
            num_correct = torch.sum(torch.eq(max_idx, gt))
            counters[tau_name]["top1-max"].add(num_correct, B)

            # top1-avg accuracy for ensemble
            ens_probs = sum(probs) # [B,A]
            max_val, max_idx = ens_probs.max(dim=1)
            num_correct = torch.sum(torch.eq(max_idx, gt))
            counters[tau_name]["top1-avg"].add(num_correct, B)

            # oracle accuracy for ensemble
            num_oracle_correct = torch.sum(torch.ge(oracle_correct, 1))
            counters[tau_name]["oracle"].add(num_oracle_correct, B)

            # attach predictions (note: this appends once per temperature T)
            for i in range(len(batch[1])):
                qid = batch[1][i]
                predictions.append({
                    "question_id": qid,
                    "answer": utils.label2string(itoa, max_idx[i])
                })

        # epoch done

    # print accuracy
    for cnt_k,cnt_v in counters.items():
        txt = cnt_k + " "
        for k,v in cnt_v.items():
            txt += ", {} = {:.5f}".format(v.get_name(), v.get_average())
        print(txt)

    save_dir = os.path.join("results", "ensemble_predictions")
    io_utils.check_and_create_dir(save_dir)
    io_utils.write_json(os.path.join(save_dir, config["out"]+".json"), predictions)
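
The metric bookkeeping above is easier to follow in isolation. Below is a minimal, self-contained sketch (not from the repository) of the three ensemble scores the loop tracks, assuming a list of per-model logits and integer ground-truth labels:

import torch
import torch.nn.functional as F

def ensemble_metrics(logits_list, gt, T=1.0):
    """Correct counts for one batch: top1-avg, top1-max, and oracle.

    logits_list: list of [B, A] tensors, one per ensemble member.
    gt: [B] tensor of ground-truth answer indices.
    """
    probs = [F.softmax(l / T, dim=1) for l in logits_list]          # m * [B, A]
    per_model_correct = torch.stack(
        [torch.eq(p.max(dim=1)[1], gt) for p in probs], dim=0)      # [m, B]

    # top1-avg: argmax of the summed member distributions
    top1_avg = torch.eq(sum(probs).max(dim=1)[1], gt).sum().item()

    # top1-max: argmax of the element-wise max over members
    max_probs, _ = torch.stack(probs, dim=0).max(dim=0)             # [B, A]
    top1_max = torch.eq(max_probs.max(dim=1)[1], gt).sum().item()

    # oracle: a sample counts if any single member predicts it correctly
    oracle = per_model_correct.any(dim=0).sum().item()
    return top1_avg, top1_max, oracle

# usage with random tensors: 3 models, batch of 8, 10 answer classes
logits_list = [torch.randn(8, 10) for _ in range(3)]
gt = torch.randint(0, 10, (8,))
print(ensemble_metrics(logits_list, gt, T=2.0))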
Code Example #8
                        help="Train the model in debug mode.")

    params = vars(parser.parse_args())
    print(json.dumps(params, indent=4))
    return params


if __name__ == "__main__":
    # load parameters
    params = _get_argument_params()
    global M, dataset
    M = cmf.get_model(params["model_type"])
    dataset = cmf.get_dataset(params["dataset"])

    # loading configuration and setting environment
    config = io_utils.load_yaml(params["config_path"])
    config = M.override_config_from_params(config, params)
    cmf.create_save_dirs(config["misc"])
    """ Build data loader """
    if params["mode"] == "train":
        dset = dataset.DataSet(config["train_loader"])
    else:
        dset = dataset.DataSet(config["test_loader"])

    L = data.DataLoader(dset, batch_size=64,
                        num_workers=config["misc"]["num_workers"],
                        shuffle=False, collate_fn=dataset.collate_fn)
    config = M.override_config_from_loader(config, dset)
    """ Build network """
    net = M(config)
    net.bring_loader_info(dset)
Code Example #9
File: eval.py  Project: JonghwanMun/MCL-KD
def main(params):

    # load configuration of pre-trained models
    exp_path = os.path.join("results", params["dataset"],
                            params["model_type"], params["exp"])
    config_path = os.path.join(exp_path, "config.yml")
    config = io_utils.load_yaml(config_path)
    params["config_path"] = config_path
    config = M.override_config_from_params(config, params)
    config["exp_path"] = exp_path
    cmf.create_save_dirs(config["misc"])

    # create logger
    logger_path = os.path.join(config["exp_path"], "evaluation.log")
    logger = io_utils.get_logger("Evaluate", log_file_path=logger_path)

    """ Build data loader """
    loader_config = io_utils.load_yaml(params["loader_config_path"])
    dset = dataset.DataSet(loader_config)
    L = data.DataLoader(dset, batch_size=loader_config["batch_size"],
                        num_workers=params["num_workers"],
                        shuffle=False, collate_fn=dataset.collate_fn)
    config = M.override_config_from_loader(config, dset)

    if params["mode"] == "eval":

        """ Evaluating networks """
        e0 = params["start_epoch"]
        e1 = params["end_epoch"]
        e_stride = params["epoch_stride"]
        sample_data = dset.get_samples(5)
        for epoch in range(e0, e1+1, e_stride):
            """ Build network """
            net = M(config)
            net.bring_loader_info(dset)
            # ship network to use gpu
            if config["model"]["use_gpu"]:
                net.gpu_mode()

            # load checkpoint
            if not (net.classname == "ENSEMBLE" and config["model"]["version"] == "IE"):
                ckpt_path = os.path.join(exp_path, "checkpoints",
                                         "checkpoint_epoch_{:03d}.pkl".format(epoch))
                assert os.path.exists(ckpt_path), \
                    "Checkpoint does not exist ({})".format(ckpt_path)
                net.load_checkpoint(ckpt_path)

            # if the checkpoint was already trained with curriculum learning, apply it
            apply_cc_after = utils.get_value_from_dict(
                    config["model"], "apply_curriculum_learning_after", -1)
            if (apply_cc_after > 0) and (epoch >= apply_cc_after):
                net.apply_curriculum_learning()

            cmf.evaluate(config, L, net, epoch-1, logger_name="eval",
                         mode="Evaluation", verbose_every=100)

    elif params["mode"] == "selection":
        epoch = params["start_epoch"]
        """ Build network """
        net = M(config)
        net.bring_loader_info(dset)
        # ship network to use gpu
        if config["model"]["use_gpu"]:
            net.gpu_mode()

        # load checkpoint
        ckpt_path = os.path.join(exp_path, "checkpoints", "checkpoint_epoch_{:03d}.pkl".format(epoch))
        assert os.path.exists(ckpt_path), "Checkpoint does not exists ({})".format(ckpt_path)
        net.load_checkpoint(ckpt_path)
        apply_cc_after = utils.get_value_from_dict(
                config["model"], "apply_curriculum_learning_after", -1)
        # if the checkpoint was trained with curriculum learning, apply it
        if (apply_cc_after > 0) and (epoch >= apply_cc_after):
            net.apply_curriculum_learning()

        cmf.get_selection_values(config, L, net, epoch-1, logger_name="eval",
                                 mode="Evaluation", verbose_every=100)
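
The curriculum-learning check recurs in Code Examples #7 and #9. A hypothetical helper (not in the repository) consolidating that pattern, assuming the same config layout and net interface used above:

def maybe_apply_curriculum(net, model_config, epoch):
    # -1 (the default) means curriculum learning is disabled
    apply_cc_after = utils.get_value_from_dict(
            model_config, "apply_curriculum_learning_after", -1)
    # if the checkpoint epoch is past the threshold, switch the network
    # to its curriculum-learning mode, mirroring the snippets above
    if (apply_cc_after > 0) and (epoch >= apply_cc_after):
        net.apply_curriculum_learning()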