Example 1
def _get_loggers(self):
    """ Create logging variables. """
    self.logger = {}
    self.logger["train"] = io_utils.get_logger("Train")
    self.logger["epoch"] = io_utils.get_logger("Epoch")
    self.logger["eval"] = io_utils.get_logger("Evaluate")
Example 2
def evaluate(config,
             loader,
             net,
             epoch,
             logger_name="epoch",
             mode="Train",
             verbose_every=None):

    if verbose_every is None:
        verbose_every = config["evaluation"]["print_every"]
    # load logger
    if logger_name == "epoch":
        logger = io_utils.get_logger("Train")
    elif logger_name == "eval":
        logger = io_utils.get_logger("Evaluate")
    else:
        raise NotImplementedError()

    net.eval_mode()  # set the network to evaluation mode
    net.reset_status()  # reset status
    """ Run validation over the loader """
    ii = 0
    tm = timer.Timer()
    for batch in loader:
        data_load_duration = tm.get_duration()
        # forward the network
        tm.reset()
        outputs = net.evaluate(batch)
        run_duration = tm.get_duration()

        # accumulate the number of correct answers
        net.compute_status(outputs[1], batch[0][-1])

        # print learning information
        if ((verbose_every > 0) and ((ii+1) % verbose_every == 0)) \
                or config["misc"]["debug"]:
            net.print_status(epoch + 1, ii + 1, mode="eval")
            txt = "[TEST] fetching for {:.3f}s, inference for {:.3f}s\n"
            logger.debug(txt.format(data_load_duration, run_duration))

        ii += 1
        tm.reset()

        if (config["misc"]["debug"]) and (ii > 2):
            break
        # end for batch in loader

    net.metric = net.counters["top1-avg"].get_average()  # used for parameter tuning
    net.print_counters_info(epoch + 1, logger_name=logger_name, mode=mode)
    net.save_results(None, "epoch_{:03d}".format(epoch + 1), mode="eval")
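The evaluation loop above measures data loading and inference time with timer.Timer, another helper that is not part of this listing. Assuming reset() restarts the clock and get_duration() returns elapsed seconds, a minimal wall-clock sketch would be:

import time

class Timer(object):
    """ Wall-clock timer with reset() and get_duration(). """

    def __init__(self):
        self.start = time.time()

    def reset(self):
        self.start = time.time()

    def get_duration(self):
        return time.time() - self.start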
Example 3
def create_logger(config, logger_name, log_path):
    logger_path = os.path.join(config["misc"]["result_dir"], log_path)
    logger = io_utils.get_logger(
        logger_name, log_file_path=logger_path,
        print_lev=getattr(logging, config["logging"]["print_level"]),
        write_lev=getattr(logging, config["logging"]["write_level"]))
    return logger
Example 4
def main(params):

    # load configuration of pre-trained models
    exp_path = os.path.join("results", params["dataset"], params["model_type"],
                            params["exp"])
    config_path = os.path.join(exp_path, "config.yml")
    config = io_utils.load_yaml(config_path)
    params["config_path"] = config_path
    config = M.override_config_from_params(config, params)
    config["exp_path"] = exp_path
    cmf.create_save_dirs(config["misc"])

    # create logger
    logger_path = os.path.join(config["exp_path"], "evaluation.log")
    logger = io_utils.get_logger("Evaluate", log_file_path=logger_path)
    """ Build data loader """
    loader_config = io_utils.load_yaml(params["loader_config_path"])
    dset, L = cmf.get_loader(dataset, ["test"], [loader_config],
                             num_workers=params["num_workers"])
    config = M.override_config_from_loader(config, dset["test"])
    """ Build network """
    ckpt_path = os.path.join(
        exp_path, "checkpoints",
        "checkpoint_epoch_{:03d}.pkl".format(params["epoch"]))
    net = cmf.factory_model(config, M, dset["test"], ckpt_path)
    """ Test networks """
    cmf.test_inference(config, L["test"], net)
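main above is driven entirely by a params dictionary; the keys it expects can be read off the function body. A hypothetical invocation, with placeholder values for every entry, might look like:

params = {
    "dataset": "vqa",                            # placeholder dataset name
    "model_type": "ensemble",                    # placeholder model type
    "exp": "exp001",                             # experiment folder under results/
    "epoch": 10,                                 # checkpoint epoch to load
    "num_workers": 4,
    "loader_config_path": "configs/loader.yml",  # placeholder loader config
}
main(params)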
Example 5
def create_logger(config, train_mode=True):
    logger = {}
    if train_mode:
        logger_path = os.path.join(config["misc"]["result_dir"], "train.log")
        logger["train"] = io_utils.get_logger(
            "Train", log_file_path=logger_path,
            print_lev=getattr(logging, config["logging"]["print_level"]),
            write_lev=getattr(logging, config["logging"]["write_level"]))
    # the epoch (performance) logger is created in both modes
    epoch_logger_path = os.path.join(config["misc"]["result_dir"],
                                     "performance.log")
    logger["epoch"] = io_utils.get_logger(
        "Epoch", log_file_path=epoch_logger_path,
        print_lev=getattr(logging, config["logging"]["print_level"]),
        write_lev=getattr(logging, config["logging"]["write_level"]))
    return logger
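create_logger above expects a config that carries misc.result_dir plus logging.print_level and logging.write_level, where the levels are names understood by the logging module. A hypothetical call with placeholder values:

config = {
    "misc": {"result_dir": "results/demo"},
    "logging": {"print_level": "INFO", "write_level": "DEBUG"},
}
logger = create_logger(config, train_mode=True)
logger["train"].info("training logger ready")
logger["epoch"].info("epoch logger ready")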
Example 6
    def __init__(self, config):
        super(EnsembleLoss, self).__init__()  # Must call super __init__()

        self.logger = io_utils.get_logger("Train")

        # common options
        self.version = utils.get_value_from_dict(config, "version", "KD-MCL")
        self.use_gpu = utils.get_value_from_dict(config, "use_gpu", True)
        self.m = utils.get_value_from_dict(config, "num_models", 5)
        self.num_labels = utils.get_value_from_dict(config, "num_labels", 28)
        self.print_every = 20
        self.log_every = 500

        # options for computing assignments
        self.k = utils.get_value_from_dict(config, "num_overlaps", 2)
        self.tau = utils.get_value_from_dict(config, "tau", -1)
        self.beta = utils.get_value_from_dict(config, "beta", 0.75)

        # options for margin-MCL
        self.margin_threshold = utils.get_value_from_dict(
            config, "margin_threshold", 1.0)
        self.use_logit = utils.get_value_from_dict(config, "margin_in_logit",
                                                   True)

        # options for attention transfer
        self.use_attention_transfer = utils.get_value_from_dict(
            config, "use_attention_transfer", False)
        if self.use_attention_transfer:
            self.att_transfer_beta = utils.get_value_from_dict(
                config, "att_transfer_beta", 1000)

        # options for assignment model
        self.use_assignment_model = utils.get_value_from_dict(
            config, "use_assignment_model", False)

        if self.use_assignment_model:
            self.assignment_criterion = nn.CrossEntropyLoss()

        self.iteration = 0
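Example 6 reads every option through utils.get_value_from_dict, which is not included in these snippets. Judging from its (dict, key, default) call pattern, a minimal sketch would be:

def get_value_from_dict(dic, key, default_value):
    """ Return dic[key] when the key is present and not None,
    otherwise fall back to default_value. """
    if dic is not None and dic.get(key) is not None:
        return dic[key]
    return default_value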
Example 7
def main(params):

    # load configuration of pre-trained models
    exp_path = os.path.join("results", params["dataset"],
                            params["model_type"], params["exp"])
    config_path = os.path.join(exp_path, "config.yml")
    config = io_utils.load_yaml(config_path)
    params["config_path"] = config_path
    config = M.override_config_from_params(config, params)
    config["exp_path"] = exp_path
    cmf.create_save_dirs(config["misc"])

    # create logger
    logger_path = os.path.join(config["exp_path"], "evaluation.log")
    logger = io_utils.get_logger("Evaluate", log_file_path=logger_path)

    """ Build data loader """
    loader_config = io_utils.load_yaml(params["loader_config_path"])
    dset = dataset.DataSet(loader_config)
    L = data.DataLoader(dset, batch_size=loader_config["batch_size"],
                        num_workers=params["num_workers"],
                        shuffle=False, collate_fn=dataset.collate_fn)
    config = M.override_config_from_loader(config, dset)

    if params["mode"] == "eval":

        """ Evaluating networks """
        e0 = params["start_epoch"]
        e1 = params["end_epoch"]
        e_stride = params["epoch_stride"]
        sample_data = dset.get_samples(5)
        for epoch in range(e0, e1+1, e_stride):
            """ Build network """
            net = M(config)
            net.bring_loader_info(dset)
            # ship network to use gpu
            if config["model"]["use_gpu"]:
                net.gpu_mode()

            # load checkpoint
            if not (net.classname == "ENSEMBLE" and config["model"]["version"] == "IE"):
                ckpt_path = os.path.join(exp_path, "checkpoints",
                                         "checkpoint_epoch_{:03d}.pkl".format(epoch))
                assert os.path.exists(ckpt_path), \
                    "Checkpoint does not exist ({})".format(ckpt_path)
                net.load_checkpoint(ckpt_path)

            # Re-apply curriculum learning if the checkpoint was already trained with it
            apply_cc_after = utils.get_value_from_dict(
                    config["model"], "apply_curriculum_learning_after", -1)
            if (apply_cc_after > 0) and (epoch >= apply_cc_after):
                net.apply_curriculum_learning()

            cmf.evaluate(config, L, net, epoch-1, logger_name="eval",
                         mode="Evaluation", verbose_every=100)

    elif params["mode"] == "selection":
        epoch = params["start_epoch"]
        """ Build network """
        net = M(config)
        net.bring_loader_info(dset)
        # ship network to use gpu
        if config["model"]["use_gpu"]:
            net.gpu_mode()

        # load checkpoint
        ckpt_path = os.path.join(exp_path, "checkpoints",
                                 "checkpoint_epoch_{:03d}.pkl".format(epoch))
        assert os.path.exists(ckpt_path), \
            "Checkpoint does not exist ({})".format(ckpt_path)
        net.load_checkpoint(ckpt_path)
        apply_cc_after = utils.get_value_from_dict(
                config["model"], "apply_curriculum_learning_after", -1)
        # Re-apply curriculum learning if the checkpoint was trained with it
        if (apply_cc_after > 0) and (epoch >= apply_cc_after):
            net.apply_curriculum_learning()

        cmf.get_selection_values(config, L, net, epoch-1, logger_name="eval",
                                 mode="Evaluation", verbose_every=100)