Example No. 1
def train(conf):
    logger = util.Logger(conf)
    if not os.path.exists(conf.checkpoint_dir):
        os.makedirs(conf.checkpoint_dir)

    model_name = conf.model_name
    dataset_name = "ClassificationDataset"
    collate_name = "FastTextCollator" if model_name == "FastText" \
        else "ClassificationCollator"
    train_data_loader, validate_data_loader, test_data_loader = \
        get_data_loader(dataset_name, collate_name, conf)
    empty_dataset = globals()[dataset_name](conf, [], mode="train")
    model = get_classification_model(model_name, empty_dataset, conf)
    loss_fn = globals()["ClassificationLoss"](
        label_size=len(empty_dataset.label_map),
        loss_type=conf.train.loss_type)
    optimizer = get_optimizer(conf, model)
    evaluator = cEvaluator(conf.eval.dir)
    trainer = globals()["ClassificationTrainer"](empty_dataset.label_map,
                                                 logger, evaluator, conf,
                                                 loss_fn)

    best_epoch = -1
    best_performance = 0
    model_file_prefix = conf.checkpoint_dir + "/" + model_name
    for epoch in range(conf.train.start_epoch,
                       conf.train.start_epoch + conf.train.num_epochs):
        start_time = time.time()
        trainer.train(train_data_loader, model, optimizer, "Train", epoch)
        trainer.eval(train_data_loader, model, optimizer, "Train", epoch)
        performance = trainer.eval(validate_data_loader, model, optimizer,
                                   "Validate", epoch)
        trainer.eval(test_data_loader, model, optimizer, "test", epoch)
        if performance > best_performance:  # record the best model
            best_epoch = epoch
            best_performance = performance
        save_checkpoint(
            {
                'epoch': epoch,
                'model_name': model_name,
                'state_dict': model.state_dict(),
                'best_performance': best_performance,
                'optimizer': optimizer.state_dict(),
            }, model_file_prefix)
        time_used = time.time() - start_time
        logger.info("Epoch %d cost time: %d second" % (epoch, time_used))

    # best model on validation set
    best_epoch_file_name = model_file_prefix + "_" + str(best_epoch)
    best_file_name = model_file_prefix + "_best"
    shutil.copyfile(best_epoch_file_name, best_file_name)

    load_checkpoint(model_file_prefix + "_" + str(best_epoch), conf, model,
                    optimizer)
    trainer.eval(test_data_loader, model, optimizer, "Best test", best_epoch)
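
Note: save_checkpoint and load_checkpoint are not shown in these examples. Since the code above copies model_file_prefix + "_" + str(best_epoch), the saver presumably writes one file per epoch under that naming scheme. Below is a minimal sketch under that assumption; the real helpers in the codebase may differ.

import torch

def save_checkpoint(state, file_prefix):
    # Assumption: one checkpoint per epoch, named "<prefix>_<epoch>", which is
    # what the shutil.copyfile call in train() relies on.
    torch.save(state, file_prefix + "_" + str(state["epoch"]))

def load_checkpoint(file_name, conf, model, optimizer):
    # Minimal sketch; conf is kept only for signature compatibility.
    checkpoint = torch.load(file_name, map_location="cpu")
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
    return checkpoint.get("best_performance", 0)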
Example No. 2
def kfold_eval(conf):
    logger = util.Logger(conf)
    model_name = conf.model_name
    dataset_name = "ClassificationDataset"
    collate_name = "FastTextCollator" if model_name == "FastText" \
        else "ClassificationCollator"

    test_dataset = globals()[dataset_name](conf, conf.data.test_json_files)
    collate_fn = globals()[collate_name](conf, len(test_dataset.label_map))
    test_data_loader = DataLoader(
        test_dataset, batch_size=conf.eval.batch_size, shuffle=False,
        num_workers=conf.data.num_worker, collate_fn=collate_fn,
        pin_memory=True)

    empty_dataset = globals()[dataset_name](conf, [])
    model = get_classification_model(model_name, empty_dataset, conf)
    optimizer = get_optimizer(conf, model)
    load_checkpoint(conf.eval.model_dir, conf, model, optimizer)
    model.eval()
    predict_probs = []
    standard_labels = []
    evaluator = cEvaluator(conf.eval.dir)
    for batch in test_data_loader:
        with torch.no_grad():  # inference only; gradients are not needed
            logits = model(batch)
        result = torch.sigmoid(logits).cpu().tolist()
        predict_probs.extend(result)
        standard_labels.extend(batch[ClassificationDataset.DOC_LABEL_LIST])

    # ---------------------------- EVALUATION API ----------------------------
    y_test = list(standard_labels)
    predictions = list(predict_probs)

    pred, actual = take_values(predictions, y_test,
                               conf.eval.threshold, conf.eval.top_k)
    actual = np.array(actual)
    pred = np.array(pred)

    evaluation_measures = {
        "Accuracy": accuracy(actual, pred),
        "Precision": precision(actual, pred),
        "Recall": recall(actual, pred),
        "F1 score": f1_scor(actual, pred),
        "Hamming Loss": hammingLoss(actual, pred),
        "f-1 Macro": macroF1(actual, pred),
        "f-1 Micro": microF1(actual, pred),
        "averagePrecision": averagePrecision(actual, pred),
    }
    return evaluation_measures
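
take_values and the metric helpers (accuracy, precision, f1_scor, ...) are defined elsewhere in the codebase. Below is a hypothetical sketch of what take_values could do, i.e. turn the probability lists and gold label-id lists into aligned multi-hot arrays using the configured threshold and top_k; the real helper may behave differently.

import numpy as np

def take_values(predict_probs, gold_label_ids, threshold, top_k):
    # Hypothetical: binarize scores by threshold, keeping at most top_k labels
    # per document, and build matching multi-hot gold vectors.
    num_labels = len(predict_probs[0])
    pred, actual = [], []
    for probs, labels in zip(predict_probs, gold_label_ids):
        probs = np.asarray(probs)
        keep = [int(i) for i in np.argsort(-probs)[:top_k]
                if probs[i] >= threshold]
        row = np.zeros(num_labels, dtype=int)
        row[np.array(keep, dtype=int)] = 1
        pred.append(row)
        gold = np.zeros(num_labels, dtype=int)
        gold[np.array(list(labels), dtype=int)] = 1  # labels assumed to be id lists
        actual.append(gold)
    return pred, actual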
Example No. 3
def eval(conf):
    logger = util.Logger(conf)
    model_name = conf.model_name
    dataset_name = "ClassificationDataset"
    collate_name = "FastTextCollator" if model_name == "FastText" \
        else "ClassificationCollator"

    test_dataset = globals()[dataset_name](conf, conf.data.test_json_files)
    collate_fn = globals()[collate_name](conf, len(test_dataset.label_map))
    test_data_loader = DataLoader(test_dataset,
                                  batch_size=conf.eval.batch_size,
                                  shuffle=False,
                                  num_workers=conf.data.num_worker,
                                  collate_fn=collate_fn,
                                  pin_memory=True)

    empty_dataset = globals()[dataset_name](conf, [])
    model = get_classification_model(model_name, empty_dataset, conf)
    optimizer = get_optimizer(conf, model.parameters())
    load_checkpoint(conf.eval.model_dir, conf, model, optimizer)
    model.eval()
    is_multi = False
    if conf.task_info.label_type == ClassificationType.MULTI_LABEL:
        is_multi = True
    predict_probs = []
    standard_labels = []
    evaluator = cEvaluator(conf.eval.dir)
    for batch in test_data_loader:
        with torch.no_grad():  # inference only; gradients are not needed
            logits = model(batch)
        if not is_multi:
            result = torch.nn.functional.softmax(logits, dim=1).cpu().tolist()
        else:
            result = torch.sigmoid(logits).cpu().tolist()
        predict_probs.extend(result)
        standard_labels.extend(batch[ClassificationDataset.DOC_LABEL_LIST])
    (_, precision_list, recall_list, fscore_list, right_list,
     predict_list, standard_list) = \
        evaluator.evaluate(
            predict_probs, standard_label_ids=standard_labels, label_map=empty_dataset.label_map,
            threshold=conf.eval.threshold, top_k=conf.eval.top_k,
            is_flat=conf.eval.is_flat, is_multi=is_multi)
    logger.warn(
        "Performance is precision: %f, "
        "recall: %f, fscore: %f, right: %d, predict: %d, standard: %d." %
        (precision_list[0][cEvaluator.MICRO_AVERAGE],
         recall_list[0][cEvaluator.MICRO_AVERAGE],
         fscore_list[0][cEvaluator.MICRO_AVERAGE],
         right_list[0][cEvaluator.MICRO_AVERAGE],
         predict_list[0][cEvaluator.MICRO_AVERAGE],
         standard_list[0][cEvaluator.MICRO_AVERAGE]))
    evaluator.save()
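
For reference, the micro-averaged numbers logged above relate to the raw counts in the usual way (right = true positives, predict = number of predicted labels, standard = number of gold labels). This is a standard-definition illustration, not cEvaluator's actual code:

def micro_scores(right, predict, standard):
    # Micro-averaged precision/recall/F1 from aggregate counts.
    precision = right / predict if predict else 0.0
    recall = right / standard if standard else 0.0
    fscore = (2 * precision * recall / (precision + recall)
              if precision + recall else 0.0)
    return precision, recall, fscore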
Example No. 4
def train(conf):
    model_name = conf.model_name
    logger = util.Logger(conf)
    if conf.task_info.weak_pretrain:
        logger.info("Batch Size: " + str(conf.train.batch_size) +
                    " Pretrain Num Epoch: " +
                    str(conf.train.pretrain_num_epochs))
    else:
        logger.info("Batch Size: " + str(conf.train.batch_size))

    # NOTE: the data loaders and empty_dataset used below are assumed to be
    # built exactly as in train() from Example No. 1; unlabeled_train_data_loader
    # is likewise assumed to come from the weak-supervision split via the same
    # helper.
    dataset_name = "ClassificationDataset"
    collate_name = "FastTextCollator" if model_name == "FastText" \
        else "ClassificationCollator"
    train_data_loader, validate_data_loader, test_data_loader = \
        get_data_loader(dataset_name, collate_name, conf)
    empty_dataset = globals()[dataset_name](conf, [], mode="train")

    if conf.task_info.weak_pretrain and conf.task_info.weak_data_augmentation:
        model_teacher = get_classification_model(model_name, empty_dataset,
                                                 conf)
        if conf.model_name != "BERT":
            optimizer_teacher = get_optimizer(conf, model_teacher)
        else:
            optimizer_teacher = AdamW(model_teacher.parameters(),
                                      lr=5e-2,
                                      eps=1e-2)
        # optimizer_teacher: optimizer for teacher model

    model_target = get_classification_model(model_name, empty_dataset, conf)
    loss_fn = globals()["ClassificationLoss"](
        label_size=len(empty_dataset.label_map),
        loss_type=conf.train.loss_type)

    if conf.task_info.weak_pretrain:
        if conf.model_name != "BERT":
            optimizer_weak = get_optimizer(conf, model_target)
        else:
            optimizer_weak = AdamW(model_target.parameters(),
                                   lr=5e-2,
                                   eps=1e-2)
        # optimizer_weak: optimizer for target model pretraining stage
    if conf.model_name != "BERT":
        optimizer_target = get_optimizer(conf, model_target)
    else:
        optimizer_target = AdamW(model_target.parameters(), lr=5e-2, eps=1e-2)
    # optimizer_target: optimizer for target model fine-tuning stage
    evaluator = cEvaluator(conf.eval.dir)

    trainer_target = globals()["ClassificationTrainer"](
        empty_dataset.label_map, logger, evaluator, conf, loss_fn)
    # trainer_target: trainer for target model on fine-tuning stage
    if conf.task_info.weak_pretrain:
        trainer_weak = globals()["ClassificationTrainer"](
            empty_dataset.label_map, logger, evaluator, conf, loss_fn)
        # trainer_weak: trainer for target model on pretraining stage
        if conf.task_info.weak_data_augmentation:
            trainer_teacher = globals()["ClassificationTrainer"](
                empty_dataset.label_map, logger, evaluator, conf, loss_fn)
            # trainer_teacher: trainer for teacher model

    if conf.task_info.weak_data_augmentation:
        best_epoch = -1
        best_performance = 0
        model_file_prefix = conf.checkpoint_dir + "/" + model_name + "_teacher"

        logger.info("Training Teacher Model on Labeled Data")
        for epoch in range(conf.train.start_epoch,
                           conf.train.start_epoch + conf.train.num_epochs):
            start_time = time.time()
            trainer_teacher.train(train_data_loader, model_teacher,
                                  optimizer_teacher, "Train", epoch)
            trainer_teacher.eval(train_data_loader, model_teacher,
                                 optimizer_teacher, "Train", epoch)
            performance = trainer_teacher.eval(validate_data_loader,
                                               model_teacher,
                                               optimizer_teacher, "Validate",
                                               epoch)
            trainer_teacher.eval(test_data_loader, model_teacher,
                                 optimizer_teacher, "Test", epoch)

            if performance > best_performance:  # record the best model
                best_epoch = epoch
                best_performance = performance
                temp_model = model_teacher
                save_checkpoint(
                    {
                        'epoch': epoch,
                        'model_name': model_name,
                        'state_dict': model_teacher.state_dict(),
                        'best_performance': best_performance,
                        'optimizer': optimizer_teacher.state_dict(),
                    }, model_file_prefix)

            time_used = time.time() - start_time
            logger.info("Epoch %d cost time: %d second" % (epoch, time_used))
    best_epoch = -1
    best_performance = 0
    if conf.task_info.weak_pretrain:
        if conf.task_info.weak_data_augmentation:
            unlabeled_data_train_data_loader = select_unlabeled_data(
                temp_model, unlabeled_train_data_loader,
                len(trainer_weak.label_map), conf)

        logger.info("Pretraining on Weak Supervision Data")
        for epoch in range(
                conf.train.start_epoch,
                conf.train.start_epoch + conf.train.pretrain_num_epochs):
            start_time = time.time()
            trainer_weak.train(unlabeled_train_data_loader, model_target,
                               optimizer_weak, "Train", epoch)
            trainer_weak.eval(unlabeled_train_data_loader, model_target,
                              optimizer_weak, "Train", epoch)
            performance = trainer_weak.eval(validate_data_loader, model_target,
                                            optimizer_weak, "Validate", epoch)
            trainer_weak.eval(test_data_loader, model_target, optimizer_weak,
                              "Test", epoch)

            if performance > best_performance:  # record the best model
                best_epoch = epoch
                best_performance = performance
                temp_model = model_target
            time_used = time.time() - start_time
            logger.info("Epoch %d cost time: %d second" % (epoch, time_used))
        model_target = temp_model

    logger.info("Fine-tuning on Labeled Data")

    best_epoch = -1
    best_performance = 0
    if conf.task_info.weak_pretrain:
        if conf.task_info.weak_data_augmentation:
            model_file_prefix = (
                conf.checkpoint_dir + "/" + model_name + "-Augmentation-" +
                conf.task_info.Augmentation_Method + "-Pretrain" +
                str(conf.train.pretrain_num_epochs) + "-Batch" +
                str(conf.train.batch_size))
        else:
            model_file_prefix = (
                conf.checkpoint_dir + "/" + model_name +
                "-WeakSupervision-Pretrain" +
                str(conf.train.pretrain_num_epochs) + "-Batch" +
                str(conf.train.batch_size))
    else:
        model_file_prefix = (conf.checkpoint_dir + "/" + model_name +
                             "-Batch" + str(conf.train.batch_size))
    for epoch in range(conf.train.start_epoch,
                       conf.train.start_epoch + conf.train.num_epochs):
        start_time = time.time()
        trainer_target.train(train_data_loader, model_target, optimizer_target,
                             "Train", epoch)
        trainer_target.eval(train_data_loader, model_target, optimizer_target,
                            "Train", epoch)
        performance = trainer_target.eval(validate_data_loader, model_target,
                                          optimizer_target, "Validate", epoch)
        trainer_target.eval(test_data_loader, model_target, optimizer_target,
                            "Test", epoch)
        if performance > best_performance:  # record the best model
            best_epoch = epoch
            best_performance = performance
            temp_model = model_target
            save_checkpoint(
                {
                    'epoch': epoch,
                    'model_name': model_name,
                    'state_dict': model_target.state_dict(),
                    'best_performance': best_performance,
                    'optimizer': optimizer_target.state_dict(),
                }, model_file_prefix)
        time_used = time.time() - start_time
        logger.info("Epoch %d cost time: %d second" % (epoch, time_used))

    logger.info("The Best Performance on Validation Data and Test Data")
    #best_epoch_file_name = model_file_prefix + "_" + str(best_epoch)
    #best_file_name = model_file_prefix + "_best"
    #shutil.copyfile(best_epoch_file_name, best_file_name)
    #load_checkpoint(model_file_prefix + "_" + str(best_epoch), conf, model,
    #                optimizer)
    model = temp_model
    trainer_target.eval(train_data_loader, model, optimizer_target,
                        "Best Train", best_epoch)
    trainer_target.eval(validate_data_loader, model, optimizer_target,
                        "Best Validate", best_epoch)
    trainer_target.eval(test_data_loader, model, optimizer_target, "Best Test",
                        best_epoch)
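
select_unlabeled_data is not shown in the snippet. The sketch below is a hypothetical illustration of the idea it presumably implements: run the trained teacher over the unlabeled loader and keep only batches with confident pseudo-labels. The confidence_threshold parameter is an assumption, and the real function most likely rebuilds a DataLoader of pseudo-labeled examples rather than returning raw batches.

import torch

def select_unlabeled_data(teacher_model, unlabeled_loader, label_size, conf,
                          confidence_threshold=0.9):
    # Keep (batch, teacher probabilities) pairs whose most confident label
    # exceeds the threshold; everything else is discarded.
    teacher_model.eval()
    kept_batches = []
    with torch.no_grad():
        for batch in unlabeled_loader:
            probs = torch.sigmoid(teacher_model(batch))
            confident = probs.max(dim=1).values >= confidence_threshold
            if confident.any():
                kept_batches.append((batch, probs))
    return kept_batches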
Example No. 5
def eval(conf):
    logger = util.Logger(conf)
    model_name = conf.model_name
    dataset_name = "ClassificationDataset"
    collate_name = "FastTextCollator" if model_name == "FastText" \
        else "ClassificationCollator"

    test_dataset = globals()[dataset_name](conf, conf.data.test_json_files)
    collate_fn = globals()[collate_name](conf, len(test_dataset.label_map))
    test_data_loader = DataLoader(
        test_dataset, batch_size=conf.eval.batch_size, shuffle=False,
        num_workers=conf.data.num_worker, collate_fn=collate_fn,
        pin_memory=True)

    empty_dataset = globals()[dataset_name](conf, [])
    model = get_classification_model(model_name, empty_dataset, conf)
    optimizer = get_optimizer(conf, model)
    load_checkpoint(conf.eval.model_dir, conf, model, optimizer)
    model.eval()
    is_multi = False
    if conf.task_info.label_type == ClassificationType.MULTI_LABEL:
        is_multi = True
    predict_probs = []
    standard_labels = []
    evaluator = cEvaluator(conf.eval.dir)
    for batch in test_data_loader:
        with torch.no_grad():
            logits = model(batch)
        if not is_multi:
            result = torch.nn.functional.softmax(logits, dim=1)
        else:
            result = torch.sigmoid(logits)
        result = result.detach().cpu().tolist()
        predict_probs.extend(result)
        standard_labels.extend(batch[ClassificationDataset.DOC_LABEL_LIST])
    if conf.eval.is_flat:
        (_, precision_list, recall_list, fscore_list, right_list,
         predict_list, standard_list, pak_dict, rak_dict, rpak_dict, ndcgak_dict) = \
            evaluator.evaluate(
                predict_probs, standard_label_ids=standard_labels, label_map=empty_dataset.label_map,
                threshold=conf.eval.threshold, top_k=conf.eval.top_k,
                is_flat=conf.eval.is_flat, is_multi=is_multi,
                debug_file_name=conf.eval.debug_file_name,
                is_label_split=conf.data.generate_label_group,
                label_split_json_file=os.path.join(conf.data.dict_dir,
                                                   "{}.json".format(ClassificationDataset.DOC_LABEL_GROUP)),
                instance_remove=conf.eval.instance_remove
            )
        sup_message = ""
        for i in range(1, conf.eval.top_k + 1):
            for group in pak_dict[i]:
                sup_message += "Precision at {} of {} group: {}, ".format(i, group, pak_dict[i][group])
                sup_message += "Recall at {} of {} group: {}, ".format(i, group, rak_dict[i][group])
                sup_message += "R-Precision at {} of {} group: {}, ".format(i, group, rpak_dict[i][group])
                sup_message += "nDCG at {} of {} group: {}, ".format(i, group, ndcgak_dict[i][group])

        message = "Performance is precision: {}, recall: {}, fscore: {}, " + \
                  "macro-fscore: {}, right: {}, predict: {}, standard: {}, "
        logger.warn(message.format(
            precision_list[0][cEvaluator.MICRO_AVERAGE],
            recall_list[0][cEvaluator.MICRO_AVERAGE],
            fscore_list[0][cEvaluator.MICRO_AVERAGE],
            fscore_list[0][cEvaluator.MACRO_AVERAGE],
            right_list[0][cEvaluator.MICRO_AVERAGE],
            predict_list[0][cEvaluator.MICRO_AVERAGE],
            standard_list[0][cEvaluator.MICRO_AVERAGE]) +
            sup_message)
    else:
        (_, precision_list, recall_list, fscore_list, right_list,
         predict_list, standard_list) = \
            evaluator.evaluate(
                predict_probs, standard_label_ids=standard_labels, label_map=empty_dataset.label_map,
                threshold=conf.eval.threshold, top_k=conf.eval.top_k,
                is_flat=conf.eval.is_flat, is_multi=is_multi,
                is_label_split=conf.data.generate_label_group,
                label_split_json_file=os.path.join(conf.data.dict_dir,
                                                   "{}.json".format(ClassificationDataset.DOC_LABEL_GROUP))
            )
        logger.warn(
            "Performance is precision: %f, "
            "recall: %f, fscore: %f, right: %d, predict: %d, standard: %d." % (
                precision_list[0][cEvaluator.MICRO_AVERAGE],
                recall_list[0][cEvaluator.MICRO_AVERAGE],
                fscore_list[0][cEvaluator.MICRO_AVERAGE],
                right_list[0][cEvaluator.MICRO_AVERAGE],
                predict_list[0][cEvaluator.MICRO_AVERAGE],
                standard_list[0][cEvaluator.MICRO_AVERAGE]))
    evaluator.save()
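
The per-group metrics reported through pak_dict, rak_dict, rpak_dict and ndcgak_dict in the flat branch follow the usual ranking definitions. As a standard-definition illustration (not cEvaluator's internals), precision@k and nDCG@k over a ranked label list look like this:

import math

def precision_at_k(ranked_label_ids, gold_label_ids, k):
    # Fraction of the top-k ranked labels that are gold labels.
    top = ranked_label_ids[:k]
    return sum(1 for label in top if label in gold_label_ids) / k

def ndcg_at_k(ranked_label_ids, gold_label_ids, k):
    # Binary-relevance nDCG: DCG of the top-k ranking divided by the ideal DCG.
    dcg = sum(1.0 / math.log2(i + 2)
              for i, label in enumerate(ranked_label_ids[:k])
              if label in gold_label_ids)
    ideal = sum(1.0 / math.log2(i + 2)
                for i in range(min(k, len(gold_label_ids))))
    return dcg / ideal if ideal else 0.0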