Example #1
def test_compute():
    acc = BinaryAccuracy()

    y_pred = torch.FloatTensor([0.2, 0.4, 0.6, 0.8])
    y = torch.ones(4).type(torch.LongTensor)
    acc.update((y_pred, y))
    assert acc.compute() == 0.5

    acc.reset()
    y_pred = torch.FloatTensor([0.2, 0.7, 0.8, 0.9])
    y = torch.ones(4).type(torch.LongTensor)
    acc.update((y_pred, y))
    assert acc.compute() == 0.75
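These test snippets assume imports along the following lines (a sketch; exact paths depend on the ignite version in use, since BinaryAccuracy lived in ignite.metrics before being folded into Accuracy):

import pytest
import torch
from sklearn.metrics import accuracy_score

from ignite.exceptions import NotComputableError
from ignite.metrics import BinaryAccuracy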
Example #2
def test_compute_batch_images():
    acc = BinaryAccuracy()

    y_pred = torch.FloatTensor([[[0.3, 0.7], [0.1, 0.6]],
                                [[0.2, 0.7], [0.2, 0.6]]])
    y = torch.ones(2, 2, 2).type(torch.LongTensor)  # target shape matches y_pred
    acc.update((y_pred, y))
    assert acc.compute() == 0.5

    acc.reset()
    y_pred = torch.FloatTensor([[[0.3, 0.7], [0.8, 0.6]],
                                [[0.2, 0.7], [0.9, 0.6]]])
    y = torch.ones(2, 2, 2).type(torch.LongTensor)
    acc.update((y_pred, y))
    assert acc.compute() == 0.75
Example #3
def get_metrics(non_binary_y_target):
    metrics = {
        'accuracy': BinaryAccuracy(output_transform=zero_one_transform),
        'bce': Loss(nn.modules.loss.BCELoss()),
        'f1_score': F1_Score(output_transform=zero_one_transform),
        'roc_auc': ROC_AUC(),
        'precision': Precision(output_transform=zero_one_transform),
        'recall': Recall(output_transform=zero_one_transform),
        'conf_matrix': ConfusionMatrix(output_transform=zero_one_transform),
        # 'positive_stat':    PositiveStatistics(non_binary_y_target),
    }
    return metrics
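zero_one_transform is defined elsewhere in this project and is not shown here; a minimal sketch of what such a transform typically does (an assumption, not the project's actual code) is to binarize the predicted probabilities before they reach the metrics:

def zero_one_transform(output):
    # Round sigmoid probabilities to hard 0/1 labels; pass targets through unchanged.
    y_pred, y = output
    return torch.round(y_pred).long(), y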
Example #4
def test_zero_div():
    acc = BinaryAccuracy()
    with pytest.raises(NotComputableError):
        acc.compute()
Example #5
def test_warning():
    with pytest.warns(DeprecationWarning):
        BinaryAccuracy()
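This test records that BinaryAccuracy raises a DeprecationWarning; in later ignite releases the equivalent behaviour comes from the generic Accuracy metric (version-dependent, shown only as an aside):

from ignite.metrics import Accuracy

# Binary case: Accuracy expects y_pred to already be 0/1,
# e.g. produced by a thresholding output_transform.
acc = Accuracy()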
Example #6
def test_compute_batch_images():
    acc = BinaryAccuracy()

    y_pred = torch.sigmoid(torch.rand(1, 2, 2))
    y = torch.ones(1, 2, 2).type(torch.LongTensor)
    y_pred = y_pred.unsqueeze(1)
    indices = torch.max(torch.cat([1.0 - y_pred, y_pred], dim=1), dim=1)[1]
    acc.update((y_pred, y))
    assert isinstance(acc.compute(), float)
    assert accuracy_score(
        y.view(-1).data.numpy(),
        indices.view(-1).data.numpy()) == pytest.approx(acc.compute())

    acc.reset()
    y_pred = torch.sigmoid(torch.rand(2, 1, 2, 2))
    y = torch.ones(2, 2, 2).type(torch.LongTensor)
    indices = torch.max(torch.cat([1.0 - y_pred, y_pred], dim=1), dim=1)[1]
    acc.update((y_pred, y))
    assert isinstance(acc.compute(), float)
    assert accuracy_score(
        y.view(-1).data.numpy(),
        indices.view(-1).data.numpy()) == pytest.approx(acc.compute())

    acc.reset()
    y_pred = torch.sigmoid(torch.rand(2, 1, 2, 2))
    y = torch.ones(2, 1, 2, 2).type(torch.LongTensor)
    indices = torch.max(torch.cat([1.0 - y_pred, y_pred], dim=1), dim=1)[1]
    acc.update((y_pred, y))
    assert isinstance(acc.compute(), float)
    assert accuracy_score(
        y.view(-1).data.numpy(),
        indices.view(-1).data.numpy()) == pytest.approx(acc.compute())
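The torch.cat/torch.max construction above recovers the 0/1 class index from a single-channel probability map; away from the 0.5 boundary it agrees with a plain threshold (an illustrative check, not part of the original test):

# Equivalent as long as no probability is exactly 0.5:
indices_alt = (y_pred > 0.5).long().squeeze(1)
assert torch.equal(indices, indices_alt)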
Example #7
def main():
    starttime = time.time()

    # Set up logging and plotting
    logfile = 'log.csv'
    logheader = ("Epoch", "Train Loss", "Train Error", "Val Loss", "Val Error")
    plots = [("plot_error.pdf", {
        'x': logheader[0],
        'y': [logheader[2], logheader[4]],
        'ylim': (0, 1)
    }),
             ("plot_loss.pdf", {
                 'x': logheader[0],
                 'y': [logheader[1], logheader[3]]
             })]
    logger = Logger(*logheader, file=logfile)
    plotter = Plotter(logfile, plots)
    atexit.register(plotter.save)  # plot on exit

    args = get_args()
    set_random_seed(args.seed)

    device = torch.device("cuda" if args.cuda else "cpu")

    train_loader, val_loader = get_data_loaders(args.train_datadir,
                                                args.train_batchsize,
                                                args.val_datadir,
                                                args.val_batchsize, args.cuda)

    classifier = Supermodel(style.models.index, args.model).to(device)

    criterion = nn.BCELoss()
    optimizer = optim.SGD(classifier.model.parameters(),
                          lr=args.lr,
                          momentum=0.95)

    trainer = create_supervised_trainer(classifier,
                                        optimizer,
                                        criterion,
                                        device=device)
    evaluator = create_supervised_evaluator(classifier,
                                            metrics={
                                                'accuracy': BinaryAccuracy(),
                                                'loss': Loss(criterion)
                                            },
                                            device=device)

    @trainer.on(Events.STARTED)
    def init(engine):
        print("Timestamp {}".format(starttime), "\n")
        for arg in vars(args):  # Arguments
            print(arg, getattr(args, arg))
        print()
        print(classifier, "\n")  # Model
        print("Startup took {:.3f} sec".format(time.time() - starttime))

    @trainer.on(Events.EPOCH_COMPLETED)
    def evaluate(engine):
        # Evaluation on training set
        evaluator.run(train_loader)
        train_metrics = evaluator.state.metrics
        train_accuracy = train_metrics['accuracy']
        train_error = 1. - train_accuracy
        train_loss = train_metrics['loss']

        # Evaluation on validation set
        evaluator.run(val_loader)
        val_metrics = evaluator.state.metrics
        val_accuracy = val_metrics['accuracy']
        val_error = 1. - val_accuracy
        val_loss = val_metrics['loss']

        logger.update(engine.state.epoch, train_loss, train_error, val_loss,
                      val_error)

        print("Epoch {:>4}\t"
              "Train Loss  {:.4f}\t"
              "Train Acc  {:.2f}%\t"
              "Val Loss  {:.4f}\t"
              "Val Acc  {:.2f}%\t"
              "\tTime  {:.0f} sec\t".format(engine.state.epoch, train_loss,
                                            train_accuracy * 100, val_loss,
                                            val_accuracy * 100,
                                            time.time() - starttime))

        if val_error < classifier.best[1] and args.autosave:
            savetimer = time.time()
            classifier.best = (engine.state.epoch, val_error, val_loss)
            saved = classifier.save(engine.state.epoch, rmprev=args.autodelete)
            print(
                "New best (val error {}%), model state saved as '{}' ({:.3f} sec)"
                .format(val_error * 100, saved,
                        time.time() - savetimer))

    trainer.run(train_loader, max_epochs=args.epochs)
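nn.BCELoss (used both as the training criterion and inside Loss) expects probabilities in [0, 1], so the model wrapped by Supermodel presumably ends in a sigmoid; if it emitted raw logits instead, the numerically safer drop-in would be (an aside, not part of the original script):

criterion = nn.BCEWithLogitsLoss()  # only if the model outputs raw logits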