Example #1
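A validation routine from a training application: it evaluates the model over a held-out set, accumulates the loss, and tracks R2 and RMSE with ignite metric instances before logging everything for TensorBoard. The `self.app` container and the `summary` helper belong to the surrounding project.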
        def validate_network(epoch):
            # Requires torch plus ignite's R2Score and RootMeanSquaredError
            # metrics; `self.app` and `summary` come from the surrounding
            # application.
            self.app.model.eval()  # switch the model to evaluation mode
            val_loss = 0.0
            # Fresh metric instances for this epoch (assumes a CUDA device)
            r2score = R2Score(device='cuda')
            rmse = RootMeanSquaredError(device='cuda')
            pred = []
            with torch.no_grad():  # no gradients needed during validation
                for datum in self.validate_set:
                    x, y = datum
                    out = self.app.model(x)
                    loss = self.app.loss_function(out, y)
                    # Flatten each batch of predictions into a single list
                    for row in out.tolist():
                        pred.extend(row)
                    val_loss += loss.item()
                    r2score.update((out, y))
                    rmse.update((out, y))

            val_loss /= len(self.validate_set)
            print('Test finished, loss=%f, r2=%f, rmse=%f'
                  % (val_loss, r2score.compute(), rmse.compute()))

            # Log the validation loss, R2 and RMSE for TensorBoard
            self.app.train_summary.add_scalar('test_r2', r2score.compute(), epoch)
            self.app.train_summary.add_scalar('Test_Loss', val_loss, epoch)
            self.app.train_summary.add_scalar('test_rmse', rmse.compute(), epoch)

            summary(epoch, pred)
Example #2
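An integration test that drives R2Score through an ignite Engine, using `output_transform` to pick the `(y_pred, y)` pair out of a three-element engine output, then checks the result against scikit-learn. The import paths added below assume an ignite release in which R2Score lives under `ignite.contrib.metrics.regression`.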
import numpy as np
import pytest
import torch
from sklearn.metrics import r2_score

from ignite.contrib.metrics.regression import R2Score
from ignite.engine import Engine


def test_integration_r2_score_with_output_transform():
    np.random.seed(1)
    size = 105
    np_y_pred = np.random.rand(size, 1)
    np_y = np.random.rand(size, 1)
    np.random.shuffle(np_y)

    batch_size = 15

    def update_fn(engine, batch):
        # Slice the next mini-batch out of the full arrays
        idx = (engine.state.iteration - 1) * batch_size
        y_true_batch = np_y[idx:idx + batch_size]
        y_pred_batch = np_y_pred[idx:idx + batch_size]
        return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)

    engine = Engine(update_fn)

    # output_transform extracts the (y_pred, y) pair from the 3-tuple output
    m = R2Score(output_transform=lambda x: (x[1], x[2]))
    m.attach(engine, "r2_score")

    data = list(range(size // batch_size))
    r_squared = engine.run(data, max_epochs=1).metrics["r2_score"]

    # scikit-learn's r2_score serves as the reference implementation
    assert r2_score(np_y, np_y_pred) == pytest.approx(r_squared)
Example #3
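A test that `R2Score.update()` raises `ValueError` whenever the prediction and target tensors have mismatched shapes.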
import pytest
import torch

from ignite.contrib.metrics.regression import R2Score


def test_wrong_input_shapes():
    m = R2Score()

    # update() must reject (y_pred, y) pairs whose shapes differ
    with pytest.raises(ValueError,
                       match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))

    with pytest.raises(ValueError,
                       match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))

    with pytest.raises(ValueError,
                       match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1, 2), torch.rand(4)))

    with pytest.raises(ValueError,
                       match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4), torch.rand(4, 1, 2)))
Example #4
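A fragment from a distributed test: every rank feeds its own slice of shared prediction/target tensors into an attached R2Score, and the aggregated metric is compared with scikit-learn over the full data.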
    def _test(n_epochs, metric_device):
        # Fragment from a distributed test: `rank`, `device`, `idist`
        # (ignite.distributed), Engine, R2Score and sklearn's r2_score
        # are provided by the enclosing module.
        metric_device = torch.device(metric_device)
        n_iters = 80
        s = 16

        # Each rank works on its own contiguous slice of the shared tensors
        offset = n_iters * s
        y_true = torch.randint(0, 10, size=(offset * idist.get_world_size(),)).to(device).float()
        y_preds = torch.randint(0, 10, size=(offset * idist.get_world_size(),)).to(device).float()

        def update(engine, i):
            return (
                y_preds[i * s + rank * offset : (i + 1) * s + rank * offset],
                y_true[i * s + rank * offset : (i + 1) * s + rank * offset],
            )

        engine = Engine(update)

        r2 = R2Score(device=metric_device)
        r2.attach(engine, "r2")

        data = list(range(n_iters))
        engine.run(data=data, max_epochs=n_epochs)

        assert "r2" in engine.state.metrics

        res = engine.state.metrics["r2"]
        if isinstance(res, torch.Tensor):
            res = res.cpu().numpy()

        # The distributed result must match scikit-learn on the full data
        true_res = r2_score(y_true.cpu().numpy(), y_preds.cpu().numpy())

        assert pytest.approx(res) == true_res
Example #5
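Calling `compute()` before any `update()` must raise `NotComputableError`.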
import pytest

from ignite.contrib.metrics.regression import R2Score
from ignite.exceptions import NotComputableError


def test_zero_sample():
    m = R2Score()
    # compute() before any update() must raise NotComputableError
    with pytest.raises(NotComputableError,
                       match=r"R2Score must have at least one example before it can be computed"):
        m.compute()
Example #6
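The simplest case: a single `update()` with 1-D tensors, verified against scikit-learn's `r2_score`.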
import numpy as np
import pytest
import torch
from sklearn.metrics import r2_score

from ignite.contrib.metrics.regression import R2Score


def test_r2_score():
    size = 51
    np_y_pred = np.random.rand(size)
    np_y = np.random.rand(size)

    m = R2Score()
    y_pred = torch.from_numpy(np_y_pred)
    y = torch.from_numpy(np_y)

    m.reset()
    m.update((y_pred, y))

    # A single update over all samples must match scikit-learn
    assert r2_score(np_y, np_y_pred) == pytest.approx(m.compute())
Example #7
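A shorter variant of the shape-validation test in Example #3, asserting only the exception type rather than the error message.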
import pytest
import torch

from ignite.contrib.metrics.regression import R2Score


def test_wrong_input_shapes():
    m = R2Score()

    # Mismatched (y_pred, y) shapes must raise ValueError
    with pytest.raises(ValueError):
        m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))

    with pytest.raises(ValueError):
        m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))

    with pytest.raises(ValueError):
        m.update((torch.rand(4, 1, 2), torch.rand(4)))

    with pytest.raises(ValueError):
        m.update((torch.rand(4), torch.rand(4, 1, 2)))
Example #8
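Another distributed fragment: each rank updates the metric with its own local tensors, and `compute()` is checked against scikit-learn on the all-gathered data.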
    def _test(metric_device):
        # Fragment from a distributed test: `rank`, `device`, `tol` and
        # `idist` (ignite.distributed) come from the enclosing module.
        metric_device = torch.device(metric_device)
        m = R2Score(device=metric_device)
        torch.manual_seed(10 + rank)  # different data on every rank

        y_pred = torch.randint(0, 10, size=(10,), device=device).float()
        y = torch.randint(0, 10, size=(10,), device=device).float()

        m.update((y_pred, y))

        # Gather y_pred, y from all ranks for the reference computation
        y_pred = idist.all_gather(y_pred)
        y = idist.all_gather(y)

        np_y_pred = y_pred.cpu().numpy()
        np_y = y.cpu().numpy()
        res = m.compute()
        assert r2_score(np_y, np_y_pred) == pytest.approx(res, abs=tol)
Example #9
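Batched updates: the metric accumulates over mini-batches, including a final partial batch, and must agree with scikit-learn computed on the full arrays.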
import numpy as np
import pytest
import torch
from sklearn.metrics import r2_score

from ignite.contrib.metrics.regression import R2Score


def test_r2_score_2():
    np.random.seed(1)
    size = 105
    np_y_pred = np.random.rand(size, 1)
    np_y = np.random.rand(size, 1)
    np.random.shuffle(np_y)

    m = R2Score()
    y_pred = torch.from_numpy(np_y_pred)
    y = torch.from_numpy(np_y)

    m.reset()
    batch_size = 16
    # One extra iteration covers the final partial batch
    n_iters = size // batch_size + 1
    for i in range(n_iters):
        idx = i * batch_size
        m.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size]))

    # Accumulated batches must match scikit-learn on the full arrays
    assert r2_score(np_y, np_y_pred) == pytest.approx(m.compute())
Example #10
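A complete training routine that wires R2Score into both a training and a validation evaluator, with TensorBoard logging, periodic console reports, and checkpointing every 100 epochs. As in Example #2, the import paths added below assume an ignite release with the contrib-era module layout.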
from datetime import datetime

import torch
from torch import nn
from torch.utils.data import DataLoader

from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger
from ignite.contrib.metrics.regression import R2Score
from ignite.engine import (Events, create_supervised_evaluator,
                           create_supervised_trainer)
from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine
from ignite.metrics import Loss


def train_network(model: nn.Module, training_loader: DataLoader,
                  validation_loader: DataLoader):
    """Trains the given neural network model.

    Parameters
    ----------
    model : nn.Module
        The PyTorch model to be trained.
    training_loader : DataLoader
        Training data loader.
    validation_loader : DataLoader
        Validation data loader.
    """
    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    if device == "cuda:0":
        model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.MSELoss()

    trainer = create_supervised_trainer(model,
                                        optimizer,
                                        criterion,
                                        device=device)

    # Checkpoint model, optimizer and trainer state every 100 epochs,
    # keeping the two most recent files
    save_handler = Checkpoint(
        {
            "model": model,
            "optimizer": optimizer,
            "trainer": trainer
        },
        DiskSaver("dist/models", create_dir=True),
        n_saved=2,
    )
    trainer.add_event_handler(Events.EPOCH_COMPLETED(every=100), save_handler)

    # Create a TensorBoard logger with a timestamped log directory
    tb_logger = TensorboardLogger(log_dir="logs/training" +
                                  datetime.now().strftime("-%Y%m%d-%H%M%S"),
                                  flush_secs=1)

    # Log the raw training loss on every iteration
    tb_logger.attach_output_handler(
        trainer,
        event_name=Events.ITERATION_COMPLETED,
        tag="training",
        output_transform=lambda loss: {"loss": loss},
    )

    # Evaluator for metrics on the training set
    training_evaluator = create_supervised_evaluator(model,
                                                     metrics={
                                                         "r2": R2Score(),
                                                         "MSELoss":
                                                         Loss(criterion)
                                                     },
                                                     device=device)

    tb_logger.attach_output_handler(
        training_evaluator,
        event_name=Events.EPOCH_COMPLETED,
        tag="training",
        metric_names=["MSELoss", "r2"],
        global_step_transform=global_step_from_engine(trainer),
    )

    # Evaluator for metrics on the validation set
    evaluator = create_supervised_evaluator(model,
                                            metrics={
                                                "r2": R2Score(),
                                                "MSELoss": Loss(criterion)
                                            },
                                            device=device)

    tb_logger.attach_output_handler(
        evaluator,
        event_name=Events.EPOCH_COMPLETED,
        tag="validation",
        metric_names=["MSELoss", "r2"],
        global_step_transform=global_step_from_engine(trainer),
    )

    @trainer.on(Events.EPOCH_COMPLETED(every=10))
    def log_training_results(trainer):
        training_evaluator.run(training_loader)

        metrics = training_evaluator.state.metrics
        print(
            f"Training Results - Epoch: {trainer.state.epoch}",
            f" Avg r2: {metrics['r2']:.2f} Avg loss: {metrics['MSELoss']:.2f}",
        )

    @trainer.on(Events.EPOCH_COMPLETED(every=10))
    def log_validation_results(trainer):
        evaluator.run(validation_loader)

        metrics = evaluator.state.metrics
        print(
            f"Validation Results - Epoch: {trainer.state.epoch}",
            f" Avg r2: {metrics['r2']:.2f} Avg loss: {metrics['MSELoss']:.2f}\n",
        )

    # Run effectively indefinitely; the periodic checkpoints allow resuming
    trainer.run(training_loader, max_epochs=int(1e6))