Example #1
def test_cross_entropy_loss():
    """
    Test masking for cross entropy loss.
    """
    set_default_backend("pytorch")

    y_pred = torch.rand(10, 10, 10)
    y = torch.ones(10, 1, 10)
    bins = np.linspace(0, 1, 11)
    y[:, 0, :] = 0.55

    loss = CrossEntropyLoss(bins, mask=-1.0)
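    # The target value 0.55 falls into bin index 5 of np.linspace(0, 1, 11),
    # so the reference cross entropy below uses the logits of class 5.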
    ref = -y_pred[:, 5, :] + torch.log(torch.exp(y_pred).sum(1))
    assert np.all(
        np.isclose(
            loss(y_pred, y).detach().numpy(),
            ref.mean().detach().numpy()))

    y[5:, :, :] = -1.0
    y[:, :, 5:] = -1.0
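    # With the mask value -1.0, only the first five samples and the first
    # five positions along the last axis contribute to the loss.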
    ref = -y_pred[:5, 5, :5] + torch.log(torch.exp(y_pred[:5, :, :5]).sum(1))
    assert np.all(
        np.isclose(
            loss(y_pred, y).detach().numpy(),
            ref.mean().detach().numpy()))
Example #2
def test_quantile_loss():
    """
    Ensure that quantile loss corresponds to half of absolute error
    loss and that masking works as expected.
    """
    set_default_backend("pytorch")

    loss = QuantileLoss([0.5], mask=-1e3)

    y_pred = torch.rand(10, 1, 10)
    y = torch.rand(10, 1, 10)

    l = loss(y_pred, y).detach().numpy()

    dy = (y_pred - y).detach().numpy()
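    # For the median (0.5 quantile), the pinball loss reduces to half of
    # the mean absolute error.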
    l_ref = 0.5 * np.mean(np.abs(dy))

    assert np.isclose(l, l_ref)

    y_pred = torch.rand(20, 1, 10)
    y_pred[10:] = -2e3
    y = torch.rand(20, 1, 10)
    y[10:] = -2e3

    loss = QuantileLoss([0.5], mask=-1e3)
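    # Entries below the mask threshold (-1e3) are ignored, so the loss
    # should equal the loss computed over the unmasked first half only.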
    l = loss(y_pred, y).detach().numpy()
    l_ref = loss(y_pred[:10], y[:10]).detach().numpy()
    assert np.isclose(l, l_ref)
Example #3
def test_qrnn_training_state():
    """
    Ensure that training attributes of models are conserved through
    training.
    """
    set_default_backend("pytorch")

    x = np.random.rand(1024, 16)
    y = np.random.rand(1024)
    training_data = torch.utils.data.TensorDataset(torch.tensor(x),
                                                   torch.tensor(y))
    training_loader = torch.utils.data.DataLoader(training_data,
                                                  batch_size=128)

    model = nn.Sequential(nn.BatchNorm1d(16), nn.Linear(16, 10))
    qrnn = QRNN(np.linspace(0.05, 0.95, 10), model=model)

    qrnn.model.train(False)
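    # The model is kept in evaluation mode, so the BatchNorm running
    # statistics should retain their initial values (mean 0, variance 1)
    # even after an epoch of training.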
    qrnn.train(training_loader, n_epochs=1)

    mean = model[0].running_mean.detach().numpy()
    assert np.all(np.isclose(mean, 0.0))
    var = model[0].running_var.detach().numpy()
    assert np.all(np.isclose(var, 1.0))

    qrnn.model.train(True)
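    # In training mode the running statistics are updated and should
    # deviate from their initial values.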
    qrnn.train(training_loader, n_epochs=1)

    mean = model[0].running_mean.detach().numpy()
    assert not np.all(np.isclose(mean, 0.0))
    var = model[0].running_var.detach().numpy()
    assert not np.all(np.isclose(var, 1.0))
Example #4
def test_training_multiple_outputs():
    """
    Ensure that training with batch objects as dicts and provided keys
    argument works.
    """
    set_default_backend("keras")

    class MultipleOutputModel(keras.Model):
        def __init__(self):
            super().__init__()
            self.hidden = keras.layers.Dense(128, "relu", input_shape=(16, ))
            self.head_1 = keras.layers.Dense(11, None)
            self.head_2 = keras.layers.Dense(11, None)

        def call(self, x):
            x = self.hidden(x)
            y_1 = self.head_1(x)
            y_2 = self.head_2(x)
            return {"y_1": y_1, "y_2": y_2}

    x = np.random.rand(1024, 16)
    y = np.random.rand(1024)

    batched_data = [{
        "x": x[i * 128:(i + 1) * 128],
        "y": {
            "y_1": y[i * 128:(i + 1) * 128],
            "y_2": y[i * 128:(i + 1) * 128]
        }
    } for i in range(1024 // 128)]

    model = MultipleOutputModel()
    qrnn = QRNN(np.linspace(0.05, 0.95, 11), model=model)
    qrnn.train(batched_data, n_epochs=10, keys=("x", "y"))
Example #5
    def test_qrnn(self, backend):
        """
        Test training of QRNNs using numpy arrays as input.
        """
        set_default_backend(backend)
        qrnn = QRNN(np.linspace(0.05, 0.95, 10),
                    n_inputs=self.x_train.shape[1])
        qrnn.train((self.x_train, self.y_train),
                   validation_data=(self.x_train, self.y_train),
                   n_epochs=2)

        qrnn.predict(self.x_train)

        x, qs = qrnn.cdf(self.x_train[:2, :])
        assert qs[0] == 0.0
        assert qs[-1] == 1.0

        x, y = qrnn.pdf(self.x_train[:2, :])
        assert x.shape == y.shape

        mu = qrnn.posterior_mean(self.x_train[:2, :])
        assert len(mu.shape) == 1

        r = qrnn.sample_posterior(self.x_train[:4, :], n_samples=2)
        assert r.shape == (4, 2)

        r = qrnn.sample_posterior_gaussian_fit(self.x_train[:4, :],
                                               n_samples=2)
        assert r.shape == (4, 2)
Example #6
def test_training_multiple_outputs():
    """
    Ensure that training with batch objects as dicts and provided keys
    argument works.
    """
    set_default_backend("pytorch")

    class MultipleOutputModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.hidden = nn.Linear(16, 128)
            self.head_1 = nn.Linear(128, 11)
            self.head_2 = nn.Linear(128, 11)

        def forward(self, x):
            x = torch.relu(self.hidden(x))
            y_1 = self.head_1(x)
            y_2 = self.head_2(x)
            return {"y_1": y_1, "y_2": y_2}

    x = np.random.rand(1024, 16)
    y = np.random.rand(1024)

    batched_data = [{
        "x": torch.tensor(x[i * 128:(i + 1) * 128]),
        "y": {
            "y_1": torch.tensor(y[i * 128:(i + 1) * 128]),
            "y_2": torch.tensor(y[i * 128:(i + 1) * 128])
        }
    } for i in range(1024 // 128)]

    model = MultipleOutputModel()
    qrnn = QRNN(np.linspace(0.05, 0.95, 11), model=model)
    qrnn.train(batched_data, n_epochs=5, keys=("x", "y"))
Example #7
    def test_drnn_datasets(self, backend):
        """
        Provide data as dataset object instead of numpy arrays.
        """
        set_default_backend(backend)
        backend = get_default_backend()
        data = backend.BatchedDataset((self.x_train, self.y_train), 256)
        drnn = DRNN(self.bins, n_inputs=self.x_train.shape[1])
        drnn.train(data, n_epochs=2)
Example #8
    def test_qrnn_datasets(self, backend):
        """
        Provide data as dataset object instead of numpy arrays.
        """
        set_default_backend(backend)
        backend = get_default_backend()
        data = backend.BatchedDataset((self.x_train, self.y_train), 256)
        qrnn = QRNN(np.linspace(0.05, 0.95, 10),
                    n_inputs=self.x_train.shape[1])
        qrnn.train(data, n_epochs=2)
Example #9
def test_training_transformation_mrnn_quantiles():
    """
    Ensure that training in transformed space works.
    """
    set_default_backend("pytorch")

    class MultipleOutputModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.hidden = nn.Linear(16, 128)
            self.head_1 = nn.Linear(128, 10)
            self.head_2 = nn.Linear(128, 1)
            self.head_3 = nn.Linear(128, 10)

        def forward(self, x):
            x = torch.relu(self.hidden(x))
            y_1 = self.head_1(x)
            y_2 = self.head_2(x)
            y_3 = self.head_3(x)
            return {"y_1": y_1, "y_2": y_2, "y_3": y_3}

    x = np.random.rand(2024, 16) + 1.0
    y = np.sum(x, axis=-1)
    y += np.random.normal(size=y.size)

    batched_data = [{
        "x":
        torch.tensor(x[i * 128:(i + 1) * 128], dtype=torch.float32),
        "y": {
            "y_1": torch.tensor(y[i * 128:(i + 1) * 128], dtype=torch.float32),
            "y_2": torch.tensor(y[i * 128:(i + 1) * 128]**2,
                                dtype=torch.float32),
            "y_3": torch.tensor(y[i * 128:(i + 1) * 128]**2,
                                dtype=torch.float32)
        }
    } for i in range(1024 // 128)]

    model = MultipleOutputModel()
    transformations = {"y_1": Log10(), "y_2": Log10()}
    losses = {
        "y_1": Quantiles(np.linspace(0.05, 0.95, 10)),
        "y_2": Mean(),
        "y_3": Density(np.linspace(-2, 2, 11))
    }

    mrnn = MRNN(losses=losses, model=model)
    metrics = [
        "Bias", "CRPS", "MeanSquaredError", "ScatterPlot", "CalibrationPlot"
    ]
    mrnn.train(batched_data,
               validation_data=batched_data,
               n_epochs=5,
               keys=("x", "y"),
               metrics=metrics)
Example #10
def test_training_multi_mrnn():
    """
    Ensure that training an MRNN with multiple outputs and per-output
    losses works.
    """
    set_default_backend("pytorch")

    class MultipleOutputModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.hidden = nn.Linear(16, 128)
            self.head_1 = nn.Linear(128, 10)
            self.head_2 = nn.Linear(128, 1)
            self.head_3 = nn.Linear(128, 20)

        def forward(self, x):
            x = torch.relu(self.hidden(x))
            y_1 = self.head_1(x)
            y_2 = self.head_2(x)
            y_3 = self.head_3(x)
            return {"y_1": y_1, "y_2": y_2, "y_3": y_3}

    x = np.random.rand(2024, 16) + 1.0
    y = np.sum(x, axis=-1)
    y += np.random.normal(size=y.size)

    batched_data = [{
        "x":
        torch.tensor(x[i * 128:(i + 1) * 128]).to(torch.float32),
        "y": {
            "y_1": torch.tensor(y[i * 128:(i + 1) * 128], dtype=torch.float32),
            "y_2": torch.tensor(y[i * 128:(i + 1) * 128]**2,
                                dtype=torch.float32),
            "y_3": torch.tensor(y[i * 128:(i + 1) * 128]**2,
                                dtype=torch.float32)
        }
    } for i in range(1024 // 128)]

    model = MultipleOutputModel()

    bins = np.linspace(0, 1, 12)
    bins = {"y_1": bins, "y_2": bins}

    losses = {
        "y_1": Quantiles(np.linspace(0.05, 0.95, 10)),
        "y_2": Mean(),
        "y_3": Density(np.linspace(-2, 2, 21))
    }

    mrnn = MRNN(losses=losses, model=model)
    mrnn.train(batched_data, n_epochs=1)
Example #11
def test_training_with_dataloader():
    """
    Ensure that training with pre-batched data provided as a list of
    dicts works with the keras backend.
    """
    set_default_backend("keras")
    x = np.random.rand(1024, 16)
    y = np.random.rand(1024)

    batched_data = [{
        "x": x[i * 128:(i + 1) * 128],
        "y": y[i * 128:(i + 1) * 128],
    } for i in range(1024 // 128)]

    qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
    qrnn.train(batched_data, n_epochs=1)
Example #12
def test_qrnn_training_with_dataloader():
    """
    Ensure that training with a pytorch dataloader works.
    """
    set_default_backend("pytorch")

    x = np.random.rand(1024, 16)
    y = np.random.rand(1024)

    training_data = torch.utils.data.TensorDataset(torch.tensor(x),
                                                   torch.tensor(y))
    training_loader = torch.utils.data.DataLoader(training_data,
                                                  batch_size=128)
    qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])

    qrnn.train(training_loader, n_epochs=1)
Example #13
    def test_drnn(self, backend):
        """
        Test training of DRNN using numpy arrays as input.
        """
        set_default_backend(backend)
        drnn = DRNN(self.bins, n_inputs=self.x_train.shape[1])
        drnn.train((self.x_train, self.y_train),
                   validation_data=(self.x_train, self.y_train),
                   n_epochs=2)

        drnn.predict(self.x_train)

        mu = drnn.posterior_mean(self.x_train[:2, :])
        assert len(mu.shape) == 1

        r = drnn.sample_posterior(self.x_train[:4, :], n_samples=2)
        assert r.shape == (4, 2)
Example #14
def test_training_with_dict_and_keys():
    """
    Ensure that training with batch objects as dicts and provided keys
    argument works.
    """
    set_default_backend("keras")
    x = np.random.rand(1024, 16)
    y = np.random.rand(1024)

    batched_data = [{
        "x": x[i * 128:(i + 1) * 128],
        "x_2": x[i * 128:(i + 1) * 128],
        "y": y[i * 128:(i + 1) * 128],
    } for i in range(1024 // 128)]

    qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
    qrnn.train(batched_data, n_epochs=1, keys=("x", "y"))
Example #15
def test_qrnn_training_with_dict():
    """
    Ensure that training with batch objects as dicts works.
    """
    set_default_backend("pytorch")

    x = np.random.rand(1024, 16)
    y = np.random.rand(1024)

    batched_data = [{
        "x": torch.tensor(x[i * 128:(i + 1) * 128]),
        "y": torch.tensor(y[i * 128:(i + 1) * 128]),
    } for i in range(1024 // 128)]

    qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])

    qrnn.train(batched_data, n_epochs=1)
Example #16
    def test_save_drnn(self, backend):
        """
        Test saving and loading of DRNNs.
        """
        set_default_backend(backend)
        drnn = DRNN(self.bins, n_inputs=self.x_train.shape[1])
        f = tempfile.NamedTemporaryFile()
        drnn.save(f.name)
        drnn_loaded = DRNN.load(f.name)

        x_pred = drnn.predict(self.x_train)
        x_pred_loaded = drnn_loaded.predict(self.x_train)

        if not isinstance(x_pred, np.ndarray):
            x_pred = x_pred.detach()
            x_pred_loaded = x_pred_loaded.detach()

        assert np.allclose(x_pred, x_pred_loaded)
Example #17
    def test_save_qrnn(self, backend):
        """
        Test saving and loading of QRNNs.
        """
        set_default_backend(backend)
        qrnn = QRNN(np.linspace(0.05, 0.95, 10),
                    n_inputs=self.x_train.shape[1])
        f = tempfile.NamedTemporaryFile()
        qrnn.save(f.name)
        qrnn_loaded = QRNN.load(f.name)

        x_pred = qrnn.predict(self.x_train)
        x_pred_loaded = qrnn_loaded.predict(self.x_train)

        if not isinstance(x_pred, np.ndarray):
            x_pred = x_pred.detach()
            x_pred_loaded = x_pred_loaded.detach()

        assert np.allclose(x_pred, x_pred_loaded)
Example #18
def test_qrnn_training_metrics():
    """
    Ensure that training with a single target and metrics works.
    """
    set_default_backend("pytorch")

    x = np.random.rand(1024, 16)
    y = np.random.rand(1024)

    batched_data = [{
        "x": torch.tensor(x[i * 128:(i + 1) * 128]),
        "x_2": torch.tensor(x[i * 128:(i + 1) * 128]),
        "y": torch.tensor(y[i * 128:(i + 1) * 128]),
    } for i in range(1024 // 128)]

    qrnn = QRNN(np.linspace(0.05, 0.95, 10), n_inputs=x.shape[1])
    metrics = ["Bias", "MeanSquaredError", "CRPS"]
    qrnn.train(batched_data, n_epochs=1, keys=("x", "y"), metrics=metrics)
Example #19
def test_qrnn_training_metrics_conv():
    """
    Ensure that training a convolutional QRNN with metrics and a masked
    target works.
    """
    set_default_backend("pytorch")

    x_train = np.random.rand(1024, 16, 32, 32)
    y_train = np.random.rand(1024, 1, 32, 32)
    x_val = np.random.rand(32, 16, 32, 32)
    y_val = np.random.rand(32, 1, 32, 32)

    training_data = torch.utils.data.TensorDataset(torch.tensor(x_train),
                                                   torch.tensor(y_train))
    training_loader = torch.utils.data.DataLoader(training_data,
                                                  batch_size=128)
    validation_data = torch.utils.data.TensorDataset(torch.tensor(x_val),
                                                     torch.tensor(y_val))
    validation_loader = torch.utils.data.DataLoader(validation_data,
                                                    batch_size=1)

    model = nn.Sequential(nn.Conv2d(16, 10, 1))

    qrnn = QRNN(np.linspace(0.05, 0.95, 10), model=model)

    metrics = [
        "Bias", "MeanSquaredError", "CRPS", "CalibrationPlot", "ScatterPlot"
    ]
    qrnn.train(training_loader,
               validation_data=validation_loader,
               n_epochs=2,
               metrics=metrics,
               batch_size=1,
               mask=-1)
Example #20
def test_training_metrics_multi():
    """
    Ensure that training with metrics works for a model with multiple
    outputs.
    """
    set_default_backend("pytorch")

    class MultipleOutputModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.hidden = nn.Linear(16, 128)
            self.head_1 = nn.Linear(128, 11)
            self.head_2 = nn.Linear(128, 11)

        def forward(self, x):
            x = torch.relu(self.hidden(x))
            y_1 = self.head_1(x)
            y_2 = self.head_2(x)
            return {"y_1": y_1, "y_2": y_2}

    x = np.random.rand(2024, 16) + 1.0
    y = np.sum(x, axis=-1)
    y += np.random.normal(size=y.size)

    batched_data = [{
        "x": torch.tensor(x[i * 128:(i + 1) * 128]),
        "y": {
            "y_1": torch.tensor(y[i * 128:(i + 1) * 128]),
            "y_2": torch.tensor(y[i * 128:(i + 1) * 128]**2)
        }
    } for i in range(1024 // 128)]

    model = MultipleOutputModel()
    bins = np.linspace(0, 1, 12)
    bins = {"y_1": bins, "y_2": bins}
    drnn = DRNN(bins=bins, model=model)
    metrics = [
        "Bias", "MeanSquaredError", "CRPS", "ScatterPlot", "QuantileFunction"
    ]
    drnn.train(batched_data,
               validation_data=batched_data,
               n_epochs=5,
               keys=("x", "y"),
               metrics=metrics)
Example #21
    def test_drnn_dict_iterable(self, backend):
        """
        Test training with dataset object that yields dicts instead of
        tuples.
        """
        set_default_backend(backend)
        backend = get_default_backend()

        class DictWrapper:
            def __init__(self, data):
                self.data = data

            def __iter__(self):
                for x, y in self.data:
                    yield {"x": x, "y": y}

            def __len__(self):
                return len(self.data)

        data = backend.BatchedDataset((self.x_train, self.y_train), 256)
        drnn = DRNN(self.bins, n_inputs=self.x_train.shape[1])
        drnn.train(DictWrapper(data), n_epochs=2, keys=("x", "y"))
Example #22
def test_mse_loss():
    """
    Test masking for MSE loss.
    """
    set_default_backend("pytorch")

    y_pred = torch.rand(10, 10, 10)
    y = torch.ones(10, 10, 10)
    y[:, 0, :] = 0.55

    loss = MSELoss(mask=-1.0)
    ref = ((y_pred - y)**2).mean()
    assert np.all(
        np.isclose(
            loss(y_pred, y).detach().numpy(),
            ref.mean().detach().numpy()))

    y[5:, :, :] = -1.0
    y[:, :, 5:] = -1.0
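    # With the mask value -1.0, only the first five samples and the first
    # five positions along the last axis enter the reference.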
    ref = ((y_pred[:5, :, :5] - y[:5, :, :5])**2).mean()
    assert np.all(
        np.isclose(
            loss(y_pred, y).detach().numpy(),
            ref.mean().detach().numpy()))