Example #1
0
    def test_qrnn(self, backend):
        """
        Train a QRNN on plain numpy arrays and sanity-check the
        inference helpers (CDF, PDF, posterior mean, sampling).
        """
        set_backend(backend)
        model = QRNN(self.x_train.shape[1], np.linspace(0.05, 0.95, 10))
        model.train((self.x_train, self.y_train), maximum_epochs=1)

        model.predict(self.x_train)

        # The estimated CDF must span the full probability range.
        _, cdf_values = model.cdf(self.x_train[:2, :])
        assert cdf_values[0] == 0.0
        assert cdf_values[-1] == 1.0

        # PDF support and density arrays must line up element-wise.
        pdf_x, pdf_y = model.pdf(self.x_train[:2, :])
        assert pdf_x.shape == pdf_y.shape

        # The posterior mean collapses the quantile axis to a 1D vector.
        means = model.posterior_mean(self.x_train[:2, :])
        assert len(means.shape) == 1

        # Posterior sampling (raw and Gaussian-fitted) returns
        # one row per input sample and one column per draw.
        draws = model.sample_posterior(self.x_train[:4, :], n=2)
        assert draws.shape == (4, 2)

        draws = model.sample_posterior_gaussian_fit(self.x_train[:4, :], n=2)
        assert draws.shape == (4, 2)
Example #2
0
 def test_qrnn_datasets(self, backend):
     """
     Train a QRNN from a backend dataset object instead of raw
     numpy arrays.
     """
     set_backend(backend)
     backend_module = get_backend(backend)
     batches = backend_module.BatchedDataset((self.x_train, self.y_train), 256)
     model = QRNN(self.x_train.shape[1], np.linspace(0.05, 0.95, 10))
     model.train(batches, maximum_epochs=1)
Example #3
0
import argparse

# Command-line interface: three required positional arguments.
parser = argparse.ArgumentParser(description='Train unet.')
parser.add_argument("training_data", type=str, nargs=1, help="The training data.")
parser.add_argument("levels", type=int, nargs=1, help="Number of downscaling blocks.")
parser.add_argument("n_features", type=int, nargs=1, help="Number of features in network.")

args = parser.parse_args()
# nargs=1 wraps each value in a one-element list, hence the [0] indexing.
training_data = args.training_data[0]
level = args.levels[0]
n_features = args.n_features[0]

################################################################################
# Train network
################################################################################

# 90/10 train/validation split of the dataset read from `training_data`.
data = GpmData(training_data)
n = len(data)
training_data, validation_data = torch.utils.data.random_split(data, [int(0.9 * n), n - int(0.9 * n)])
training_loader = DataLoader(training_data, batch_size=16, shuffle=True)
validation_loader = DataLoader(validation_data, batch_size=16, shuffle=True)

# 11 target quantiles, symmetric around the median.
quantiles = [0.05, 0.15, 0.25, 0.35, 0.45, 0.5, 0.55, 0.65, 0.75, 0.85, 0.95]
# NOTE(review): `level` and `n_features` are parsed from the CLI above but
# never used -- the UNet is built with hard-coded values (13 inputs, 11
# outputs, 64 features, 5 levels, presumably). Confirm whether the CLI
# arguments were meant to be passed here.
unet = UNet(13, 11, 64, 5)
qrnn = QRNN(13, quantiles, model = unet)

# Three training phases with a manually decayed learning rate.
qrnn.train(training_loader, validation_loader, gpu = True, lr = 1e-2,  n_epochs=20)
qrnn.train(training_loader, validation_loader, gpu = True, lr = 5e-3,  n_epochs=20)
qrnn.train(training_loader, validation_loader, gpu = True, lr = 2e-3,  n_epochs=20)
qrnn.model.save("unet.pt")
Example #4
0
import argparse

# Command-line interface: three required positional arguments.
parser = argparse.ArgumentParser(description='Train unet.')
parser.add_argument("training_data", type=str, nargs=1, help="The training data.")
parser.add_argument("levels", type=int, nargs=1, help="Number of downscaling blocks.")
parser.add_argument("n_features", type=int, nargs=1, help="Number of features in network.")

args = parser.parse_args()
# nargs=1 wraps each value in a one-element list, hence the [0] indexing.
training_data = args.training_data[0]
level = args.levels[0]
n_features = args.n_features[0]

################################################################################
# Train network
################################################################################

# 90/10 train/validation split of the dataset read from `training_data`.
data = GpmData(training_data)
n = len(data)
n_train = int(0.9 * n)
training_data, validation_data = torch.utils.data.random_split(data, [n_train, n - n_train])
training_loader = DataLoader(training_data, batch_size=32, shuffle=True)
validation_loader = DataLoader(validation_data, batch_size=32, shuffle=True)

skip_connection = "all"
# 13 target quantiles, including the 1% / 99% tails.
quantiles = np.array([0.01, 0.05, 0.15, 0.25, 0.35, 0.45, 0.5,
                      0.55, 0.65, 0.75, 0.85, 0.95, 0.99])
# FIX: the original call placed the positional `quantiles` after the keyword
# argument `n_features=128`, which is a SyntaxError in Python. Pass it by
# keyword so the call is well-formed.
# NOTE(review): `level` and `n_features` are parsed from the CLI above but a
# hard-coded feature count (128) is used here -- confirm whether the CLI
# values were meant to be forwarded.
unet = UNet(13, quantiles=quantiles, n_features=128, skip_connection=skip_connection)
qrnn = QRNN(13, model=unet)

qrnn.train(training_loader, gpu=True, lr=1e-2, momentum=0.99)
qrnn.save("unet.pt")