def test_y_noise_reg():
    x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape((10, 3))
    y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape((10, 3))

    noise = NormalizingFlowNetwork(
        3,
        n_flows=3,
        hidden_sizes=(16, 16),
        trainable_base_dist=True,
        noise_reg=("fixed_rate", 1.0),
    )
    noise.fit(x_train, y_train, epochs=10, verbose=0)

    input_model = noise._get_input_model()

    # y_input should not include randomness during evaluation
    y1 = input_model(y_train, training=False).numpy()
    y2 = input_model(y_train, training=False).numpy()
    assert np.all(y1 == y2)

    # y_input should include randomness during training
    y1 = input_model(y_train, training=True).numpy()
    y2 = input_model(y_train, training=True).numpy()
    assert not np.all(y1 == y2)

def test_mle_score():
    x_train = np.linspace(-1, 1, 10).reshape((10, 1))
    y_train = np.linspace(-1, 1, 10).reshape((10, 1))

    mle = NormalizingFlowNetwork(1, n_flows=0, hidden_sizes=(6, 6), trainable_base_dist=True)
    mle.fit(x_train, y_train, epochs=10, verbose=0)

    # the model is deterministic at evaluation time, so the log-likelihood score
    # should equal the negative of the evaluation loss
    assert mle_log_likelihood_score(DummyWrapper(mle), x_train, y_train) == pytest.approx(
        -mle.evaluate(x_train, y_train)
    )

def test_ml_nf_fitting():
    m1 = NormalizingFlowNetwork(1, n_flows=3, hidden_sizes=(10, 10), trainable_base_dist=True)
    on_sinusoidal_gaussian_testing(m1, 0.45)

    m2 = NormalizingFlowNetwork(1, n_flows=3, hidden_sizes=(10, 10), trainable_base_dist=True)
    on_bimodal_gaussian_testing(m2, 0.1012)

def test_bayesian_score():
    # sinusoidal data with heteroscedastic noise
    x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
    noise = tfp.distributions.MultivariateNormalDiag(
        loc=5 * tf.math.sin(2 * x_train), scale_diag=abs(x_train)
    )
    y_train = noise.sample().numpy()

    mle = NormalizingFlowNetwork(1, n_flows=0, hidden_sizes=(6, 6), trainable_base_dist=True)
    mle.fit(x_train, y_train, epochs=20, verbose=0)
    mle.map_mode = False

    # the MLE model is deterministic and has no regularisation loss / KL divergences added,
    # therefore the score and the negative of evaluate() (the NLL) should be the same
    assert bayesian_log_likelihood_score(DummyWrapper(mle), x_train, y_train) == pytest.approx(
        -mle.evaluate(x_train, y_train)
    )

    be = BayesNormalizingFlowNetwork(
        n_dims=1,
        kl_weight_scale=1.0 / x_train.shape[0],
        n_flows=0,
        hidden_sizes=(6, 6),
        trainable_base_dist=True,
    )
    be.fit(x_train, y_train, epochs=200, verbose=0)

    score = bayesian_log_likelihood_score(DummyWrapper(be), x_train, y_train)
    loss = sum(be.evaluate(x_train, y_train) for _ in range(50)) / 50
    # the loss has the KL divergence to the prior added to it,
    # so its negative has to be smaller than the log-likelihood score
    assert score > -loss

def test_ml_dims():
    # test 1-D
    for model in [
        NormalizingFlowNetwork(1, n_flows=1, hidden_sizes=(2, 2), trainable_base_dist=False),
        MixtureDensityNetwork(1, n_centers=2, hidden_sizes=(2, 2)),
        KernelMixtureNetwork(1, n_centers=3, hidden_sizes=(2, 2)),
    ]:
        model_output_dims_1d_testing(model)

    # test 3-D
    for model in [
        NormalizingFlowNetwork(3, n_flows=1, hidden_sizes=(2, 2), trainable_base_dist=True),
        MixtureDensityNetwork(3, n_centers=2, hidden_sizes=(2, 2)),
        KernelMixtureNetwork(3, n_centers=3, hidden_sizes=(2, 2)),
    ]:
        model_output_dims_3d_testing(model)

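# A hedged sketch of the kind of check that model_output_dims_1d_testing (a shared
# test helper defined elsewhere, not shown here) presumably performs. The helper
# body below is an assumption for illustration only; the name is prefixed with an
# underscore so pytest does not collect it, and only .fit() and .pdf() are used,
# since those calls appear elsewhere in this file.
def _example_output_dims_1d_check(model):
    x = np.linspace(-1.0, 1.0, 10, dtype=np.float32).reshape((10, 1))
    y = np.linspace(-1.0, 1.0, 10, dtype=np.float32).reshape((10, 1))
    model.fit(x, y, epochs=1, verbose=0)
    # .pdf() appears to return one density value per sample (see test_x_noise_reg),
    # so evaluating a 1-D model on 10 points should yield 10 densities
    densities = model.pdf(x, y).numpy()
    assert densities.shape[0] == 10
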
def test_dense_layer_generation():
    layers = NormalizingFlowNetwork(1)._get_dense_layers(
        hidden_sizes=(2, 2, 2), output_size=2, activation="linear"
    )
    assert len(layers) == 6

def test_x_noise_reg():
    x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
    noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_train), scale_diag=abs(x_train))
    y_train = noise.sample().numpy()

    too_much_noise = NormalizingFlowNetwork(
        1,
        n_flows=2,
        hidden_sizes=(16, 16),
        noise_reg=("fixed_rate", 3.0),
        trainable_base_dist=True,
    )
    too_much_noise.fit(x_train, y_train, epochs=700, verbose=0)

    x_test = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
    noise = tfd.MultivariateNormalDiag(loc=5 * tf.math.sin(2 * x_test), scale_diag=abs(x_test))
    y_test = noise.sample().numpy()

    out1 = too_much_noise.pdf(x_test, y_test).numpy()
    out2 = too_much_noise.pdf(x_test, y_test).numpy()
    # make sure that the noise regularisation is deactivated in testing mode
    assert all(out1 == out2)

    little_noise = NormalizingFlowNetwork(
        1,
        n_flows=2,
        hidden_sizes=(16, 16),
        noise_reg=("rule_of_thumb", 0.1),
        trainable_base_dist=True,
    )
    little_noise.fit(x_train, y_train, epochs=700, verbose=0)

    # heavy input noise should lead to a worse fit than light noise regularisation
    little_noise_score = tf.reduce_sum(little_noise.pdf(x_test, y_test)) / 700.0
    too_much_noise_score = tf.reduce_sum(too_much_noise.pdf(x_test, y_test)) / 700.0
    assert little_noise_score > too_much_noise_score