def test_map_mode():
    x_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape((10, 3))
    y_train = np.linspace([[-1]] * 3, [[1]] * 3, 10, dtype=np.float32).reshape((10, 3))

    map_model = BayesNormalizingFlowNetwork(
        3,
        kl_weight_scale=1.0 / x_train.shape[0],
        n_flows=3,
        hidden_sizes=(16, 16),
        trainable_base_dist=True,
        noise_reg=("rule_of_thumb", 1.0),
        map_mode=True,
    )
    map_model.fit(x_train, y_train, epochs=10, verbose=0)
    assert map_model.evaluate(x_train, y_train) == map_model.evaluate(x_train, y_train)

    bayes_model = BayesNormalizingFlowNetwork(
        3,
        kl_weight_scale=1.0 / x_train.shape[0],
        n_flows=3,
        hidden_sizes=(16, 16),
        trainable_base_dist=True,
        noise_reg=("rule_of_thumb", 1.0),
        map_mode=False,
    )
    bayes_model.fit(x_train, y_train, epochs=10, verbose=0)
    assert bayes_model.evaluate(x_train, y_train) != bayes_model.evaluate(x_train, y_train)
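
# Illustrative sketch (not part of the test suite): why MAP mode evaluates
# deterministically while the full Bayesian model does not. The Normal
# distribution below is only a stand-in for a single weight's variational
# posterior; the real models handle this internally.
def _sketch_map_vs_sampled_weight():
    posterior = tfp.distributions.Normal(loc=0.0, scale=1.0)
    map_weight = posterior.mode()  # fixed point estimate -> repeated evaluate() calls agree
    w1, w2 = posterior.sample(), posterior.sample()  # fresh draws per call -> evaluate() fluctuates
    assert map_weight == posterior.mode()
    assert w1 != w2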
def test_bayesian_score():
    # sinusoidal data with heteroscedastic noise
    x_train = np.linspace(-3, 3, 300, dtype=np.float32).reshape((300, 1))
    noise = tfp.distributions.MultivariateNormalDiag(
        loc=5 * tf.math.sin(2 * x_train), scale_diag=abs(x_train)
    )
    y_train = noise.sample().numpy()

    mle = NormalizingFlowNetwork(1, n_flows=0, hidden_sizes=(6, 6), trainable_base_dist=True)
    mle.fit(x_train, y_train, epochs=20, verbose=0)
    mle.map_mode = False
    # the MLE model is deterministic and has no regularisation / KL terms added to its loss,
    # so evaluate() and the negative log-likelihood score coincide
    assert bayesian_log_likelihood_score(DummyWrapper(mle), x_train, y_train) == pytest.approx(
        -mle.evaluate(x_train, y_train)
    )

    be = BayesNormalizingFlowNetwork(
        n_dims=1,
        kl_weight_scale=1.0 / x_train.shape[0],
        n_flows=0,
        hidden_sizes=(6, 6),
        trainable_base_dist=True,
    )
    be.fit(x_train, y_train, epochs=200, verbose=0)
    score = bayesian_log_likelihood_score(DummyWrapper(be), x_train, y_train)
    loss = sum(be.evaluate(x_train, y_train) for _ in range(50)) / 50
    # the loss has the KL divergence to the prior added to it,
    # so its negative has to be smaller than the log-likelihood score
    assert score > -loss
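
# Illustrative arithmetic for the final assertion (hypothetical numbers, not
# produced by the models above): the Bayesian model's loss is the NLL plus a
# scaled KL divergence to the prior, while the score is just the negated NLL,
# so -loss sits strictly below the score whenever the KL term is positive.
def _sketch_score_vs_loss():
    nll = 1.3      # assumed average negative log-likelihood
    kl_term = 0.2  # assumed kl_weight_scale * KL(posterior || prior), always >= 0
    loss = nll + kl_term
    score = -nll
    assert score > -loss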