def test_robust_max_multiclass_symmetric(num_classes, num_points, tol, epsilon):
    """
    This test is based on the observation that for symmetric inputs the class
    predictions must have equal probability.
    """
    rng = np.random.RandomState(1)
    p = 1. / num_classes
    F = tf.ones((num_points, num_classes), dtype=default_float())
    Y = tf.convert_to_tensor(rng.randint(num_classes, size=(num_points, 1)), dtype=default_float())

    likelihood = MultiClass(num_classes)
    likelihood.invlink.epsilon = tf.convert_to_tensor(epsilon, dtype=default_float())

    mu, _ = likelihood.predict_mean_and_var(F, F)
    pred = likelihood.predict_density(F, F, Y)
    variational_expectations = likelihood.variational_expectations(F, F, Y)

    expected_mu = (p * (1. - epsilon) + (1. - p) * epsilon / (num_classes - 1)) * np.ones((num_points, 1))
    expected_log_density = np.log(expected_mu)

    # assert_allclose() would complain about shape mismatch
    assert np.allclose(mu, expected_mu, tol, tol)
    assert np.allclose(pred, expected_log_density, 1e-3, 1e-3)

    validation_variational_expectation = p * np.log(1. - epsilon) + (1. - p) * np.log(
        epsilon / (num_classes - 1)
    )
    assert_allclose(
        variational_expectations,
        np.ones((num_points, 1)) * validation_variational_expectation,
        tol,
        tol,
    )
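A minimal sketch of the imports and pytest parametrization the test above appears to rely on, assuming the GPflow 2.x module layout; the concrete parameter values are illustrative assumptions, not taken from the original file:

import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_allclose
from gpflow.config import default_float
from gpflow.likelihoods import MultiClass, RobustMax
from gpflow.utilities import to_default_int


# Hypothetical parameter grid; values chosen only for illustration.
@pytest.mark.parametrize("num_classes", [5, 100])
@pytest.mark.parametrize("num_points", [1, 10])
@pytest.mark.parametrize("tol, epsilon", [(1e-4, 1e-3)])
def test_robust_max_multiclass_symmetric(num_classes, num_points, tol, epsilon):
    ...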
def test_robust_max_multiclass_predict_log_density(
    num_classes, num_points, mock_prob, expected_prediction, tol, epsilon
):
    class MockRobustMax(RobustMax):
        def prob_is_largest(self, Y, Fmu, Fvar, gh_x, gh_w):
            return tf.ones((num_points, 1), dtype=default_float()) * mock_prob

    likelihood = MultiClass(num_classes, invlink=MockRobustMax(num_classes, epsilon))
    F = tf.ones((num_points, num_classes))
    rng = np.random.RandomState(1)
    Y = to_default_int(rng.randint(num_classes, size=(num_points, 1)))
    prediction = likelihood.predict_log_density(F, F, Y)

    assert_allclose(prediction, expected_prediction, tol, tol)
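A sketch of one way this mocked test could be parametrized. The values are hypothetical; the expected log density is derived from the same robustified probability, p * (1 - eps) + (1 - p) * eps / (K - 1), that the symmetric test above checks:

# Hypothetical parametrization; with prob_is_largest mocked to return mock_prob,
# the predicted density reduces to mock_prob * (1 - eps) + (1 - mock_prob) * eps / (K - 1).
_K, _eps, _mock_prob = 3, 1e-3, 0.73
_expected = np.log(_mock_prob * (1. - _eps) + (1. - _mock_prob) * _eps / (_K - 1))


@pytest.mark.parametrize("num_classes, num_points", [(_K, 10)])
@pytest.mark.parametrize(
    "mock_prob, expected_prediction, tol, epsilon", [(_mock_prob, _expected, 1e-6, _eps)]
)
def test_robust_max_multiclass_predict_log_density(
    num_classes, num_points, mock_prob, expected_prediction, tol, epsilon
):
    ...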
def test_multiclass(self):
    K = 3
    lik = MultiClass(K)
    N, Ns, D_Y = self.X.shape[0], self.Xs.shape[0], self.D_Y
    Y = np.random.choice([0., 1., 2.], N * 1).reshape(N, 1)
    Ys = np.random.choice([0., 1., 2.], Ns * 1).reshape(Ns, 1)
    for L in [1, 2]:
        self.compare_to_single_layer(Y, Ys, lik, L, num_outputs=K)
def make_dgp(num_layers, X, Y, Z):
    kernels = [RBF(variance=2.0, lengthscales=2.0)]
    layer_sizes = [784]
    for l in range(num_layers - 1):
        kernels.append(RBF(variance=2.0, lengthscales=2.0))
        layer_sizes.append(30)
    model = DeepGP(X, Y, Z, kernels, layer_sizes, MultiClass(10), num_outputs=10)

    # init hidden layers to be near deterministic
    for layer in model.layers[:-1]:
        layer.q_sqrt.assign(layer.q_sqrt * 1e-5)

    return model
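A hypothetical call to make_dgp, with shapes chosen only to match the hard-coded layer_sizes=[784] and MultiClass(10) above; the data and inducing points are stand-ins, not from the original script:

# Stand-in data sized for 784-dimensional inputs and 10 classes.
X = np.random.rand(500, 784)
Y = np.random.randint(0, 10, size=(500, 1)).astype(float)
Z = X[np.random.permutation(500)[:100], :]  # 100 inducing inputs drawn from X
model = make_dgp(num_layers=2, X=X, Y=Y, Z=Z)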
    LikelihoodSetup(Ordinal(np.array([-1, 1])), Y=tf.random.uniform(Datum.Yshape, 0, 3, dtype=default_int())),
    LikelihoodSetup(Poisson(invlink=tf.square), Y=tf.random.poisson(Datum.Yshape, 1.0, dtype=default_float())),
    LikelihoodSetup(Exponential(invlink=tf.square), Y=tf.random.uniform(Datum.Yshape, dtype=default_float())),
    LikelihoodSetup(Gamma(invlink=tf.square), Y=tf.random.uniform(Datum.Yshape, dtype=default_float())),
    LikelihoodSetup(Bernoulli(invlink=tf.sigmoid), Y=tf.random.uniform(Datum.Yshape, dtype=default_float())),
    pytest.param(
        LikelihoodSetup(MultiClass(2), Y=tf.argmax(Datum.Y, 1).numpy().reshape(-1, 1), rtol=1e-3, atol=1e-3),
        marks=pytest.mark.skip,
    ),
]


def get_likelihood(likelihood_setup):
    if not isinstance(likelihood_setup, LikelihoodSetup):  # pytest.param()
        likelihood_setup, = likelihood_setup.values
    return likelihood_setup.likelihood
        Exponential(invlink=tf.square),
        Y=tf.random.uniform(Datum.Yshape, dtype=default_float()),
    ),
    LikelihoodSetup(
        Gamma(invlink=tf.square),
        Y=tf.random.uniform(Datum.Yshape, dtype=default_float()),
    ),
    LikelihoodSetup(
        Bernoulli(invlink=tf.sigmoid),
        Y=tf.random.uniform(Datum.Yshape, dtype=default_float()),
    ),
]

likelihood_setups = scalar_likelihood_setups + [
    LikelihoodSetup(
        MultiClass(3),
        Y=tf.argmax(Datum.Y, 1).numpy().reshape(-1, 1),
        rtol=1e-3,
        atol=1e-3,
    ),
]


def filter_analytic_scalar_likelihood(method_name):
    assert method_name in (
        "_variational_expectations",
        "_predict_log_density",
        "_predict_mean_and_var",
    )

    def is_analytic(likelihood):