def test_scalar_priors_broadcast(self):
  batch_shape = [4, 3]
  num_timesteps = 10
  num_features = 2
  design_matrix = self._build_placeholder(
      np.random.randn(*(batch_shape + [num_timesteps, num_features])))

  # Build a model with scalar Normal(0., 1.) prior.
  linear_regression = LinearRegression(
      design_matrix=design_matrix,
      weights_prior=tfd.Normal(loc=self._build_placeholder(0.),
                               scale=self._build_placeholder(1.)))

  weights_prior = linear_regression.parameters[0].prior
  self.assertAllEqual([num_features],
                      self.evaluate(weights_prior.event_shape_tensor()))
  self.assertAllEqual(batch_shape,
                      self.evaluate(weights_prior.batch_shape_tensor()))

  prior_sampled_weights = weights_prior.sample()
  ssm = linear_regression.make_state_space_model(
      num_timesteps=num_timesteps,
      param_vals={"weights": prior_sampled_weights})

  lp = ssm.log_prob(ssm.sample())
  self.assertAllEqual(batch_shape, self.evaluate(lp).shape)
def test_basic_statistics(self):
  # Verify that this model constructs a distribution with mean
  # `matmul(design_matrix, weights)` and stddev 0.
  batch_shape = [4, 3]
  num_timesteps = 10
  num_features = 2
  design_matrix = self._build_placeholder(
      np.random.randn(*(batch_shape + [num_timesteps, num_features])))

  linear_regression = LinearRegression(design_matrix=design_matrix)
  true_weights = self._build_placeholder(
      np.random.randn(*(batch_shape + [num_features])))
  predicted_time_series = tf.linalg.matmul(
      design_matrix, true_weights[..., tf.newaxis])

  ssm = linear_regression.make_state_space_model(
      num_timesteps=num_timesteps, param_vals={"weights": true_weights})
  self.assertAllEqual(self.evaluate(ssm.mean()), predicted_time_series)
  self.assertAllEqual(*self.evaluate((ssm.stddev(),
                                      tf.zeros_like(predicted_time_series))))
def test_custom_weights_prior(self):
  batch_shape = [4, 3]
  num_timesteps = 10
  num_features = 2
  design_matrix = self._build_placeholder(
      np.random.randn(*(batch_shape + [num_timesteps, num_features])))

  # Build a model with a batch of Exponential(1.) priors.
  linear_regression = LinearRegression(
      design_matrix=design_matrix,
      weights_prior=tfd.Exponential(
          rate=self._build_placeholder(np.ones(batch_shape))))

  # Check that the prior is broadcast to match the shape of the weights.
  weights = linear_regression.parameters[0]
  self.assertAllEqual([num_features],
                      self.evaluate(weights.prior.event_shape_tensor()))
  self.assertAllEqual(batch_shape,
                      self.evaluate(weights.prior.batch_shape_tensor()))

  prior_sampled_weights = weights.prior.sample()
  ssm = linear_regression.make_state_space_model(
      num_timesteps=num_timesteps,
      param_vals={"weights": prior_sampled_weights})

  lp = ssm.log_prob(ssm.sample())
  self.assertAllEqual(batch_shape, self.evaluate(lp).shape)

  # Verify that the bijector enforces the prior constraint that
  # weights must be nonnegative.
  self.assertAllFinite(
      self.evaluate(
          weights.prior.log_prob(
              weights.bijector(
                  tf.random.normal(tf.shape(weights.prior.sample(64)),
                                   seed=test_util.test_seed(),
                                   dtype=self.dtype)))))
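
# The tests above assume a concrete test-case class providing `dtype`,
# `use_static_shape`, and the `_build_placeholder` helper, plus module-level
# imports (`numpy as np`, `tensorflow.compat.v1 as tf1`, `tensorflow.compat.v2
# as tf`). The sketch below is illustrative only: the class name and defaults
# are assumptions, not the canonical harness in this file.
class _LinearRegressionTestSketch(test_util.TestCase):
  dtype = np.float32
  use_static_shape = True

  def _build_placeholder(self, ndarray):
    # Wrap a numpy array as a tensor, hiding the static shape when
    # `use_static_shape` is False so that dynamic-shape code paths are
    # also exercised by the tests above.
    ndarray = np.asarray(ndarray).astype(self.dtype)
    return tf1.placeholder_with_default(
        ndarray, shape=ndarray.shape if self.use_static_shape else None)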