def __init__(self,
             a,
             theta,
             alpha,
             beta,
             validate_args=False,
             allow_nan_stats=True,
             name='Amoroso'):
    parameters = dict(locals())
    with tf.name_scope(name) as name:
        self._a = tensor_util.convert_nonref_to_tensor(a)
        self._theta = tensor_util.convert_nonref_to_tensor(theta)
        self._alpha = tensor_util.convert_nonref_to_tensor(alpha)
        self._beta = tensor_util.convert_nonref_to_tensor(beta)

        gamma = tfd.Gamma(alpha, 1.)
        chain = tfb.Invert(
            tfb.Chain([
                tfb.Exp(),
                tfb.Scale(beta),
                tfb.Shift(-tf.math.log(theta)),
                tfb.Log(),
                tfb.Shift(-a),
            ]))
        super().__init__(
            distribution=gamma,
            bijector=chain,
            validate_args=validate_args,
            parameters=parameters,
            name=name)
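# Sanity-check sketch (not from the source): with a=0, theta=1, beta=1 the
# chain above reduces to the identity, so the transformed distribution should
# match a plain Gamma. Standalone construction, assuming a recent TFP; the
# parameter values are illustrative.
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
tfb = tfp.bijectors

a, theta, alpha, beta = 0., 1., 2., 1.
chain = tfb.Invert(
    tfb.Chain([
        tfb.Exp(),
        tfb.Scale(beta),
        tfb.Shift(-tf.math.log(theta)),
        tfb.Log(),
        tfb.Shift(-a),
    ]))
amoroso = tfd.TransformedDistribution(tfd.Gamma(alpha, 1.), bijector=chain)

x = tf.constant([0.5, 1.0, 2.0])
# With these parameters the Amoroso density equals Gamma(alpha, rate=1).
print(amoroso.log_prob(x).numpy())
print(tfd.Gamma(alpha, 1.).log_prob(x).numpy())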
def __init__(self, *args, **kwargs):
    """Initialize PositiveContinuousRV.

    Developer Note
    --------------
    The inverse of the exponential bijector is the log bijector.
    """
    super().__init__(*args, **kwargs)
    self._transformed_distribution = tfd.TransformedDistribution(
        distribution=self._distribution,
        bijector=bijectors.Invert(bijectors.Exp()))
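# The docstring's claim is easy to verify directly: tfb.Invert(tfb.Exp()) and
# tfb.Log() agree in both their forward map and their Jacobian corrections.
# A small sketch (not from the source), assuming eager-mode TFP.
import numpy as np
import tensorflow_probability as tfp

tfb = tfp.bijectors

x = np.array([0.5, 1.0, 2.0])
inv_exp = tfb.Invert(tfb.Exp())
log = tfb.Log()

np.testing.assert_allclose(inv_exp.forward(x), log.forward(x))  # both log(x)
np.testing.assert_allclose(
    inv_exp.forward_log_det_jacobian(x, event_ndims=0),
    log.forward_log_det_jacobian(x, event_ndims=0))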
def test_transformed_executor_logp_tensorflow(transformed_model):
    norm_log = tfd.TransformedDistribution(tfd.HalfNormal(1), bij.Invert(bij.Exp()))

    _, state = pm.evaluate_model_transformed(
        transformed_model(), values=dict(__log_n=-math.pi))
    np.testing.assert_allclose(
        state.collect_log_prob(), norm_log.log_prob(-math.pi), equal_nan=False)

    _, state = pm.evaluate_model_transformed(
        transformed_model(), values=dict(n=math.exp(-math.pi)))
    np.testing.assert_allclose(
        state.collect_log_prob(), norm_log.log_prob(-math.pi), equal_nan=False)
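# For reference, the identity this test relies on is the scalar change of
# variables: if X ~ HalfNormal(1) and Y = log(X), then
#     log p_Y(y) = log p_X(exp(y)) + y,
# where the +y term is the log-Jacobian of exp. A sketch (not part of the
# test suite) confirming it at y = -pi:
import math
import tensorflow_probability as tfp

tfd, bij = tfp.distributions, tfp.bijectors

y = -math.pi
norm_log = tfd.TransformedDistribution(tfd.HalfNormal(1.), bij.Invert(bij.Exp()))
manual = tfd.HalfNormal(1.).log_prob(math.exp(y)) + y
print(float(norm_log.log_prob(y)), float(manual))  # the two values agree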
def german_credit_model():
    x_numeric = tf.constant(numericals.astype(np.float32))
    x_categorical = [tf.one_hot(c, c.max() + 1) for c in categoricals]
    all_x = tf.concat([x_numeric] + x_categorical, 1)
    num_features = int(all_x.shape[1])

    overall_log_scale = ed.Normal(loc=0., scale=10., name='overall_log_scale')
    beta_log_scales = ed.TransformedDistribution(
        tfd.Gamma(0.5 * tf.ones([num_features]), 0.5),
        bijector=tfb.Invert(tfb.Exp()),
        name='beta_log_scales')
    beta = ed.Normal(
        loc=tf.zeros([num_features]),
        scale=tf.exp(overall_log_scale + beta_log_scales),
        name='beta')
    logits = tf.einsum('nd,md->mn', all_x, beta[tf.newaxis, :])
    return ed.Bernoulli(logits=logits, name='y')
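# Side check (an assumption, not in the source): beta_log_scales above is the
# log of a Gamma(0.5, 0.5) draw, so exponentiating samples from the equivalent
# plain-TFP construction should recover the Gamma mean of 0.5 / 0.5 = 1.0.
import tensorflow as tf
import tensorflow_probability as tfp

tfd, tfb = tfp.distributions, tfp.bijectors

log_gamma = tfd.TransformedDistribution(
    tfd.Gamma(0.5, 0.5), bijector=tfb.Invert(tfb.Exp()))
z = log_gamma.sample(10000, seed=42)
print(tf.reduce_mean(tf.exp(z)))  # should be near 1.0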
def __init__(self):
    # NOTE: Do we actually need the inverse here to match PyMC3?
    self._transform = tfb.Exp()
def __init__(self, upper_limit):
    transform = tfb.Chain([tfb.Shift(upper_limit), tfb.Scale(-1), tfb.Exp()])
    super().__init__(transform)
def __init__(self, lower_limit):
    transform = tfb.Chain([tfb.Shift(lower_limit), tfb.Exp()])
    super().__init__(transform)
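# Reading each Chain right to left shows what these two constructors do
# (sketch with illustrative limits, not from the source): the upper-bound
# transform maps y -> upper_limit - exp(y), onto (-inf, upper_limit); the
# lower-bound transform maps y -> lower_limit + exp(y), onto (lower_limit, inf).
import tensorflow_probability as tfp

tfb = tfp.bijectors

upper = tfb.Chain([tfb.Shift(5.0), tfb.Scale(-1.0), tfb.Exp()])  # y -> 5 - e^y
lower = tfb.Chain([tfb.Shift(2.0), tfb.Exp()])                   # y -> 2 + e^y

print(upper.forward([-10., 0., 10.]))  # every value strictly below 5.0
print(lower.forward([-10., 0., 10.]))  # every value strictly above 2.0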
def __init__(self):
    # NOTE: Do we actually need the inverse here to match PyMC3?
    transform = tfb.Exp()
    super().__init__(transform)
n_T = 6
K = 3
model = create_model(n_C=n_C, n_T=n_T, K=K)
s = model.sample()
s
model.log_prob(s)
# model.sample(2)  # FIXME

bijectors = [
    tfb.Sigmoid(),          # p
    tfb.Sigmoid(),          # gamma_C
    tfb.Sigmoid(),          # gamma_T
    tfb.SoftmaxCentered(),  # eta_C
    tfb.SoftmaxCentered(),  # eta_T
    tfb.Identity(),         # loc
    tfb.Exp()               # sigma_sq
]

d1 = util.read_data('../../data/TGFBR2/cytof-data/donor1.csv', 'CD16', 2000, 2)
model = create_model(n_C=d1['y_C'].shape[0], n_T=d1['y_T'].shape[0], K=5)
_ = model.sample()

def target_log_prob_fn(p, gamma_C, gamma_T, eta_C, eta_T, loc, sigma_sq):
    return model.log_prob(p=p, gamma_C=gamma_C, gamma_T=gamma_T,
                          eta_C=eta_C, eta_T=eta_T, loc=loc,
                          sigma_sq=sigma_sq)
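# Sketch (not from the source): one way to use the bijector list above is a
# TransformedTransitionKernel, which pairs each bijector with the state part
# in the same position among target_log_prob_fn's arguments. The HMC settings
# below are illustrative placeholders.
import tensorflow_probability as tfp

kernel = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target_log_prob_fn,
        step_size=0.01,
        num_leapfrog_steps=10),
    bijector=bijectors)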
    I0_logp = tfd.Gamma(concentration=tf.constant(1.5, dtype=DTYPE),
                        rate=tf.constant(0.05, dtype=DTYPE)).log_prob(p['I0'])
    r_logp = tfd.Gamma(concentration=tf.constant(0.1, dtype=DTYPE),
                       rate=tf.constant(0.1, dtype=DTYPE)).log_prob(p['gamma'])
    state_init = simulator.create_initial_state(init_matrix=seeding * p['I0'])
    t, sim, solve = simulator.simulate(p, state_init)
    y_logp = covid19uk_logp(y_incr, sim, 0.1, p['r'])
    logp = (beta_logp + beta3_logp + gamma_logp + I0_logp + r_logp
            + tf.reduce_sum(y_logp))
    return logp

def trace_fn(_, pkr):
    return (
        pkr.inner_results.log_accept_ratio,
        pkr.inner_results.accepted_results.target_log_prob,
        pkr.inner_results.accepted_results.step_size)

unconstraining_bijector = [tfb.Exp()]
initial_mcmc_state = np.array([0.05, 1.0, 0.25, 1.0, 50.],
                              dtype=np.float64)  # beta1, gamma, I0
print("Initial log likelihood:", logp(initial_mcmc_state))

@tf.function(autograph=False, experimental_compile=True)
def sample(n_samples, init_state, scale, num_burnin_steps=0):
    return tfp.mcmc.sample_chain(
        num_results=n_samples,
        num_burnin_steps=num_burnin_steps,
        current_state=init_state,
        kernel=tfp.mcmc.TransformedTransitionKernel(
            inner_kernel=tfp.mcmc.RandomWalkMetropolis(
                target_log_prob_fn=logp,
                new_state_fn=random_walk_mvnorm_fn(scale)),
            bijector=unconstraining_bijector),
        trace_fn=trace_fn)
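# Quick check (assumption, using the names above): with a single array-valued
# state and the bijector list [tfb.Exp()], the random walk happens on the log
# scale; the unconstrained starting point is the elementwise log of the start.
print(tfb.Exp().inverse(initial_mcmc_state))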
class PositiveContinuousRV(RandomVariable):
    _bijector = bijectors.Exp()
    tf.zeros(ncomponents, dtype, name='mu'),
    tf.ones(ncomponents, dtype, name='sigma') * 0.1,
    tf.ones([], dtype, name='alpha') * 0.5,
    tf.fill(ncomponents - 1, value=np.float64(0.5), name='v')
]

# Create bijectors to transform unconstrained parameters to and from the
# constrained parameter space. For example, if X ~ Exponential(theta), then X
# is constrained to be positive. A transformation that puts X onto an
# unconstrained space is Y = log(X). In that case, the bijector used should be
# the **inverse-transform**, which is exp(.) (i.e. so that X = exp(Y)).
#
# NOTE: Define the inverse-transforms for each parameter in sequence.
bijectors = [
    tfb.Identity(),  # mu
    tfb.Exp(),       # sigma
    tfb.Exp(),       # alpha
    tfb.Sigmoid()    # v
]

# ## HMC

# In[16]:

# Define HMC sampler.
@tf.function(autograph=False, experimental_compile=True)
def hmc_sample(num_results, num_burnin_steps, current_state,
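# The Exponential example in the comment above, made concrete (a sketch, not
# part of the notebook): Y = log(X) lives on the whole real line, and applying
# tfb.Exp() maps it back to the positive support of X.
import tensorflow_probability as tfp

tfd, tfb = tfp.distributions, tfp.bijectors

x = tfd.Exponential(rate=1.5).sample(5, seed=0)
y = tfb.Exp().inverse(x)          # log(x): unconstrained
print(tfb.Exp().forward(y) - x)   # ~0 everywhere: round trip recovers x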