Example #1
def feed_forward(
        state, data_shape, num_layers=2, activation=tf.nn.relu,
        mean_activation=None, stop_gradient=False, trainable=True, units=100,
        std=1.0, low=-1.0, high=1.0, dist='normal'):
    """Create a model returning unnormalized MSE distribution."""
    hidden = state
    if stop_gradient:
        hidden = tf.stop_gradient(hidden)
    for _ in range(num_layers):
        hidden = tf.compat.v1.layers.dense(hidden, units, activation)
    mean = tf.compat.v1.layers.dense(
        hidden, int(np.prod(data_shape)), mean_activation, trainable=trainable)
    mean = tf.reshape(mean, tools.shape(state)[:-1] + data_shape)
    if std == 'learned':
        std = tf.compat.v1.layers.dense(
            hidden, int(np.prod(data_shape)), None, trainable=trainable)
        std = tf.nn.softplus(std + 0.55) + 0.01
        std = tf.reshape(std, tools.shape(state)[:-1] + data_shape)
    if dist == 'normal':
        dist = tfd.Normal(mean, std)
    elif dist == 'truncated_normal':
        # https://www.desmos.com/calculator/3o96eyqxib
        dist = tfd.TruncatedNormal(mean, std, low, high)
    elif dist == 'tanh_normal':
        # https://www.desmos.com/calculator/sxpp7ectjv
        dist = tfd.Normal(mean, std)
        dist = tfd.TransformedDistribution(dist, tfp.bijectors.Tanh())
    elif dist == 'deterministic':
        dist = tfd.Deterministic(mean)
    else:
        raise NotImplementedError(dist)
    dist = tfd.Independent(dist, len(data_shape))
    return dist
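A minimal, self-contained sketch of the 'truncated_normal' branch above (not from the original project; the Keras layers, sizes, and bounds are illustrative assumptions) looks like this:

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

features = tf.random.normal([8, 32])                             # (batch, feature) input
mean = tf.keras.layers.Dense(3)(features)                        # per-dimension location
std = tf.nn.softplus(tf.keras.layers.Dense(3)(features)) + 1e-2  # positive scale
dist = tfd.Independent(tfd.TruncatedNormal(mean, std, low=-1.0, high=1.0), 1)
sample = dist.sample()            # shape (8, 3), values within [-1, 1]
log_prob = dist.log_prob(sample)  # shape (8,), one value per batch element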
Example #2
def test_Wilson(mc_samples):
    centric = np.random.randint(0, 2, 100).astype(np.float32)
    epsilon = np.random.randint(1, 6, 100).astype(np.float32)
    prior = WilsonPrior(centric, epsilon)
    F = np.random.random(100).astype(np.float32)
    probs = prior.prob(F)
    log_probs = prior.log_prob(F)
    assert np.all(np.isfinite(probs))
    assert np.all(np.isfinite(log_probs))

    # This part checks indexing and gradient numerics
    q = tfd.TruncatedNormal(  # use a truncated normal because the Wilson prior has positive support
        tf.Variable(prior.mean()),
        tfp.util.TransformedVariable(
            prior.stddev(),
            tfp.bijectors.Softplus(),
        ),
        low=1e-5,
        high=1e10,
    )
    with tf.GradientTape() as tape:
        z = q.sample(mc_samples)
        log_probs = prior.log_prob(z)
    grads = tape.gradient(log_probs, q.trainable_variables)

    assert np.all(np.isfinite(log_probs))
    for grad in grads:
        assert np.all(np.isfinite(grad))
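The same surrogate-posterior pattern recurs in the following examples: a TruncatedNormal whose scale stays positive through a Softplus-transformed variable, sampled under a GradientTape to check gradient numerics. A stripped-down sketch using plain TFP (the size 100, sample count, and objective are placeholders standing in for the project-specific priors):

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors

loc = tf.Variable(tf.ones(100))
scale = tfp.util.TransformedVariable(0.1 * tf.ones(100), tfb.Softplus())
q = tfd.TruncatedNormal(loc, scale, low=1e-5, high=1e10)

with tf.GradientTape() as tape:
    z = q.sample(16)                       # reparameterized samples, shape (16, 100)
    loss = -tf.reduce_mean(q.log_prob(z))  # placeholder objective
grads = tape.gradient(loss, q.trainable_variables)  # w.r.t. loc and the pre-Softplus scale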
Example #3
def ReferencePrior_test(p, ref, mc_samples):
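    # Note: Fobs, SigFobs, and the boolean mask `observed` are presumably
    # module-level arrays defined in the enclosing test file.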
    # This part checks indexing and gradient numerics
    q = tfd.TruncatedNormal(  # use a truncated normal because RW has positive support
        tf.Variable(Fobs),
        tfp.util.TransformedVariable(
            SigFobs,
            tfp.bijectors.Softplus(),
        ),
        low=1e-5,
        high=1e10,
    )
    with tf.GradientTape() as tape:
        z = q.sample(mc_samples)
        log_probs = p.log_prob(z)
    grads = tape.gradient(log_probs, q.trainable_variables)

    assert np.all(np.isfinite(log_probs))
    for grad in grads:
        assert np.all(np.isfinite(grad))
    assert np.all(log_probs.numpy()[..., ~observed] == 0.)

    #This tests that the observed values follow the correct distribution
    z = ref.sample(mc_samples)
    expected = ref.log_prob(z).numpy()[..., observed]
    result = p.log_prob(z).numpy()[..., observed]
    assert np.allclose(expected, result, atol=1e-5)
Example #4
def test_laue(likelihood_model, prior_model, scaling_model, laue_inputs,
              mc_samples):
    nrefls = np.max(BaseModel.get_refl_id(laue_inputs)) + 1
    n_images = np.max(BaseModel.get_image_id(laue_inputs)) + 1

    # Degrees of freedom for the Student's t likelihood
    dof = 4.
    if likelihood_model == StudentTLikelihood:
        likelihood = likelihood_model(dof)
    else:
        likelihood = likelihood_model()

    if prior_model == WilsonPrior:
        prior = prior_model(
            np.random.choice([True, False], nrefls),
            np.ones(nrefls).astype('float32'),
        )
    elif prior_model == StudentTReferencePrior:
        prior = prior_model(
            np.ones(nrefls).astype('float32'),
            np.ones(nrefls).astype('float32'), dof)
    else:
        prior = prior_model(
            np.ones(nrefls).astype('float32'),
            np.ones(nrefls).astype('float32'),
        )

    mlp_scaler = MLPScaler(2, 3)
    if scaling_model == HybridImageScaler:
        image_scaler = ImageScaler(n_images)
        scaler = HybridImageScaler(mlp_scaler, image_scaler)
    elif scaling_model == MLPScaler:
        scaler = mlp_scaler

    surrogate_posterior = tfd.TruncatedNormal(
        tf.Variable(prior.mean()),
        tfp.util.TransformedVariable(
            prior.stddev() / 10.,
            tfb.Softplus(),
        ),
        low=1e-5,
        high=1e10,
    )

    merger = VariationalMergingModel(surrogate_posterior, prior, likelihood,
                                     scaler, mc_samples)
    ipred = merger(laue_inputs)

    isfinite = np.all(np.isfinite(ipred.numpy()))
    assert isfinite

    merger = VariationalMergingModel(surrogate_posterior, prior, likelihood,
                                     scaler)
    merger.compile('Adam')
Example #5
def __init__(self, data, options):
    self.kernel = options.kernel
    self.options = options
    self.τ = data.τ
    self.N_p = data.τ.shape[0]
    self.num_tfs = data.f_obs.shape[1]
    t_1, t_2 = get_time_square(self.τ, self.N_p)
    self.t_dist = t_1 - t_2
    self.tt = t_1 * t_2
    self.t2 = tf.square(t_1)
    self.tprime2 = tf.square(t_2)
    self.fixed_dist = FixedDistribution(
        tf.ones(self.num_tfs, dtype='float64'))
    min_dist = min(data.t[1:] - data.t[:-1])
    min_dist = max(min_dist, 1.)
    self._ranges = {
        'rbf': [
            (f64(1e-4), f64(5)),  # 1 + max(np.var(data.f_obs, axis=2))
            (f64(min_dist**2) - 1.2, f64(data.t[-1]**2))
        ],
        'mlp': [(f64(1), f64(10)), (f64(3.5), f64(20))],
    }
    self._priors = {
        'rbf': [
            tfd.Uniform(f64(1), f64(20)),
            tfd.Uniform(f64(min_dist**2), f64(10))
        ],
        'mlp': [
            tfd.Uniform(f64(3.5), f64(10)),
            tfd.InverseGamma(f64(0.01), f64(0.01))
        ],
    }
    v_prop = lambda v: tfd.TruncatedNormal(v, 0.007, low=0, high=100)
    l2_prop = lambda l2: tfd.TruncatedNormal(l2, 0.007, low=0, high=100)
    proposals = [v_prop, l2_prop]
    self._proposals = {
        'rbf': proposals,
    }
    self._names = {'rbf': ['v', 'l2'], 'mlp': ['w', 'b']}
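Here v_prop and l2_prop are truncated-normal random-walk proposals for positive kernel hyperparameters. Because such a proposal is asymmetric near its bounds, a Metropolis-Hastings step has to include the proposal densities in the acceptance ratio; a schematic sketch (mh_step and the InverseGamma target are illustrative, not taken from the project):

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

def mh_step(current, log_target, step_size=0.007):
    proposal = lambda mu: tfd.TruncatedNormal(mu, step_size, low=0., high=100.)
    candidate = proposal(current).sample()
    log_alpha = (log_target(candidate) - log_target(current)
                 + proposal(candidate).log_prob(current)   # q(current | candidate)
                 - proposal(current).log_prob(candidate))  # q(candidate | current)
    accept = tf.math.log(tf.random.uniform([], dtype=log_alpha.dtype)) < log_alpha
    return tf.where(accept, candidate, current)

target = tfd.InverseGamma(0.01, 0.01)  # stand-in for the hyperparameter posterior
state = tf.constant(1.0)
for _ in range(100):
    state = mh_step(state, target.log_prob)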
Example #6
def feed_forward(
    features, data_shape, num_layers=2, activation=tf.nn.relu,
    mean_activation=None, stop_gradient=False, trainable=True, units=100,
    std=1.0, low=-1.0, high=1.0, dist='normal', min_std=1e-2, init_std=1.0):
  hidden = features
  if stop_gradient:
    hidden = tf.stop_gradient(hidden)
  for _ in range(num_layers):
    hidden = tf.layers.dense(hidden, units, activation, trainable=trainable)
  mean = tf.layers.dense(
      hidden, int(np.prod(data_shape)), mean_activation, trainable=trainable)
  mean = tf.reshape(mean, tools.shape(features)[:-1] + data_shape)
  if std == 'learned':
    std = tf.layers.dense(
        hidden, int(np.prod(data_shape)), None, trainable=trainable)
    init_std = np.log(np.exp(init_std) - 1)
    std = tf.nn.softplus(std + init_std) + min_std
    std = tf.reshape(std, tools.shape(features)[:-1] + data_shape)
  if dist == 'normal':
    dist = tfd.Normal(mean, std)
    dist = tfd.Independent(dist, len(data_shape))
  elif dist == 'deterministic':
    dist = tfd.Deterministic(mean)
    dist = tfd.Independent(dist, len(data_shape))
  elif dist == 'binary':
    dist = tfd.Bernoulli(mean)
    dist = tfd.Independent(dist, len(data_shape))
  elif dist == 'trunc_normal':
    # https://www.desmos.com/calculator/rnksmhtgui
    dist = tfd.TruncatedNormal(mean, std, low, high)
    dist = tfd.Independent(dist, len(data_shape))
  elif dist == 'tanh_normal':
    # https://www.desmos.com/calculator/794s8kf0es
    dist = distributions.TanhNormal(mean, std)
  elif dist == 'tanh_normal_tanh':
    # https://www.desmos.com/calculator/794s8kf0es
    mean = 5.0 * tf.tanh(mean / 5.0)
    dist = distributions.TanhNormal(mean, std)
  elif dist == 'onehot_score':
    dist = distributions.OneHot(mean, gradient='score')
  elif dist == 'onehot_straight':
    dist = distributions.OneHot(mean, gradient='straight')
  else:
    raise NotImplementedError(dist)
  return dist
Example #7
File: mh.py  Project: mossjacob/trangp
    def __init__(self, data, options):
        self.data = data
        min_dist = min(data.t[1:] - data.t[:-1])
        self.N_p = data.τ.shape[0]
        self.N_m = data.t.shape[0]  # Number of observations
        self.num_replicates = data.f_obs.shape[0]
        self.num_tfs = data.f_obs.shape[1]
        self.num_genes = data.m_obs.shape[1]

        self.kernel_selector = GPKernelSelector(data, options)
        self.likelihood = TranscriptionLikelihood(data, options)
        self.options = options
        # Adaptable variances
        a = tf.constant(-0.5, dtype='float64')
        b2 = tf.constant(2., dtype='float64')
        self.h_f = 0.15 * tf.ones(self.N_p, dtype='float64')

        # Interaction weights
        w_0 = Parameter('w_0',
                        tfd.Normal(0, 2),
                        np.zeros(self.num_genes),
                        step_size=0.2 *
                        tf.ones(self.num_genes, dtype='float64'))
        w_0.proposal_dist = lambda mu, j: tfd.Normal(mu, w_0.step_size[j])
        w = Parameter('w',
                      tfd.Normal(0, 2),
                      1 * np.ones((self.num_genes, self.num_tfs)),
                      step_size=0.2 * tf.ones(self.num_genes, dtype='float64'))
        w.proposal_dist = lambda mu, j: tfd.Normal(
            mu, w.step_size[j])  # w_j; at the moment this is the same as w_j0 (see pg. 8)
        # Latent function
        fbar = Parameter(
            'fbar', self.fbar_prior, 0.5 * np.ones(
                (self.num_replicates, self.num_tfs, self.N_p)))

        # GP hyperparameters
        V = Parameter('V',
                      tfd.InverseGamma(f64(0.01), f64(0.01)),
                      f64(1),
                      step_size=0.05,
                      fixed=not options.tf_mrna_present)
        V.proposal_dist = lambda v: tfd.TruncatedNormal(
            v, V.step_size, low=0, high=100)  # v_i; fixed to 1 if the translation model is not used (pg. 8)
        L = Parameter('L',
                      tfd.Uniform(f64(min_dist**2 - 0.5), f64(data.t[-1]**2)),
                      f64(4),
                      step_size=0.05)  # TODO auto set
        L.proposal_dist = lambda l2: tfd.TruncatedNormal(
            l2, L.step_size, low=0, high=100)  #l2_i

        # Translation kinetic parameters
        δbar = Parameter('δbar', tfd.Normal(a, b2), f64(-0.3), step_size=0.05)
        δbar.proposal_dist = lambda mu: tfd.Normal(mu, δbar.step_size)
        # White noise for genes
        σ2_m = Parameter('σ2_m',
                         tfd.InverseGamma(f64(0.01), f64(0.01)),
                         1e-4 * np.ones(self.num_genes),
                         step_size=0.01)
        σ2_m.proposal_dist = lambda mu: tfd.TruncatedNormal(
            mu, σ2_m.step_size, low=0, high=0.1)
        # Transcription kinetic parameters
        constraint_index = 2 if self.options.initial_conditions else 1

        def constrain_kbar(kbar, gene):
            '''Constrains a given row in kbar'''
            # if gene == 3:
            #     kbar[constraint_index] = np.log(0.8)
            #     kbar[constraint_index+1] = np.log(1.0)
            kbar[kbar < -10] = -10
            kbar[kbar > 3] = 3
            return kbar

        num_var = 4 if self.options.initial_conditions else 3
        kbar_initial = -0.1 * np.ones(
            (self.num_genes, num_var), dtype='float64')
        for j, k in enumerate(kbar_initial):
            kbar_initial[j] = constrain_kbar(k, j)
        kbar = Parameter('kbar',
                         tfd.Normal(a, b2),
                         kbar_initial,
                         constraint=constrain_kbar,
                         step_size=0.05 * tf.ones(num_var, dtype='float64'))
        kbar.proposal_dist = lambda mu: tfd.MultivariateNormalDiag(
            mu, kbar.step_size)

        if not options.preprocessing_variance:
            σ2_f = Parameter('σ2_f',
                             tfd.InverseGamma(f64(0.01), f64(0.01)),
                             1e-4 * np.ones(self.num_tfs),
                             step_size=tf.constant(0.5, dtype='float64'))
            super().__init__(
                TupleParams_pre(fbar, δbar, kbar, σ2_m, w, w_0, L, V, σ2_f))
        else:
            super().__init__(TupleParams(fbar, δbar, kbar, σ2_m, w, w_0, L, V))
Example #8
from tensorflow_probability import distributions as tfd
from scipy.stats.distributions import truncnorm
import numpy as np


class TruncNorm():
    @staticmethod
    def posterior(loc, scale, low=0., high=1e32):
        # scipy's truncnorm takes its bounds in standard-deviation units
        # relative to loc, so convert before building the frozen distribution.
        a, b = (low - loc) / scale, (high - loc) / scale
        return truncnorm(a, b, loc=loc, scale=scale)


if __name__ == "__main__":

    X = np.linspace(0., 10., 1000)
    n = 10
    loc, scale = np.random.random(size=(2, n))

    expected = tfd.TruncatedNormal(loc[:, None], scale[:, None], 0.,
                                   1e32).log_prob(X).numpy()
    test = TruncNorm.posterior(loc[:, None], scale[:, None]).logpdf(X)

    from IPython import embed
    embed()
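With the standardized bounds in TruncNorm.posterior, the two log-densities should agree closely; instead of inspecting them in the embedded IPython session, a direct check (a suggestion, not part of the original script) could be:

np.testing.assert_allclose(test, expected, rtol=1e-4, atol=1e-5)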