Example #1
def sample(z, size, c1=None, c2=None, c3=None, c4=None, c5=None):
    """Append five continuous latent codes to the base noise z: each code is
    fixed to the given value when provided, otherwise drawn from U(-1, 1)."""
    if c1 is not None:
        z_con1 = np.array([c1] * size)
        z_con1 = np.reshape(z_con1, [size, 1])
    else:
        z_con1 = tfd.Uniform(low=-1.0, high=1.0).sample([size, 1])
    if c2 is not None:
        z_con2 = np.array([c2] * size)
        z_con2 = np.reshape(z_con2, [size, 1])
    else:
        z_con2 = tfd.Uniform(low=-1.0, high=1.0).sample([size, 1])
    if c3 is not None:
        z_con3 = np.array([c3] * size)
        z_con3 = np.reshape(z_con3, [size, 1])
    else:
        z_con3 = tfd.Uniform(low=-1.0, high=1.0).sample([size, 1])
    if c4 is not None:
        z_con4 = np.array([c4] * size)
        z_con4 = np.reshape(z_con4, [size, 1])
    else:
        z_con4 = tfd.Uniform(low=-1.0, high=1.0).sample([size, 1])
    if c5 is not None:
        z_con5 = np.array([c5] * size)
        z_con5 = np.reshape(z_con5, [size, 1])
    else:
        z_con5 = tfd.Uniform(low=-1.0, high=1.0).sample([size, 1])
    noise = tf.concat([z, z_con1, z_con2, z_con3, z_con4, z_con5], axis=-1)
    return noise
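A minimal usage sketch (the aliases below — np, tf, tfd — are the ones these examples assume throughout; the 128-dim base noise and c1=0.5 are illustrative):

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

z = tfd.Uniform(low=-1.0, high=1.0).sample([4, 128])  # base noise batch
noise = sample(z, 4, c1=0.5)  # pin the first code, sample the other four
print(noise.shape)  # (4, 133): 128 base dims + 5 continuous codes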
Example #2
def sample(size, cat=-1, c1=None, c2=None):
    """Build InfoGAN-style noise: a 62-dim base vector, two continuous codes
    (fixed when c1/c2 are given, else U(-1, 1)) and a 10-way categorical code
    (fixed when cat >= 0, else sampled uniformly)."""
    z = tfd.Uniform(low=-1.0, high=1.0).sample([size, 62])

    if c1 is not None:
        z_con1 = np.array([c1] * size)
        z_con1 = np.reshape(z_con1, [size, 1])
    else:
        z_con1 = tfd.Uniform(low=-1.0, high=1.0).sample([size, 1])

    if c2 is not None:
        z_con2 = np.array([c2] * size)
        z_con2 = np.reshape(z_con2, [size, 1])
    else:
        z_con2 = tfd.Uniform(low=-1.0, high=1.0).sample([size, 1])

    if cat >= 0:
        z_cat = np.array([cat] * size)
        z_cat = tf.one_hot(z_cat, 10)
    else:
        z_cat = tfd.Categorical(probs=tf.ones([10]) * 0.1).sample([size])
        z_cat = tf.one_hot(z_cat, 10)

    noise = tf.concat([z, z_con1, z_con2, z_cat], axis=-1)

    return noise, z_con1, z_con2, z_cat
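A quick check with the same aliases: fixing cat pins the one-hot class while both continuous codes stay random (the batch size is illustrative):

noise, z_con1, z_con2, z_cat = sample(16, cat=3)
print(noise.shape)  # (16, 74): 62 base dims + 2 continuous codes + 10 one-hot
print(tf.argmax(z_cat, axis=-1))  # every entry is 3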
Example #3
def setup_and_run_hmc(threadid):
    np.random.seed(threadid)
    tf.random.set_seed(threadid)

    def sp(x):
        # softplus transform (the optional small positive shift is disabled)
        return tf.nn.softplus(x)  # + 1e-4

    def periodic_kernel(x1, x2):
        # periodic kernel with free amplitude (x1) and length scale (x2);
        # the period is fixed at 1 to encode a daily activity pattern
        return tfp.math.psd_kernels.ExpSinSquared(x1, x2, np.float64(1.0))

    # initial value of kernel amplitude
    aparams_init = [0.0, 1.0, 0.1]

    lparams_init = [0.0]

    # transform for parameter to ensure positive
    atransforms = [sp, sp]

    # prior distribution on parameter
    apriors = [
        tfd.Uniform(low=np.float64(-100.), high=np.float64(100.0)),
        tfd.Normal(loc=np.float64(0.), scale=np.float64(10.)),
        tfd.Normal(loc=np.float64(0.), scale=np.float64(10.))
    ]

    lpriors = [tfd.Uniform(low=np.float64(-100.), high=np.float64(100.0))]

    # create the model
    mover = moveNS(T,
                   X,
                   Z,
                   BATCH_SIZE=1000,
                   lparams_init=lparams_init,
                   lpriors=lpriors,
                   akernel=periodic_kernel,
                   aparams_init=aparams_init,
                   apriors=apriors,
                   atransforms=atransforms)

    start = time.time()

    # sample from the posterior
    mover.hmc_sample(num_samples=2000, skip=0, burn_in=1000)
    end = time.time()

    z_len = 720

    # process the samples and save them for the amplitude kernel
    Zp = T[:z_len]
    amps = np.power(mover.get_amplitude_samples(X=Zp), 2)
    np.save('data/amps_periodic_' + str(threadid) + '.npy', amps)
    print(threadid, end - start)
Example #4
    def _model():
        p = yield Root(tfd.Beta(dtype(1), dtype(1), name="p"))
        gamma_C = yield Root(tfd.Beta(dtype(1), dtype(1), name="gamma_C"))
        gamma_T = yield Root(tfd.Beta(dtype(1), dtype(1), name="gamma_T"))
        eta_C = yield Root(tfd.Dirichlet(np.ones(K, dtype=dtype) / K,
                                         name="eta_C"))
        eta_T = yield Root(tfd.Dirichlet(np.ones(K, dtype=dtype) / K,
                                         name="eta_T"))
        loc = yield Root(tfd.Sample(tfd.Normal(dtype(0), dtype(1)),
                                    sample_shape=K, name="loc"))
        nu = yield Root(tfd.Sample(tfd.Uniform(dtype(10), dtype(50)),
                                   sample_shape=K, name="nu"))
        phi = yield Root(tfd.Sample(tfd.Normal(dtype(m_phi), dtype(s_phi)),
                                    sample_shape=K, name="phi"))
        sigma_sq = yield Root(tfd.Sample(tfd.InverseGamma(dtype(3), dtype(2)),
                                         sample_shape=K, name="sigma_sq"))
        scale = tf.sqrt(sigma_sq)  # tf.sqrt: np.sqrt would fail in graph mode

        gamma_T_star = compute_gamma_T_star(gamma_C, gamma_T, p)
        eta_T_star = compute_eta_T_star(gamma_C[..., tf.newaxis],
                                        gamma_T[..., tf.newaxis],
                                        eta_C, eta_T,
                                        p[..., tf.newaxis],
                                        gamma_T_star[..., tf.newaxis])

        # likelihood
        y_C = yield mix(nC, eta_C, loc, scale, name="y_C")
        n0C = yield tfd.Binomial(nC, gamma_C, name="n0C")
        y_T = yield mix(nT, eta_T_star, loc, scale, name="y_T")
        n0T = yield tfd.Binomial(nT, gamma_T_star, name="n0T")
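The snippet above leans on helpers from its source project (mix, compute_gamma_T_star, K, nC, nT and friends). A self-contained sketch of the same JointDistributionCoroutine pattern, where Root marks variables with no upstream dependencies:

Root = tfd.JointDistributionCoroutine.Root

def toy_model():
    p = yield Root(tfd.Beta(1., 1., name="p"))  # root: no parents
    y = yield tfd.Binomial(total_count=10., probs=p, name="y")  # depends on p

joint = tfd.JointDistributionCoroutine(toy_model)
draw = joint.sample()      # structure with fields p and y
lp = joint.log_prob(draw)  # joint log-density of the draw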
Example #5
 def actor(self, feat):
   shape = feat.shape[:-1] + [self._config.num_actions]
   if self._config.actor_dist == 'onehot':
     return tools.OneHotDist(tf.zeros(shape))
   else:
     ones = tf.ones(shape, self._float)
     return tfd.Uniform(-ones, ones)
Example #6
 def actor(self, feat):
     shape = feat.shape[:-1] + self.act_space.shape
     if self.config.actor.dist == 'onehot':
         return common.OneHotDist(tf.zeros(shape))
     else:
         dist = tfd.Uniform(-tf.ones(shape), tf.ones(shape))
         return tfd.Independent(dist, 1)
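The Independent wrapper is what separates this variant from Example #5: it folds the action dimension into the event shape, so log_prob returns one value per batch element instead of one per action component. A quick check:

base = tfd.Uniform(-tf.ones([2, 3]), tf.ones([2, 3]))
print(base.log_prob(tf.zeros([2, 3])).shape)                      # (2, 3)
print(tfd.Independent(base, 1).log_prob(tf.zeros([2, 3])).shape)  # (2,)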
Example #7
    def _base_dist(self, *args, **kwargs):
        """
        Weibull base distribution.

        The inverse of the Weibull bijector applied to a U[0, 1] random
        variable gives a Weibull-distributed random variable.
        """
        return tfd.TransformedDistribution(
            distribution=tfd.Uniform(low=0.0, high=1.0),
            bijector=tfp.bijectors.Invert(
                tfp.bijectors.Weibull(*args, **kwargs)),
            name="Weibull",
        )
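Newer TFP releases renamed the Weibull bijector to WeibullCDF (Example #11 below uses that name). An equivalent sketch with illustrative parameters:

weibull = tfd.TransformedDistribution(
    distribution=tfd.Uniform(low=0.0, high=1.0),
    bijector=tfp.bijectors.Invert(
        tfp.bijectors.WeibullCDF(scale=1.0, concentration=2.0)),
    name="Weibull",
)
samples = weibull.sample(1000)  # inverse-CDF draws, Weibull-distributed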
Example #8
 def __init__(self, data, options):
     self.kernel = options.kernel
     self.options = options
     self.τ = data.τ
     self.N_p = data.τ.shape[0]
     self.num_tfs = data.f_obs.shape[1]
     t_1, t_2 = get_time_square(self.τ, self.N_p)
     self.t_dist = t_1 - t_2
     self.tt = t_1 * t_2
     self.t2 = tf.square(t_1)
     self.tprime2 = tf.square(t_2)
     self.fixed_dist = FixedDistribution(
         tf.ones(self.num_tfs, dtype='float64'))
     min_dist = min(data.t[1:] - data.t[:-1])
     min_dist = max(min_dist, 1.)
     self._ranges = {
         'rbf': [
             (f64(1e-4), f64(5)),  #1+max(np.var(data.f_obs, axis=2))
             (f64(min_dist**2) - 1.2, f64(data.t[-1]**2))
         ],
         'mlp': [(f64(1), f64(10)), (f64(3.5), f64(20))],
     }
     self._priors = {
         'rbf': [
             tfd.Uniform(f64(1), f64(20)),
             tfd.Uniform(f64(min_dist**2), f64(10))
         ],
         'mlp': [
             tfd.Uniform(f64(3.5), f64(10)),
             tfd.InverseGamma(f64(0.01), f64(0.01))
         ],
     }
     v_prop = lambda v: tfd.TruncatedNormal(v, 0.007, low=0, high=100)
     l2_prop = lambda l2: tfd.TruncatedNormal(l2, 0.007, low=0, high=100)
     proposals = [v_prop, l2_prop]
     self._proposals = {
         'rbf': proposals,
     }
     self._names = {'rbf': ['v', 'l2'], 'mlp': ['w', 'b']}
Example #9
def sample_batch(n_samples, max_dim, max_power, dtype):
    """ Calculate a sum of distance samples for various dimensions and norms

    Generates a batch of *n_samples* of pairs of points, and uses those points
    to build a table of 1 to *max_dim* rows and 1 to *max_power* columns,
    where each entry is the sum across all samples of the distance metric
    with the given power for pairs of points in the given dimensional
    hypercube.
    """
    # create two lists of random points and their vector difference,
    # where the first dimension is the sample number and the second the
    # coordinates for the sample
    zero = tf.zeros((), dtype=dtype)  # used to pass in dtype to Uniform
    x1s = tfd.Uniform(low=zero).sample((n_samples, max_dim))
    x2s = tfd.Uniform(low=zero).sample((n_samples, max_dim))
    vector_difference = x2s - x1s

    # add a third dimension to the tensor which is the vector coordinate
    # raised to ascending powers (eg x, x^2, x^3...)
    sum_terms = tf.abs(
        tfm.cumprod(tf.tile(
            tf.reshape(vector_difference, (n_samples, max_dim, 1)),
            (1, 1, max_power)),
                    axis=2))

    # generate cumulative sums along the coordinate (second) dimension
    # and raise the sum to the 1/n power (where n is the third dimension)
    # to generate a new tensor where the first dimension is the samples,
    # the second the dimension of the hypercube, and the third the power
    # of the norm.
    norm = tfm.pow(
        tfm.cumsum(sum_terms, axis=1),
        tf.reshape(1 / tf.cast(tf.range(1, max_power + 1), dtype=dtype),
                   (1, 1, max_power)))

    # return the sum of the norms
    return tf.reduce_sum(norm, axis=0)
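This function assumes tfm aliases tf.math. Because it returns per-cell sums over samples, dividing by the sample count gives a Monte Carlo estimate of the mean p-norm distance between uniform points; a usage sketch:

tfm = tf.math

n_samples = 10000
table = sample_batch(n_samples, max_dim=3, max_power=4, dtype=tf.float64)
mean_dist = table / n_samples  # row d, column p: mean p-norm distance in [0, 1]^d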
Example #10
def get_one_sample_untransformed(shape, distribution_type, distribution_params,
                                 seed):
    """Get one untransoformed sample."""
    if distribution_type == DistributionType.UNIFORM:
        low, high = distribution_params["low"], distribution_params["high"]
        distribution = tfd.Uniform(low=tf.constant(low, shape=shape[1:]),
                                   high=tf.constant(
                                       high,
                                       shape=shape[1:],
                                   ))
        sample = distribution.sample(shape[0], seed=seed)
    elif distribution_type == DistributionType.LOG_UNIFORM:
        low, high = distribution_params["low"], distribution_params["high"]
        distribution = tfd.Uniform(low=tf.constant(np.log(low),
                                                   shape=shape[1:],
                                                   dtype=tf.float32),
                                   high=tf.constant(np.log(high),
                                                    shape=shape[1:],
                                                    dtype=tf.float32))
        sample = tf.exp(distribution.sample(shape[0], seed=seed))
    else:
        raise ValueError(
            "Unknown distribution type {}".format(distribution_type))
    return sample
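The LOG_UNIFORM branch samples uniformly in log space and exponentiates, which gives a density proportional to 1/x on [low, high] — handy for scale-type hyperparameters. The core trick in isolation (bounds are illustrative):

low, high = 1e-4, 1e-1
u = tfd.Uniform(low=np.log(low), high=np.log(high)).sample(5, seed=42)
draws = tf.exp(u)  # log-uniform: clustered toward the low end on a linear scale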
Example #11
    def _init_distribution(conditions):

        concentration, scale = conditions["concentration"], conditions["scale"]

        scale_tensor, concentration_tensor = (
            tf.convert_to_tensor(scale),
            tf.convert_to_tensor(concentration),
        )
        broadcast_shape = dist_util.prefer_static_broadcast_shape(
            scale_tensor.shape, concentration_tensor.shape
        )

        return tfd.TransformedDistribution(
            distribution=tfd.Uniform(low=tf.zeros(broadcast_shape), high=tf.ones(broadcast_shape)),
            bijector=bij.Invert(bij.WeibullCDF(scale=scale, concentration=concentration)),
            name="Weibull",
        )
Example #12
def polar1(save=False):

    input_dim = 2
    inn = INN(64, input_dim, -1.0, 1.0)
    inn.load_weights("./network/polar1.tf")

    inn.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001,
                                                   beta_1=0.99,
                                                   beta_2=0.999,
                                                   amsgrad=False),
                loss='mse',
                metrics=['accuracy'])

    loc1 = np.asarray([3.0, 0.0])
    loc2 = np.zeros(6)
    s = tfd.Sample(tfd.Uniform(low=[0.1, -5.0], high=[5.0, 5.0]))
    s2 = tfd.Sample(tfd.Normal(loc=loc2, scale=0.05))

    for i in range(0):  # training loop disabled; pretrained weights were loaded above
        x, y = s.sample(10000).numpy().T
        x_t = np.asarray([x, y]).T
        r, phi = polar(x, y)
        y_t = np.asarray([r, phi]).T
        inn.fit(x_t, y_t, epochs=2, batch_size=256, verbose=2)

    x, y = np.random.normal(4.0, 1.0, (600)), np.random.normal(0.0, 1.0, (600))
    x_t = np.asarray([x, y]).T  # network input
    r, phi = polar(x, y)

    rp, phip = inn.predict(x_t).T
    y_t = np.asarray([r, phi]).T  #inverse network input
    xp, yp = inn.inv(y_t).numpy().T

    # print(np.mean(np.abs(r-rr)))
    # print(np.mean(np.abs(phi-rphi)))

    # for polar1forward------------------------------------------------------------
    fig = py.figure(figsize=(11, 9))
    fig.subplots_adjust(left=0.2, right=0.95, bottom=0.2, top=0.95)

    ax = fig.add_subplot(111)
    ax.plot(rp * np.cos(phip), rp * np.sin(phip), 'ro', label='transformed')
    ax.plot(x, y, 'go', label='original')

    ax.set_xlabel("X", fontsize=30)
    ax.set_ylabel("Y", fontsize=30)
    ax.tick_params(axis='both', which='major', labelsize=29)
    ax.legend(fontsize=25)
    if save:
        fig.savefig('polar1forward.png', dpi=300)

    # for polar1backward -----------------------------------------------------
    fig = py.figure(figsize=(11, 9))
    fig.subplots_adjust(left=0.2, right=0.95, bottom=0.2, top=0.95)

    ax = fig.add_subplot(111)
    ax.plot(xp, yp, 'ro', label='inverse transformed')
    ax.plot(x, y, 'go', label='original')

    ax.set_xlabel("X", fontsize=30)
    ax.set_ylabel("Y", fontsize=30)
    ax.tick_params(axis='both', which='major', labelsize=29)
    ax.legend(fontsize=25)
    if save:
        fig.savefig('polar1backward.png', dpi=300)
    # py.show()

    # for polar1mesh-----------------------------------------
    # rr, rphi = polar(xmesh,ymesh)
    # out = inn.predict(mesh)
    # r,phi = out.T
    # y_t = np.asarray([rr,rphi]).T
    # out = inn.inv(y_t).numpy()
    # xr,yr = out.T

    # fig = py.figure(figsize=(15,15))
    # fig.subplots_adjust(left=0.1,right = 0.9,bottom=0.2,top=0.8)

    # ax = fig.add_subplot(111)
    # ax.plot(r*np.cos(phi),r*np.sin(phi),'bo',label ='transformed')
    # ax.plot(xr,yr,'ro',label='inverse transform')
    # ax.plot(xmesh,ymesh,'go',label='original',alpha=0.5)

    # ax.set_xlabel("X",fontsize=25)
    # ax.set_ylabel("Y",fontsize=25)
    # ax.tick_params(axis='both', which='major', labelsize=24)
    # ax.legend(fontsize = 20)
    # if save:
    # fig.savefig('polar1mesh.png',dpi=300)

    py.show()

    # inn.save_weights('./network/polar1.tf')
    return None
Example #13
 def _base_dist(self, lower: TensorLike, upper: TensorLike, *args,
                **kwargs):
      return tfd.Uniform(*args, low=lower, high=upper, **kwargs)
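Since low and high broadcast against each other, this thin wrapper also covers batched bounds; a quick check:

u = tfd.Uniform(low=[0.0, 0.0], high=[1.0, 2.0])
print(u.batch_shape)  # (2,): one independent Uniform per bound pair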
Example #14
def make_model(well_complex, well_ligand,
               fi_complex, fi_ligand, debug = False, kernel = 'random_walk_metropolis',
               num_results = 1000, num_burnin_steps = 300):

    """Build a tfp model for an assay that consists of N wells of protein:ligand
    at various concentrations and an additional N wells of ligand in buffer,
    with the ligand at the same concentrations as the corresponding protein:ligand wells.

    Parameters
    ----------
    well_complex : titration.SingleWell, the main titration cell
    well_ligand : titration.SingleWell, the reference cell
    fi_complex : np.ndarray, measured fluorescense intensity of complex cell
    fi_ligand : np.ndarray, measured fluorescense intensity of the reference cell
    debug :
         (Default value = False)
    kernel :
         (Default value = 'random_walk_metropolis')
    num_results :
         (Default value = 1000)
    num_burnin_steps :
         (Default value = 300)

    Returns
    -------

    """

    n_wells = fi_complex.shape[0]

    # define max and min fluorescence intensity
    fi_max = np.max([np.max(fi_complex), np.max(fi_ligand)])
    fi_min = np.min([np.min(fi_complex), np.min(fi_ligand)])

    # grab the path_length from the wells
    assert well_complex.path_length == well_ligand.path_length
    path_length = tf.constant(well_complex.path_length, dtype=tf.float32)

    # grab the concentrations rv from the complex well
    concs_p_complex_rv = well_complex.concs_p_rv
    concs_l_complex_rv = well_complex.concs_l_rv

    # grab the concentrations rv from the ligand plate
    concs_l_ligand_rv = well_ligand.concs_l_rv

    # the guesses, to be used as initial state
    delta_g_guess = tf.constant(-1.0, dtype=tf.float32)  # kT; use the value from ChEMBL
    concs_p_complex_guess = tf.constant(well_complex.concs[0, :], dtype=tf.float32)
    concs_l_complex_guess = tf.constant(well_complex.concs[1, :], dtype=tf.float32)
    concs_l_ligand_guess = tf.constant(well_ligand.concs[1, :], dtype=tf.float32)
    fi_pl_guess = tf.constant(np.true_divide(fi_max - fi_min,
                                np.min([np.max(concs_p_complex_guess), np.max(concs_l_complex_guess)])), dtype=tf.float32)
    fi_p_guess = tf.constant(fi_min, dtype=tf.float32)
    fi_l_guess = tf.constant(np.true_divide(fi_max-fi_min, np.max(concs_l_complex_guess)), dtype=tf.float32)
    fi_plate_guess = tf.constant(fi_min, dtype=tf.float32)
    fi_buffer_guess = tf.constant(np.true_divide(fi_min, path_length), dtype=tf.float32)
    fi_complex_guess = tf.constant(fi_complex, dtype=tf.float32)
    fi_ligand_guess = tf.constant(fi_ligand, dtype=tf.float32)
    # jeffrey_log_sigma_complex_guess = tf.constant(np.tile([0.5 * (np.log(fi_max)-10.0)], (n_wells,)), dtype=tf.float32)
    # jeffrey_log_sigma_ligand_guess = tf.constant(np.tile([0.5 * (np.log(fi_max)-10.0)], (n_wells,)), dtype=tf.float32)
    jeffrey_log_sigma_complex_guess = tf.constant(np.tile([np.log(2)], (n_wells,)), dtype=tf.float32)
    jeffrey_log_sigma_ligand_guess = tf.constant(np.tile([np.log(2)], (n_wells,)), dtype=tf.float32)
    #======================================================================
    # Define a whole bunch of rv
    #======================================================================

    # TODO: not sure which way is faster:
    # define rv inside or outside this function?

    # define free energy prior
    delta_g_rv = tfd.Uniform(low=tf.math.log(tf.constant(1e-15, dtype=tf.float32)), high=tf.constant(0.0, dtype=tf.float32))

    # define fluorescence intensity priors
    fi_plate_rv = tfd.Uniform(low=tf.constant(0.0, dtype=tf.float32), high=tf.constant(fi_max, dtype=tf.float32))
    fi_buffer_rv = tfd.Uniform(low=tf.constant(0.0, dtype=tf.float32), high=tf.constant(np.true_divide(fi_max, path_length), dtype=tf.float32))

    fi_pl_rv = tfd.Uniform(low=tf.constant(0.0, dtype=tf.float32), high=tf.constant(2*np.max([np.max(np.true_divide(fi_max, concs_p_complex_guess)),
                                                  np.max(np.true_divide(fi_max, concs_l_complex_guess))]), dtype=tf.float32))
    fi_p_rv = tfd.Uniform(low=tf.constant(0.0, dtype=tf.float32), high=tf.constant(2*np.max(np.true_divide(fi_max, concs_p_complex_guess)), dtype=tf.float32))
    fi_l_rv = tfd.Uniform(low=tf.constant(0.0, dtype=tf.float32), high=tf.constant(2*np.max(np.true_divide(fi_max, concs_l_complex_guess)), dtype=tf.float32))

    jeffrey_log_sigma_rv = tfd.Uniform(low=tf.constant(np.tile([-1.0], (n_wells,)), dtype=tf.float32), high=tf.constant(np.tile([np.log(10)], (n_wells,)), dtype=tf.float32))

    rvs = None
    trajs = None

    if debug:
        rvs = [
                delta_g_rv,
                fi_plate_rv, fi_buffer_rv,
                fi_pl_rv, fi_p_rv, fi_l_rv,
                concs_p_complex_rv, concs_l_complex_rv,
                concs_l_ligand_rv,
                jeffrey_log_sigma_rv
        ]

        trajs = [np.array([]) for dummy_idx in range(11)]


    current_state = [
                   delta_g_guess,
                   fi_plate_guess, fi_buffer_guess,
                   fi_pl_guess, fi_p_guess, fi_l_guess,
                   fi_complex_guess, fi_ligand_guess,
                   concs_p_complex_guess, concs_l_complex_guess,
                   concs_l_ligand_guess,
                   jeffrey_log_sigma_complex_guess, jeffrey_log_sigma_ligand_guess,
                  ]

    # define the joint_log_prob function to be used in MCMC
    def joint_log_prob(delta_g, # primary parameters to INFER
                       fi_plate = fi_plate_guess, fi_buffer = fi_buffer_guess,
                       fi_pl = fi_pl_guess, fi_p = fi_p_guess, fi_l = fi_l_guess, # fluorescence intensity to INFER
                       fi_complex = fi_complex_guess, fi_ligand = fi_ligand_guess,
                       concs_p_complex = concs_p_complex_guess, concs_l_complex = concs_l_complex_guess, # primary parameters to INFER
                       concs_l_ligand = concs_l_ligand_guess, # primary parameters to INFER
                       jeffrey_log_sigma_complex = jeffrey_log_sigma_complex_guess,
                       jeffrey_log_sigma_ligand = jeffrey_log_sigma_ligand_guess): # TODO: figure out a way to get rid of this


        #======================================================================
        # Calculate the relationships between the observed values,
        # the values to infer, and the constants.
        #======================================================================

        # using a binding model, get the true concentrations of protein, ligand,
        # and protein-ligand complex

        concs_p_, concs_l_, concs_pl_ = equilibrium_concentrations(delta_g, concs_p_complex, concs_l_complex)

        # predict observed fluorescence intensity
        fi_complex_ = fi_p * concs_p_ + fi_l * concs_l_ + fi_pl * concs_pl_ + path_length * fi_buffer + fi_plate
        fi_ligand_ = fi_l * concs_l_ligand + path_length * fi_buffer + fi_plate

        # make this rv inside the function, since it changes with jeffrey_log_sigma
        fi_complex_rv = tfd.Normal(loc=tf.constant(fi_complex, dtype=tf.float32), scale=tf.square(tf.exp(jeffrey_log_sigma_complex)))
        fi_ligand_rv = tfd.Normal(loc=tf.constant(fi_ligand, dtype=tf.float32), scale=tf.square(tf.exp(jeffrey_log_sigma_ligand)))

        #======================================================================
        # Sum up the log_prob.
        #======================================================================
        # NOTE: tfp's LogNormal is a Normal with an exp transform applied to
        # its input, so the log-scale parameters have to be transformed back
        # manually here.

        log_prob = (delta_g_rv.log_prob(delta_g) # initialize a log_prob
                 + fi_plate_rv.log_prob(fi_plate)
                 + fi_buffer_rv.log_prob(fi_buffer)
                 + fi_pl_rv.log_prob(fi_pl)
                 + fi_p_rv.log_prob(fi_p)
                 + fi_l_rv.log_prob(fi_l)
                 + tf.reduce_sum(fi_complex_rv.log_prob(fi_complex_))
                 + tf.reduce_sum(fi_ligand_rv.log_prob(fi_ligand_))
                 + concs_p_complex_rv.log_prob(tf.exp(concs_p_complex))
                 + concs_l_complex_rv.log_prob(tf.exp(concs_l_complex))
                 + concs_l_ligand_rv.log_prob(tf.exp(concs_l_ligand))
                 # + concs_p_complex_rv.log_prob(concs_p_complex)
                 # + concs_l_complex_rv.log_prob(concs_l_complex)
                 # + concs_l_ligand_rv.log_prob(concs_l_ligand)
                 + tf.reduce_sum(jeffrey_log_sigma_rv.log_prob(jeffrey_log_sigma_complex))
                 + tf.reduce_sum(jeffrey_log_sigma_rv.log_prob(jeffrey_log_sigma_ligand)))

        if debug:  # record trajectories of the inference
            for idx, value in enumerate([delta_g, fi_plate, fi_buffer, fi_pl, fi_p, fi_l, concs_p_complex,
                    concs_l_complex, concs_l_ligand, jeffrey_log_sigma_complex, jeffrey_log_sigma_ligand]):
                if value.ndim == 0:
                    trajs[idx] = np.append(trajs[idx], value.numpy())
                else:
                    if trajs[idx].size == 0:
                        trajs[idx] = np.expand_dims(value.numpy(), axis=0)
                    else:
                        trajs[idx] = np.concatenate([trajs[idx], np.expand_dims(value.numpy(), axis=0)], axis=0)

        return log_prob

    # HamiltonianMonteCarlo implementation
    if kernel == 'hamiltonian_monte_carlo':
        # put the log_prob function and initial guesses into a mcmc chain
        chain_states, kernel_results = tfp.mcmc.sample_chain(
            num_results=int(num_results),
            num_burnin_steps=int(num_burnin_steps),
            parallel_iterations=1,
            current_state=current_state,
            kernel=tfp.mcmc.TransformedTransitionKernel(
                bijector=[
                          tfp.bijectors.Identity(), # delta_g
                          tfp.bijectors.Identity(), tfp.bijectors.Identity(), #fi_plate, fi_buffer
                          tfp.bijectors.Identity(), tfp.bijectors.Identity(), tfp.bijectors.Identity(), # fi_pl, fi_p, fi_l
                          tfp.bijectors.Identity(), tfp.bijectors.Identity(), # fi_complex, fi_ligand
                          tfp.bijectors.Tanh(), tfp.bijectors.Tanh(), # concs_p_complex, concs_l_complex
                          tfp.bijectors.Tanh(), # concs_l_ligand
                          tfp.bijectors.Identity(), tfp.bijectors.Identity() # jeffrey_log_sigma_complex, jeffrey_log_sigma_ligand
                         ],
                inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
                target_log_prob_fn=joint_log_prob,
                num_leapfrog_steps=2,
                step_size=tf.Variable(0.5),
                step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy()
                )))

    elif kernel == 'random_walk_metropolis':
        # RandomWalkMetropolis implementation
        chain_states, kernel_results = tfp.mcmc.sample_chain(
                     num_results=int(num_results),
                     current_state=current_state,
                     kernel=tfp.mcmc.RandomWalkMetropolis(joint_log_prob),
                     num_burnin_steps=int(num_burnin_steps),
                     parallel_iterations=1)

    return chain_states, kernel_results, rvs, trajs
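The full model needs the titration helpers it references; a self-contained miniature with the same random-walk Metropolis structure (one Uniform prior, one Normal observation — all values illustrative):

prior = tfd.Uniform(low=-5.0, high=5.0)
obs = 2.0

def toy_log_prob(theta):
    # prior log-density plus a single Normal likelihood term
    return (prior.log_prob(theta)
            + tfd.Normal(loc=theta, scale=1.0).log_prob(obs))

states, is_accepted = tfp.mcmc.sample_chain(
    num_results=500,
    num_burnin_steps=100,
    current_state=tf.constant(0.0),
    kernel=tfp.mcmc.RandomWalkMetropolis(toy_log_prob),
    trace_fn=lambda _, kr: kr.is_accepted)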
Example #15
        # (truncated excerpt: the tail of the sample() function from Example #1)
        z_con5 = np.array([c5] * size)
        z_con5 = np.reshape(z_con5, [size, 1])
    else:
        z_con5 = tfd.Uniform(low=-1.0, high=1.0).sample([size, 1])
    noise = tf.concat([z, z_con1, z_con2, z_con3, z_con4, z_con5], axis=-1)
    return noise


number = int(input('Model number: '))
generator = tl.models.Model.load('./models/model{}.h5'.format(number),
                                 load_weights=True)
generator.eval()

output_image = []
cc = np.linspace(-1, 1, 10)
z = tfd.Uniform(low=-1.0, high=1.0).sample([1, 128])
for i in range(5):
    imgs = []
    z = tfd.Uniform(low=-1.0, high=1.0).sample([1, 128])
    for ii in range(10):
        noise = sample(z, 1, c1=cc[ii])
        img = generator(noise)[0]
        img = (img + 1.) / 2.
        imgs.append(np.reshape(img, [32, 32]))
    imgs = np.concatenate(imgs, 1)
    output_image.append(imgs)

output_image = np.concatenate(output_image, 0)
plt.figure(figsize=(15, 8))
plt.suptitle("varying continuous latent code")
plt.imshow(output_image, cmap="gray")
Example #16
 def _init_distribution(conditions, **kwargs):
     low, high = conditions["low"], conditions["high"]
     return tfd.Uniform(low=low, high=high, **kwargs)
Example #17
 def sample(self):
     component_mean = tfd.Uniform().sample(
         [self._length_scale, self._var_dim])
     return component_mean
Example #18
from createdata import *

cmin = -10.  # lower bound of the uniform prior on c
cmax = 10.  # upper bound of the uniform prior on c

mmu = 0.  # mean of the Gaussian prior on m
msigma = 10.  # standard deviation of the Gaussian prior on m

# convert x values and data to 32 bit float
x = x.astype(np.float32)  # x is used globally here
data = data.astype(np.float32)

# set model - contains priors and the expected linear model
model = tfd.JointDistributionSequential([
    tfd.Normal(loc=mmu, scale=msigma, name="m"),  # m prior
    tfd.Uniform(cmin, cmax, name="c"),  # c prior
    lambda c, m: (tfd.Independent(
        tfd.Normal(loc=(m[..., tf.newaxis] * x + c[..., tf.newaxis]),
                   scale=sigma),
        name="data",
        reinterpreted_batch_ndims=1,
    ))
])


def target_log_prob_fn(mvalue, cvalue):
    """Unnormalized target density as a function of states."""
    return model.log_prob((mvalue, cvalue, data))


Nsamples = 2000  # final number of samples
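One way to draw the Nsamples posterior samples from this joint model (a sketch — the kernel settings are illustrative, not from the original script):

states = tfp.mcmc.sample_chain(
    num_results=Nsamples,
    num_burnin_steps=500,
    current_state=[tf.constant(0.), tf.constant(0.)],  # initial m, c
    kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target_log_prob_fn,
        step_size=0.05,
        num_leapfrog_steps=5),
    trace_fn=None)  # trace nothing; return only the chain states
m_samples, c_samples = states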
Example #19
    def __init__(self, data, options):
        self.data = data
        min_dist = min(data.t[1:] - data.t[:-1])
        self.N_p = data.τ.shape[0]
        self.N_m = data.t.shape[0]  # Number of observations
        self.num_replicates = data.f_obs.shape[0]
        self.num_tfs = data.f_obs.shape[1]
        self.num_genes = data.m_obs.shape[1]

        self.kernel_selector = GPKernelSelector(data, options)
        self.likelihood = TranscriptionLikelihood(data, options)
        self.options = options
        # Adaptable variances
        a = tf.constant(-0.5, dtype='float64')
        b2 = tf.constant(2., dtype='float64')
        self.h_f = 0.15 * tf.ones(self.N_p, dtype='float64')

        # Interaction weights
        w_0 = Parameter('w_0',
                        tfd.Normal(0, 2),
                        np.zeros(self.num_genes),
                        step_size=0.2 *
                        tf.ones(self.num_genes, dtype='float64'))
        w_0.proposal_dist = lambda mu, j: tfd.Normal(mu, w_0.step_size[j])
        w = Parameter('w',
                      tfd.Normal(0, 2),
                      1 * np.ones((self.num_genes, self.num_tfs)),
                      step_size=0.2 * tf.ones(self.num_genes, dtype='float64'))
        w.proposal_dist = lambda mu, j: tfd.Normal(
            mu, w.step_size[j])  # w_j; at the moment this is the same as w_j0 (see pg. 8)
        # Latent function
        fbar = Parameter(
            'fbar', self.fbar_prior, 0.5 * np.ones(
                (self.num_replicates, self.num_tfs, self.N_p)))

        # GP hyperparameters
        V = Parameter('V',
                      tfd.InverseGamma(f64(0.01), f64(0.01)),
                      f64(1),
                      step_size=0.05,
                      fixed=not options.tf_mrna_present)
        V.proposal_dist = lambda v: tfd.TruncatedNormal(
            v, V.step_size, low=0, high=100
        )  # v_i; fixed to 1 if the translation model is not used (pg. 8)
        L = Parameter('L',
                      tfd.Uniform(f64(min_dist**2 - 0.5), f64(data.t[-1]**2)),
                      f64(4),
                      step_size=0.05)  # TODO auto set
        L.proposal_dist = lambda l2: tfd.TruncatedNormal(
            l2, L.step_size, low=0, high=100)  #l2_i

        # Translation kinetic parameters
        δbar = Parameter('δbar', tfd.Normal(a, b2), f64(-0.3), step_size=0.05)
        δbar.proposal_dist = lambda mu: tfd.Normal(mu, δbar.step_size)
        # White noise for genes
        σ2_m = Parameter('σ2_m',
                         tfd.InverseGamma(f64(0.01), f64(0.01)),
                         1e-4 * np.ones(self.num_genes),
                         step_size=0.01)
        σ2_m.proposal_dist = lambda mu: tfd.TruncatedNormal(
            mu, σ2_m.step_size, low=0, high=0.1)
        # Transcription kinetic parameters
        constraint_index = 2 if self.options.initial_conditions else 1

        def constrain_kbar(kbar, gene):
            '''Constrains a given row in kbar'''
            # if gene == 3:
            #     kbar[constraint_index] = np.log(0.8)
            #     kbar[constraint_index+1] = np.log(1.0)
            kbar[kbar < -10] = -10
            kbar[kbar > 3] = 3
            return kbar

        num_var = 4 if self.options.initial_conditions else 3
        kbar_initial = -0.1 * np.ones(
            (self.num_genes, num_var), dtype='float64')
        for j, k in enumerate(kbar_initial):
            kbar_initial[j] = constrain_kbar(k, j)
        kbar = Parameter('kbar',
                         tfd.Normal(a, b2),
                         kbar_initial,
                         constraint=constrain_kbar,
                         step_size=0.05 * tf.ones(num_var, dtype='float64'))
        kbar.proposal_dist = lambda mu: tfd.MultivariateNormalDiag(
            mu, kbar.step_size)

        if not options.preprocessing_variance:
            σ2_f = Parameter('σ2_f',
                             tfd.InverseGamma(f64(0.01), f64(0.01)),
                             1e-4 * np.ones(self.num_tfs),
                             step_size=tf.constant(0.5, dtype='float64'))
            super().__init__(
                TupleParams_pre(fbar, δbar, kbar, σ2_m, w, w_0, L, V, σ2_f))
        else:
            super().__init__(TupleParams(fbar, δbar, kbar, σ2_m, w, w_0, L, V))
Example #20
def main(config):
    logdir = pathlib.Path(config.logdir).expanduser()
    config.traindir = config.traindir or logdir / 'train_eps'
    config.evaldir = config.evaldir or logdir / 'eval_eps'
    config.steps //= config.action_repeat
    config.eval_every //= config.action_repeat
    config.log_every //= config.action_repeat
    config.time_limit //= config.action_repeat
    config.act = getattr(tf.nn, config.act)

    if config.debug:
        tf.config.experimental_run_functions_eagerly(True)
    if config.gpu_growth:
        message = 'No GPU found. To actually train on CPU remove this assert.'
        assert tf.config.experimental.list_physical_devices('GPU'), message
        for gpu in tf.config.experimental.list_physical_devices('GPU'):
            tf.config.experimental.set_memory_growth(gpu, True)
    assert config.precision in (16, 32), config.precision
    if config.precision == 16:
        prec.set_policy(prec.Policy('mixed_float16'))
    print('Logdir', logdir)
    logdir.mkdir(parents=True, exist_ok=True)
    config.traindir.mkdir(parents=True, exist_ok=True)
    config.evaldir.mkdir(parents=True, exist_ok=True)
    step = count_steps(config.traindir)
    logger = tools.Logger(logdir, config.action_repeat * step)

    print('Create envs.')
    if config.offline_traindir:
        directory = config.offline_traindir.format(**vars(config))
    else:
        directory = config.traindir
    train_eps = tools.load_episodes(directory, limit=config.dataset_size)
    if config.offline_evaldir:
        directory = config.offline_evaldir.format(**vars(config))
    else:
        directory = config.evaldir
    eval_eps = tools.load_episodes(directory, limit=1)
    make = lambda mode: make_env(config, logger, mode, train_eps, eval_eps)
    train_envs = [make('train') for _ in range(config.envs)]
    eval_envs = [make('eval') for _ in range(config.envs)]
    acts = train_envs[0].action_space
    config.num_actions = acts.n if hasattr(acts, 'n') else acts.shape[0]

    prefill = max(0, config.prefill - count_steps(config.traindir))
    print(f'Prefill dataset ({prefill} steps).')
    if hasattr(acts, 'discrete'):
        random_actor = tools.OneHotDist(tf.zeros_like(acts.low)[None])
    else:
        random_actor = tfd.Independent(
            tfd.Uniform(acts.low[None], acts.high[None]), 1)

    def random_agent(o, d, s):
        action = random_actor.sample()
        logprob = random_actor.log_prob(action)
        return {'action': action, 'logprob': logprob}, None

    tools.simulate(random_agent, train_envs, prefill)
    tools.simulate(random_agent, eval_envs, episodes=1)
    logger.step = config.action_repeat * count_steps(config.traindir)

    print('Simulate agent.')
    train_dataset = make_dataset(train_eps, config)
    eval_dataset = iter(make_dataset(eval_eps, config))
    agent = Dreamer(config, logger, train_dataset)
    if (logdir / 'variables.pkl').exists():
        agent.load(logdir / 'variables.pkl')
        agent._should_pretrain._once = False

    state = None
    while agent._step.numpy().item() < config.steps:
        logger.write()
        print('Start evaluation.')
        video_pred = agent._wm.video_pred(next(eval_dataset))
        logger.video('eval_openl', video_pred)
        eval_policy = functools.partial(agent, training=False)
        tools.simulate(eval_policy, eval_envs, episodes=1)
        print('Start training.')
        state = tools.simulate(agent,
                               train_envs,
                               config.eval_every,
                               state=state)
        agent.save(logdir / 'variables.pkl')
    for env in train_envs + eval_envs:
        try:
            env.close()
        except Exception:
            pass
Example #21
 def _init_distribution(conditions):
     return tfd.Uniform(low=-np.inf, high=np.inf)
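A caveat for this "flat" construction: with infinite bounds the normalizing constant is zero, so the distribution is degenerate — it only serves as a placeholder for an improper flat prior when the surrounding code never relies on its log_prob or sample:

flat = tfd.Uniform(low=-np.inf, high=np.inf)
print(flat.log_prob(0.0))  # -inf: log(1 / (high - low)) with an infinite range
print(flat.sample())       # nan: low + u * (high - low) is -inf + inf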
Example #22
        # (truncated excerpt: the tail of a sample() variant like Example #2,
        # here taking z as an argument and returning only the noise tensor)
        z_cat = tf.one_hot(z_cat, 10)
    else:
        z_cat = tfd.Categorical(probs=tf.ones([10]) * 0.1).sample([size])
        z_cat = tf.one_hot(z_cat, 10)
    noise = tf.concat([z, z_con1, z_con2, z_cat], axis=-1)
    return noise


number = int(input('Model number: '))
generator = tl.models.Model.load(
    './models/model{}.h5'.format(number), load_weights=True)
generator.eval()

output_image = []
for i in range(10):
    z = tfd.Uniform(low=-1.0, high=1.0).sample([5, 62])
    noise = sample(z, 5, cat=i)
    imgs = generator(noise)
    imgs = (imgs + 1.) / 2.

    imgs = np.split(imgs, 5, 0)
    imgs = [np.reshape(img, [28, 28]) for img in imgs]
    imgs = np.concatenate(imgs, 0)
    output_image.append(imgs)

output_image = np.concatenate(output_image, 1)
plt.figure(figsize=(15, 8))
plt.suptitle("varying discrete latent code")
plt.imshow(output_image, cmap="gray")
plt.axis("off")
plt.savefig("./test/cat_res.png")
Example #23
 def _init_distribution(conditions, **kwargs):
     return tfd.Uniform(low=0.0, high=np.inf, **kwargs)
Example #24
 def sample(self):
     component_mean = tfd.Uniform().sample(
         [self.num_components, self.var_dim])
     return component_mean
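With no arguments, tfd.Uniform defaults to low=0.0 and high=1.0, so the component means here are initialized uniformly in the unit hypercube:

tfd.Uniform().sample([3, 2])  # shape (3, 2), values in [0, 1)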