Example No. 1
    def calculate_protein(self, fbar, k_fbar, Δ):  # Calculate p_i vector
        τ = self.data.τ
        f_i = inverse_positivity(fbar)
        δ_i = tf.reshape(logit(k_fbar), (-1, 1))
        if self.options.delays:
            # Add delay
            Δ = tf.cast(Δ, 'int32')

            for r in range(self.num_replicates):
                f_ir = rotate(f_i[r], -Δ)
                mask = ~tf.sequence_mask(Δ, f_i.shape[2])
                f_ir = tf.where(mask, f_ir, 0)
                mask = np.zeros((self.num_replicates, 1, 1), dtype='float64')
                mask[r] = 1
                f_i = (1 - mask) * f_i + mask * f_ir

        # Approximate integral (trapezoid rule)
        resolution = τ[1] - τ[0]
        sum_term = tfm.multiply(tfm.exp(δ_i * τ), f_i)
        cumsum = 0.5 * resolution * tfm.cumsum(
            sum_term[:, :, :-1] + sum_term[:, :, 1:], axis=2)
        integrals = tf.concat([
            tf.zeros((self.num_replicates, self.num_tfs, 1), dtype='float64'),
            cumsum
        ],
                              axis=2)
        exp_δt = tfm.exp(-δ_i * τ)
        p_i = exp_δt * integrals
        return p_i
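A minimal, self-contained sketch of the same trapezoid-rule integral on a synthetic signal (the names below are made up and not part of the class above): with a constant latent function f(u) = 1, the quantity exp(-δt) ∫₀ᵗ exp(δu) f(u) du has the closed form (1 - exp(-δt)) / δ, which the discretised version should reproduce up to grid error.

# Sketch only: same trapezoid-rule cumulative integral, checked against the
# closed form (1 - exp(-d*t)) / d that holds when f(u) == 1.
import tensorflow as tf
from tensorflow import math as tfm

tau = tf.linspace(0.0, 10.0, 200)        # time grid
d = 0.7                                  # hypothetical decay rate
f = tf.ones_like(tau)                    # constant latent signal

resolution = tau[1] - tau[0]
sum_term = tfm.exp(d * tau) * f
cumsum = 0.5 * resolution * tfm.cumsum(sum_term[:-1] + sum_term[1:])
integral = tf.concat([tf.zeros(1), cumsum], axis=0)
p = tfm.exp(-d * tau) * integral         # exp(-d*t) * int_0^t exp(d*u) f(u) du

expected = (1.0 - tfm.exp(-d * tau)) / d
print(float(tf.reduce_max(tf.abs(p - expected))))  # small discretisation error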
Example No. 2
    def h_(self, X, k, j, primefirst=True):
        Dj = tf.reshape(self.D, (1, -1))
        Dj = broadcast_tile(Dj, 1, 7)
        Dj = tf.tile(Dj, [35, 1])
        Dk = tf.reshape(self.D, (-1, 1))
        Dk = broadcast_tile(Dk, 7, 1)
        Dk = tf.tile(Dk, [1, 35])
        gk = tf.transpose(
            broadcast_tile(tf.reshape(self.gamma(), (-1, 1)), 7, 1))
        gk = tf.tile(gk, [35, 1])
        if not primefirst:
            Dk, Dj = Dj, Dk
            gk = tf.transpose(
                broadcast_tile(tf.reshape(self.gamma(), (1, -1)), 1, 7))
            gk = tf.tile(gk, [1, 35])

        l = self.lengthscale
        t_x = tf.reshape(X[:self.block_size], (-1, ))
        t_prime, t, t_dist = self.get_distance_matrix(primefirst=primefirst,
                                                      t_x=t_x)
        t_prime = tf.tile(t_prime, [5, 5])
        t = tf.tile(t, [5, 5])
        t_dist = tf.tile(t_dist, [5, 5])
        multiplier = tfm.exp(gk**2) / (Dj + Dk)
        first_erf_term = tfm.erf(t_dist / l - gk) + tfm.erf(t / l + gk)
        second_erf_term = tfm.erf(t_prime / l - gk) + tfm.erf(gk)
        return multiplier * (tf.multiply(tfm.exp(-Dk*t_dist) , first_erf_term) - \
                             tf.multiply(tfm.exp(-Dk*t_prime-Dj*t) , second_erf_term))
Example No. 3
def energy_4_log_pdf(z):
    z2 = z[:, 1]
    x1 = -0.5 * ((z2 - w1(z)) / 0.4)**2
    x2 = -0.5 * ((z2 - w1(z) + w3(z)) / 0.35)**2
    a = math.maximum(x1, x2)
    exp1 = math.exp(x1 - a)
    exp2 = math.exp(x2 - a)
    return a + math.log(exp1 + exp2)
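The `a + log(exp(x1 - a) + exp(x2 - a))` pattern above is the log-sum-exp trick: subtracting the maximum keeps the exponentials bounded. A small sketch on made-up values showing it agrees with `tf.reduce_logsumexp` where the naive form overflows:

# Sketch of the log-sum-exp trick: the stable form never exponentiates
# anything larger than 0, yet equals log(exp(x1) + exp(x2)).
import tensorflow as tf
from tensorflow import math

x = tf.constant([[1000.0, 999.0], [-2.0, 3.0]])   # first row overflows naively
naive = math.log(tf.reduce_sum(math.exp(x), axis=1))           # -> [inf, ...]
a = tf.reduce_max(x, axis=1)
stable = a + math.log(tf.reduce_sum(math.exp(x - a[:, None]), axis=1))
builtin = tf.reduce_logsumexp(x, axis=1)
print(naive.numpy(), stable.numpy(), builtin.numpy())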
Example No. 4
def energy_1_log_pdf(z):
    z1, z2 = z[:, 0], z[:, 1]
    norm = (z1**2 + z2**2)**0.5
    exp1 = math.exp(-0.5 * ((z1 - 2) / 0.6)**2)
    exp2 = math.exp(-0.5 * ((z1 + 2) / 0.6)**2)
    u = 0.5 * ((norm - 2) / 0.4)**2 - math.log(exp1 + exp2)

    return -u
Example No. 5
def circle_log_pdf(z):
    z1, z2 = z[:, 0], z[:, 1]
    norm = (z1**2 + z2**2)**0.5
    exp1 = math.exp(-0.2 * ((z1 - 2) / 0.8)**2)
    exp2 = math.exp(-0.2 * ((z1 + 2) / 0.8)**2)
    u = 0.5 * ((norm - 4) / 0.4)**2 - math.log(exp1 + exp2)

    return -u
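A possible usage sketch for these unnormalised 2-D log-densities (assuming `circle_log_pdf` from above is in scope): evaluate the density on a grid, the usual first step before plotting contours.

# Sketch: grid evaluation of an unnormalised 2-D log-density for plotting.
import numpy as np
import tensorflow as tf

xs = np.linspace(-6.0, 6.0, 200, dtype='float32')
X, Y = np.meshgrid(xs, xs)
z = tf.constant(np.stack([X.ravel(), Y.ravel()], axis=1))  # shape (N, 2)
log_p = circle_log_pdf(z)                                  # shape (N,)
density = tf.exp(log_p).numpy().reshape(X.shape)           # ready for contourf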
Example No. 6
 def k_xf(self, j, X, X2):
     t_prime, t_, t_dist = self.get_distance_matrix(
         t_x=tf.reshape(X[:self.block_size], (-1, )),
         t_y=tf.reshape(X2, (-1, )))
     l = self.lengthscale
     erf_term = tfm.erf(t_dist / l - self.gamma(j)) + tfm.erf(t_ / l +
                                                              self.gamma(j))
     return self.S[j] * l * 0.5 * tfm.sqrt(PI) * tfm.exp(
         self.gamma(j)**2) * tfm.exp(-self.D[j] * t_dist) * erf_term
Example No. 7
def pdf_1D(z, density_name=''):
    assert density_name in AVAILABLE_1D_DISTRIBUTIONS, "Incorrect density name."
    if density_name == '':
        return 1
    elif density_name == 'two_hills':
        y = 0.5
        sigma2 = 0.1
        likelihood = (1 / math.sqrt(2 * pi * sigma2)) * math.exp(-(
            (y - (z**2))**2) / (2 * sigma2))
        prior = (1 / math.sqrt(2 * pi)) * math.exp(-(z**2) / 2)  # standard normal prior
        return likelihood * prior
Example No. 8
def PST(I, LPF, Phase_strength, Warp_strength, Threshold_min, Threshold_max):
    # Inverting Threshold_min to simplify the optimization process, so all variables can be clipped between 0 and 1
    LPF = ops.convert_to_tensor_v2(LPF)
    Phase_strength = ops.convert_to_tensor_v2(Phase_strength)
    Warp_strength = ops.convert_to_tensor_v2(Warp_strength)
    I = ops.convert_to_tensor_v2(I)
    Threshold_min = ops.convert_to_tensor_v2(Threshold_min)
    Threshold_max = ops.convert_to_tensor_v2(Threshold_max)

    Threshold_min = -Threshold_min
    L = 0.5
    x = tf.linspace(-L, L, I.shape[0])
    y = tf.linspace(-L, L, I.shape[1])
    [X1, Y1] = (tf.meshgrid(x, y))
    X = tf.transpose(X1)
    Y = tf.transpose(Y1)
    [THETA, RHO] = cart2pol(X, Y)
    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = sig.fft2d(tf.dtypes.cast(I, tf.complex64))

    tmp6 = (LPF**2.0) / tfm.log(2.0)
    tmp5 = tfm.sqrt(tmp6)
    tmp4 = (tfm.divide(RHO, tmp5))
    tmp3 = -tfm.pow(tmp4, 2)
    tmp2 = tfm.exp(tmp3)
    expo = fftshift(tmp2)
    Image_orig_filtered = tfm.real(
        sig.ifft2d((tfm.multiply(tf.dtypes.cast(Image_orig_f, tf.complex64),
                                 tf.dtypes.cast(expo, tf.complex64)))))
    # Constructing the PST Kernel
    tp1 = tfm.multiply(RHO, Warp_strength)
    PST_Kernel_1 = tfm.multiply(
        tp1, tfm.atan(tfm.multiply(RHO, Warp_strength))
    ) - 0.5 * tfm.log(1.0 + tfm.pow(tf.multiply(RHO, Warp_strength), 2.0))
    PST_Kernel = PST_Kernel_1 / tfm.reduce_max(PST_Kernel_1) * Phase_strength
    # Apply the PST Kernel
    temp = tfm.multiply(
        fftshift(
            tfm.exp(
                tfm.multiply(tf.dtypes.complex(0.0, -1.0),
                             tf.dtypes.cast(PST_Kernel,
                                            tf.dtypes.complex64)))),
        sig.fft2d(tf.dtypes.cast(Image_orig_filtered, tf.dtypes.complex64)))
    Image_orig_filtered_PST = sig.ifft2d(temp)

    # Calculate phase of the transformed image
    PHI_features = tfm.angle(Image_orig_filtered_PST)

    out = PHI_features
    out = (out / tfm.reduce_max(out)) * 3

    return out
Example No. 9
 def h(self, X, k, j, t_y=None, primefirst=True):
     l = self.lengthscale
     #         print(l, self.D[k], self.D[j])
     t_x = tf.reshape(X[:self.block_size], (-1, ))
     t_prime, t, t_dist = self.get_distance_matrix(primefirst=primefirst,
                                                   t_x=t_x,
                                                   t_y=t_y)
     multiplier = tfm.exp(self.gamma(k)**2) / (self.D[j] + self.D[k])
     first_erf_term = tfm.erf(t_dist / l -
                              self.gamma(k)) + tfm.erf(t / l +
                                                       self.gamma(k))
     second_erf_term = tfm.erf(t_prime / l - self.gamma(k)) + tfm.erf(
         self.gamma(k))
     return multiplier * (tf.multiply(tfm.exp(-self.D[k]*t_dist) , first_erf_term) - \
                          tf.multiply(tfm.exp(-self.D[k]*t_prime-self.D[j]*t) , second_erf_term))
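Transcribing the return expression (and writing `t_dist` for the pairwise difference returned by `get_distance_matrix`, `γ_k` for the output of `self.gamma(k)`), the quantity this method appears to compute is

$$
h_{kj}(t', t) = \frac{e^{\gamma_k^2}}{D_j + D_k}\left[e^{-D_k\, t_{\mathrm{dist}}}\left(\operatorname{erf}\!\left(\frac{t_{\mathrm{dist}}}{l} - \gamma_k\right) + \operatorname{erf}\!\left(\frac{t}{l} + \gamma_k\right)\right) - e^{-D_k t' - D_j t}\left(\operatorname{erf}\!\left(\frac{t'}{l} - \gamma_k\right) + \operatorname{erf}(\gamma_k)\right)\right]
$$

which has the same form as the h helper that appears in ODE-driven Gaussian-process (latent force model) kernels.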
Example No. 10
    def __init__(self, num_input):
        num_output = 2  # Based on two genders given (K)

        in_ = keras.layers.Input(shape=(num_input, ))
        fc1 = keras.layers.Dense(num_input // 2,
                                 activation="relu",
                                 name="fully_connected1")(
                                      in_)  # the original uses a different activation (tanh)
        fc2 = keras.layers.Dense(num_input // 4,
                                 activation="relu",
                                 name="fully_connected2")(fc1)
        fc3 = keras.layers.Dense(num_input // 8, activation="relu")(fc2)

        # Mixture Density Outputs
        mu_output = keras.layers.Dense((num_input * num_output),
                                       activation=None,
                                       name="mean_layer")(fc3)
        variance_layer = keras.layers.Dense(num_output,
                                            activation=None,
                                            name="variance_layer")(fc3)
        var_output = keras.layers.Lambda(lambda x: exp(x),
                                         output_shape=(num_output, ),
                                         name="exp_var_layer")(variance_layer)
        pi_output = keras.layers.Dense(num_output,
                                       acitvation="softmax",
                                       name="pi_layer")(fc3)

        model = keras.models.Model(in_, [mu_output, var_output, pi_output],
                                   name="MDN")
        adam = keras.optimizers.Adam()

        # TODO: Can I compile the model here? Is loss the custom loss function? Similar to CAN
        model.compile(optimizer="adam")
        self.model = model
Example No. 11
 def K_ff(self, X):
     """I've used the fact that we call this method for K_ff when finding the covariance as a hack, so
     I know whether I should return K_ff or K_xx. In this case we're returning K_ff:
     $K_{ff}^{post} = K_{ff} - K_{fx} K_{xx}^{-1} K_{xf}$"""
     print('k_diag')
     _, _, t_dist = self.get_distance_matrix(t_x=tf.reshape(X, (-1, )))
     K_ff = self.kervar**2 * tfm.exp(-(t_dist**2) / (self.lengthscale**2))
     return (K_ff)
Example No. 12
 def rbf(self, v, l2):
     if self.options.kernel_exponential:
         v = tf.exp(v)
         l2 = tf.exp(l2)
     sq_dist = tf.divide(tfm.square(self.t_dist),
                         tf.reshape(2 * l2, (-1, 1, 1)))
     K = tf.reshape(v, (-1, 1, 1)) * tfm.exp(-sq_dist)
     m = tf.zeros((self.N_p), dtype='float64')
     return m, K
Example No. 13
def compute_gaussian_kl(z_log_var, z_mean):
    """ Compute the KL divergence between a Gaussian and a Normal distribution. Based on Locatello et al.
    implementation (https://github.com/google-research/disentanglement_lib)

    :param z_log_var: the log variance of the Gaussian
    :param z_mean: the mean of the Gaussian
    :return: the KL divergence
    """
    kl_loss = tfm.square(z_mean) + tfm.exp(z_log_var) - z_log_var - 1
    return 0.5 * tfm.reduce_sum(kl_loss, [1])
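A quick sanity check of the closed form above (assuming `compute_gaussian_kl` is in scope): it should match TensorFlow Probability's analytic KL between N(μ, σ²I) and N(0, I).

# Sketch: compare against tfp's analytic KL on random inputs.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

z_mean = tf.random.normal((4, 3))
z_log_var = tf.random.normal((4, 3))

q = tfd.MultivariateNormalDiag(loc=z_mean, scale_diag=tf.exp(0.5 * z_log_var))
p = tfd.MultivariateNormalDiag(loc=tf.zeros((4, 3)), scale_diag=tf.ones((4, 3)))

print(compute_gaussian_kl(z_log_var, z_mean).numpy())  # function defined above
print(tfd.kl_divergence(q, p).numpy())                 # should match closely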
Example No. 14
def compute_gaussian_log_pdf(z, z_mean, z_log_var):
    """ Compute the log probability density of a Gaussian distribution. Based on Locatello et al. implementation
    (https://github.com/google-research/disentanglement_lib)

    :param z: the sampled values
    :param z_mean: the mean of the Gaussian
    :param z_log_var: the log variance of the Gaussian
    :return: the log probability density
    """
    log2pi = tfm.log(2. * tf.constant(pi))
    return -0.5 * (tfm.square(z - z_mean) * tfm.exp(-z_log_var) + z_log_var + log2pi)
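The expression above is the element-wise log of N(z; z_mean, exp(z_log_var)), so (assuming the function is in scope) it should agree with `tfd.Normal(...).log_prob(z)`:

# Sketch: element-wise comparison with tfp's Normal log_prob.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

z = tf.random.normal((4, 3))
z_mean = tf.random.normal((4, 3))
z_log_var = tf.random.normal((4, 3))

ours = compute_gaussian_log_pdf(z, z_mean, z_log_var)   # function defined above
ref = tfd.Normal(loc=z_mean, scale=tf.exp(0.5 * z_log_var)).log_prob(z)
print(float(tf.reduce_max(tf.abs(ours - ref))))          # ~0 up to float error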
Example No. 15
    def predict_m(self, kbar, k_fbar, wbar, fbar, w_0bar, Δ):
        # Take relevant parameters out of log-space
        if self.options.kinetic_exponential:
            kin = (tf.reshape(tf.exp(logit(kbar[:, i])), (-1, 1))
                   for i in range(kbar.shape[1]))
        else:
            kin = (tf.reshape(logit(kbar[:, i]), (-1, 1))
                   for i in range(kbar.shape[1]))
        if self.options.initial_conditions:
            a_j, b_j, d_j, s_j = kin
        else:
            b_j, d_j, s_j = kin
        w = (wbar)
        w_0 = tf.reshape((w_0bar), (-1, 1))
        τ = self.data.τ
        N_p = self.data.τ.shape[0]

        p_i = inverse_positivity(fbar)
        if self.options.translation:
            p_i = self.calculate_protein(fbar, k_fbar, Δ)

        # Calculate m_pred
        resolution = τ[1] - τ[0]
        interactions = tf.matmul(w, tfm.log(p_i + 1e-100)) + w_0
        G = tfm.sigmoid(interactions)  # TF Activation Function (sigmoid)
        sum_term = G * tfm.exp(d_j * τ)
        integrals = tf.concat(
            [
                tf.zeros((self.num_replicates, self.num_genes, 1),
                         dtype='float64'),  # Trapezoid rule
                0.5 * resolution *
                tfm.cumsum(sum_term[:, :, :-1] + sum_term[:, :, 1:], axis=2)
            ],
            axis=2)
        exp_dt = tfm.exp(-d_j * τ)
        integrals = tfm.multiply(exp_dt, integrals)

        m_pred = b_j / d_j + s_j * integrals
        if self.options.initial_conditions:
            m_pred += tfm.multiply((a_j - b_j / d_j), exp_dt)
        return m_pred
Example No. 16
    def call(self, alpha_true, logits_pred):
        epsilon = self.epsilon
        alpha_pred = exp(logits_pred)

        KL = lgamma(tf.math.reduce_sum(alpha_pred)) - tf.math.reduce_sum(
            lgamma(alpha_pred + epsilon)) - lgamma(
                tf.math.reduce_sum(alpha_true)) + tf.math.reduce_sum(
                    lgamma(alpha_true + epsilon)) + tf.math.reduce_sum(
                        (alpha_pred - alpha_true) *
                        (digamma(alpha_pred + epsilon) -
                         digamma(tf.math.reduce_sum(alpha_pred))))
        return KL
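Ignoring the epsilon smoothing, the expression above is the closed-form KL(Dirichlet(alpha_pred) ‖ Dirichlet(alpha_true)). A short sketch (not using the class) checking that form against TensorFlow Probability on a single made-up alpha vector:

# Sketch: closed-form Dirichlet KL is also available analytically in tfp.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

alpha_pred = tf.constant([2.0, 5.0, 1.5])
alpha_true = tf.constant([1.0, 1.0, 1.0])
print(float(tfd.kl_divergence(tfd.Dirichlet(alpha_pred),
                              tfd.Dirichlet(alpha_true))))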
Example No. 17
    def _convert_to_pxyz(self, x):
        from numpy import cos, sin, sinh, exp, clip, stack
        pt = exp(clip(x[:, 0], -7., 7.)) - 0.1
        pt *= self._pt_scale
        eta = x[:, 1]
        phi = x[:, 2]

        px = pt * cos(phi)
        py = pt * sin(phi)
        pz = pt * sinh(clip(eta, -5, 5))

        return stack([px, py, pz], axis=1)
Example No. 18
    def _convert_to_pxyz(self, x):
        from tensorflow.math import cos, sin, sinh, exp
        from tensorflow import clip_by_value, stack
        pt = exp(clip_by_value(x[:, 0], -7., 7.)) - 0.1
        pt *= self._pt_scale
        eta = x[:, 1]
        phi = x[:, 2]

        px = pt * cos(phi)
        py = pt * sin(phi)
        pz = pt * sinh(clip_by_value(eta, -5, 5))

        return stack([px, py, pz], axis=1)
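The NumPy and TensorFlow versions above implement the same (log-pT, η, φ) → (px, py, pz) mapping. A hedged standalone sketch of that mapping with a made-up `pt_scale`, useful for spot-checking either class method:

# Sketch of the same conversion as a free function (pt_scale is hypothetical).
import numpy as np

def to_pxyz(x, pt_scale=1.0):
    pt = (np.exp(np.clip(x[:, 0], -7., 7.)) - 0.1) * pt_scale
    eta, phi = x[:, 1], x[:, 2]
    return np.stack([pt * np.cos(phi),
                     pt * np.sin(phi),
                     pt * np.sinh(np.clip(eta, -5, 5))], axis=1)

x = np.array([[1.2, 0.5, 0.3]])
print(to_pxyz(x))   # one (px, py, pz) row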
Example No. 19
def gaussian_log_likelihood(x, mu_x, log_sig_sq_x, SMALL_CONSTANT=1e-5):
    '''
    Element-wise Gaussian log likelihood
    INPUTS:
        x - points at which to evaluate
        mu_x - means of the Gaussians
        log_sig_sq_x - log variances of the Gaussians
    OPTIONAL INPUTS:
        SMALL_CONSTANT - small constant to avoid taking the log of 0 or dividing by 0
    OUTPUTS:
        log_lik - element-wise log likelihood
    '''

    # -E_q(z|x) log(p(x|z))
    normalising_factor = -0.5 * tfm.log(
        SMALL_CONSTANT + tfm.exp(log_sig_sq_x)) - 0.5 * np.log(2.0 * np.pi)
    square_diff_between_mu_and_x = tfm.square(mu_x - x)
    inside_exp = -0.5 * tfm.divide(square_diff_between_mu_and_x,
                                   SMALL_CONSTANT + tfm.exp(log_sig_sq_x))
    log_lik = normalising_factor + inside_exp

    return log_lik
Example No. 20
def kl_normal(mu_1, log_sig_sq_1, mu_2, log_sig_sq_2):
    '''
    Element-wise KL divergence between two normal distributions
    INPUTS:
        mu_1 - mean of first distribution
        log_sig_sq_1 - log variance of first distribution
        mu_2 - mean of second distribution
        log_sig_sq_2 - log variance of second distribution
    OUTPUTS:
        KL - element-wise KL divergence
    '''

    v_mean = mu_2  #2
    aux_mean = mu_1  #1
    v_log_sig_sq = log_sig_sq_2  #2
    aux_log_sig_sq = log_sig_sq_1  #1
    v_log_sig = tfm.log(tfm.sqrt(tfm.exp(v_log_sig_sq)))  #2
    aux_log_sig = tfm.log(tfm.sqrt(tfm.exp(aux_log_sig_sq)))  #1
    KL = v_log_sig - aux_log_sig + tf.divide(
        tfm.exp(aux_log_sig_sq) + tfm.square(aux_mean - v_mean),
        2.0 * tfm.exp(v_log_sig_sq)) - 0.5

    return KL
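A possible check (assuming `kl_normal` from above is in scope): with scale = exp(0.5 · log_sig_sq), it should agree element-wise with `tfd.kl_divergence` between the corresponding univariate Normals.

# Sketch: element-wise Normal-vs-Normal KL compared with tfp.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

mu_1, mu_2 = tf.constant([0.0, 1.0]), tf.constant([0.5, -1.0])
ls_1, ls_2 = tf.constant([0.2, -0.3]), tf.constant([0.0, 0.1])

ours = kl_normal(mu_1, ls_1, mu_2, ls_2)      # function defined above
ref = tfd.kl_divergence(tfd.Normal(mu_1, tf.exp(0.5 * ls_1)),
                        tfd.Normal(mu_2, tf.exp(0.5 * ls_2)))
print(ours.numpy(), ref.numpy())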
Example No. 21
def reparameterisation_trick(mu, log_sig_sq):
    '''
    Sample from Gaussian such that it stays differentiable
    INPUTS:
        mu - mean of distribution
        log_sig_sq - log variance of distribution
    OUTPUTS:
        samp - sample from distribution
    '''

    eps = tf.random.normal([tf.shape(mu)[0], tf.shape(mu)[1]],
                           0,
                           1.,
                           dtype=tf.float32)
    samp = tfm.add(mu, tfm.multiply(tfm.sqrt(tfm.exp(log_sig_sq)), eps))

    return samp
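A usage sketch (assuming `reparameterisation_trick` is in scope): drawing many samples for a fixed (mu, log_sig_sq) should recover the mean and standard deviation.

# Sketch: empirical mean/std of reparameterised samples recover the parameters.
import tensorflow as tf

mu = tf.fill([100000, 1], 1.5)
log_sig_sq = tf.fill([100000, 1], -1.0)          # sigma = exp(-0.5) ~ 0.607

samp = reparameterisation_trick(mu, log_sig_sq)  # function defined above
print(float(tf.reduce_mean(samp)))               # ~ 1.5
print(float(tf.math.reduce_std(samp)))           # ~ 0.607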
Example No. 22
        def proceed():
            num_tfs = current_state.shape[0]
            new_state = current_state
            Δrange = np.arange(self.lower, self.upper + 1, dtype='float64')
            Δrange_tf = tf.range(self.lower, self.upper + 1, dtype='float64')
            for i in range(num_tfs):
                # Generate normalised cumulative distribution
                probs = list()
                mask = np.zeros((num_tfs, ), dtype='float64')
                mask[i] = 1

                for Δ in Δrange:
                    test_state = (1 - mask) * new_state + mask * Δ

                    # if j == 0:
                    #     cumsum.append(tf.reduce_sum(self.likelihood.genes(
                    #         all_states=all_states,
                    #         state_indices=self.state_indices,
                    #         Δ=test_state,
                    #     )) + tf.reduce_sum(self.prior.log_prob(Δ)))
                    # else:

                    probs.append(
                        tf.reduce_sum(
                            self.likelihood.genes(
                                all_states=all_states,
                                state_indices=self.state_indices,
                                Δ=test_state,
                            )) + tf.reduce_sum(self.prior.log_prob(Δ)))
                # curri = tf.cast(current_state[i], 'int64')
                # start_index = tf.reduce_max([self.lower, curri-2])
                # probs = tf.gather(probs, tf.range(start_index,
                #                                   tf.reduce_min([self.upper+1, curri+3])))

                probs = tf.stack(probs) - tfm.reduce_max(probs)
                probs = tfm.exp(probs)
                probs = probs / tfm.reduce_sum(probs)
                cumsum = tfm.cumsum(probs)
                u = tf.random.uniform([], dtype='float64')
                index = tf.where(
                    cumsum == tf.reduce_min(cumsum[(cumsum - u) > 0]))
                chosen = Δrange_tf[index[0][0]]
                new_state = (1 - mask) * new_state + mask * chosen
            return new_state
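The cumsum/uniform block above is inverse-CDF sampling from a discrete distribution built from unnormalised log-probabilities. As a sketch (made-up values, not the sampler's actual state), `tf.random.categorical` draws from the same distribution in a single call:

# Sketch: categorical sampling from unnormalised log-probabilities.
import tensorflow as tf

log_probs = tf.constant([[1.0, 2.0, 0.5, 3.0]], dtype='float64')  # made-up values
index = tf.random.categorical(log_probs, 1)[0, 0]                 # sampled index
print(int(index))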
Example No. 23
def train(epoch, model):
    # LEARNING_RATE = lr / tf.math.pow(1 + 10 * (epoch - 1) / epochs, 0.75)
    # print('learning rate{: .4f}'.format(LEARNING_RATE))

    optimizer = tf.train.AdamOptimizer(0.01,
                                       beta1=0.9,
                                       beta2=0.9,
                                       epsilon=l2_decay)

    iter_source = iter(source_loader)
    iter_target = iter(target_train_loader)
    num_iter = len_source_loader
    for i in range(1, num_iter):
        data_source, label_source = iter_source.get_next()
        data_target, _ = iter_target.get_next()
        if i % len_target_loader == 0:
            iter_target = iter(target_train_loader)
        if cuda:
            data_source, label_source = data_source.cuda(), label_source.cuda(
            )  # tf.matmul(data_source)
            data_target = data_target.cuda()  # tf.matmul(data_target)
        data_source, label_source = tf.Variable(data_source), tf.Variable(
            label_source)
        data_target = tf.Variable(data_target)

        label_source_pred, loss_mmd = model(data_source, data_target)
        loss_cls = tf.losses.sparse_softmax_cross_entropy(
            labels=label_source, logits=label_source_pred)
        gamma = 2 / (1 + math.exp(-10 * (epoch) / epochs)) - 1
        loss = loss_cls + gamma * loss_mmd

        if i % log_interval == 0:
            print(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tsoft_Loss: {:.6f}\tmmd_Loss: {:.6f}'
                .format(epoch, i * len(data_source), len_source_dataset,
                        100. * i / len_source_loader, loss.data[0],
                        loss_cls.data[0], loss_mmd.data[0]))
    train_op = optimizer.minimize(loss)

    with tf.Session() as sess:
        for i in range(num_batches):
            _, loss_val = sess.run([train_op, loss])
Example No. 24
    def call(self, ensemble_logits, logits):
        '''
        ensemble_logits are the outputs from our ensemble (batch x ensembles x classes)
        logits are the predicted outputs from our model (batch x classes)
        '''
        logits = tf.cast(logits, dtype=tf.float64)
        ensemble_logits = tf.cast(ensemble_logits, dtype=tf.float64)
        alphas = exp(logits / self.temp)

        precision = reduce_sum(alphas, axis=1)  #sum over classes

        ensemble_probs = softmax(ensemble_logits / self.temp,
                                 axis=2)  #softmax over classes
        # Smooth for num. stability:
        probs_mean = 1 / (tf.shape(ensemble_probs)[2]
                          )  #divide by nr of classes
        # Subtract mean, scale down, add mean back)
        ensemble_probs = self.tp_scaling * (ensemble_probs -
                                            probs_mean) + probs_mean

        log_ensemble_probs_geo_mean = reduce_mean(log(ensemble_probs +
                                                      self.smooth_val),
                                                  axis=1)  #mean over ensembles

        target_independent_term = reduce_sum(
            lgamma(alphas + self.smooth_val), axis=1) - lgamma(
                precision + self.smooth_val
            )  #sum over lgammma of classes - lgamma(precision)

        target_dependent_term = -reduce_sum(
            (alphas - 1.) * log_ensemble_probs_geo_mean,
            axis=1)  # -sum over classes

        cost = target_dependent_term + target_independent_term
        # tf.print(self.temp)
        return reduce_mean(cost) * (self.temp**2)  #mean of all batches
Example No. 25
def eight_schools_log_pdf(z, centered=EIGHT_SCHOOL_CENTERED):
    prior_mu = tfd.Normal(loc=0, scale=5)
    prior_tau = tfd.HalfCauchy(loc=0, scale=5)

    mu, log_tau = z[:, -2], z[:, -1]
    # Adapt the size of mu and tau.
    mu = tf.transpose(eight_schools_replicate * mu)
    log_tau = tf.transpose(eight_schools_replicate * log_tau)

    if centered:
        # shapes, thetas=(8,N), mu=(N,), tau=(N,)
        thetas = z[:, 0:eight_schools_K]

        likelihood = tfd.Normal(loc=thetas,
                                scale=eight_schools_sigma[0:eight_schools_K])
        prior_theta = tfd.Normal(loc=mu, scale=math.exp(log_tau))
        log_det_jac = math.log(math.exp(
            log_tau))  # kept log(exp()) for mathematical understanding.

        return likelihood.log_prob(
            eight_schools_y[0:eight_schools_K]) + prior_theta.log_prob(
                thetas) + prior_mu.log_prob(mu) + prior_tau.log_prob(
                    math.exp(log_tau)) + log_det_jac

    else:
        # shapes, thetas=(8,N), mu=(N,), tau=(N,)
        thetas_tilde = z[:, 0:eight_schools_K]

        zeros = tf.zeros(mu.shape)
        ones = tf.ones(log_tau.shape)

        thetas = mu + thetas_tilde * math.exp(log_tau)

        likelihood = tfd.Normal(loc=thetas,
                                scale=eight_schools_sigma[0:eight_schools_K])
        prior_theta = tfd.Normal(loc=zeros, scale=ones)
        log_det_jac = math.log(math.exp(
            log_tau))  # kept log(exp()) for mathematical understanding.

        return likelihood.log_prob(
            eight_schools_y[0:eight_schools_K]) + prior_theta.log_prob(
                thetas_tilde) + prior_mu.log_prob(mu) + prior_tau.log_prob(
                    math.exp(log_tau)) + log_det_jac
Example No. 26
def pdf_2D(z, density_name=''):
    assert density_name in AVAILABLE_2D_DISTRIBUTIONS, "Incorrect density name."
    if density_name == '':
        return 1
    elif density_name == 'banana':
        z1, z2 = z[:, 0], z[:, 1]
        mu = np.array([0.5, 0.5], dtype='float32')
        cov = np.array([[0.06, 0.055], [0.055, 0.06]], dtype='float32')
        scale = tf.linalg.cholesky(cov)
        p = tfd.MultivariateNormalTriL(loc=mu, scale_tril=scale)
        z2 = z1**2 + z2
        z1, z2 = tf.expand_dims(z1, 1), tf.expand_dims(z2, 1)
        z = tf.concat([z1, z2], axis=1)
        return p.prob(z)
    elif density_name == 'circle':
        z1, z2 = z[:, 0], z[:, 1]
        norm = (z1**2 + z2**2)**0.5
        exp1 = math.exp(-0.2 * ((z1 - 2) / 0.8)**2)
        exp2 = math.exp(-0.2 * ((z1 + 2) / 0.8)**2)
        u = 0.5 * ((norm - 4) / 0.4)**2 - math.log(exp1 + exp2)
        return math.exp(-u)
    elif density_name == 'eight_schools':
        y_i = 0
        sigma_i = 10
        thetas, mu, log_tau = z[:, 0], z[:, 1], z[:, 2]
        likelihood = tfd.Normal(loc=thetas, scale=sigma_i)
        prior_theta = tfd.Normal(loc=mu, scale=math.exp(log_tau))
        prior_mu = tfd.Normal(loc=0, scale=5)
        prior_tau = tfd.HalfCauchy(loc=0, scale=5)
        return likelihood.prob(y_i) * prior_theta.prob(thetas) * prior_mu.prob(
            mu) * prior_tau.prob(math.exp(log_tau)) * math.exp(log_tau)
    elif density_name == 'figure_eight':
        mu1 = 1 * np.array([-1, -1], dtype='float32')
        mu2 = 1 * np.array([1, 1], dtype='float32')
        scale = 0.45 * np.array([1, 1], dtype='float32')
        pi = 0.5
        comp1 = tfd.MultivariateNormalDiag(loc=mu1, scale_diag=scale)
        comp2 = tfd.MultivariateNormalDiag(loc=mu2, scale_diag=scale)
        return (1 - pi) * comp1.prob(z) + pi * comp2.prob(z)
Example No. 27
if not shared:
	# weight loss
	a_list = []
	b_list = []
	with tf.variable_scope('weight_regulizer'):
		for i in range(nb_cnn+2):
			a_list.append(tf.Variable(1.0, name='a_{}'.format(i)))
			b_list.append(tf.Variable(0.0, name='b_{}'.format(i)))
	# source kernel and target kernel
	source_kernels = [v for v in tf.trainable_variables('source') if 'kernel' in v.name]
	target_kernels = [v for v in tf.trainable_variables('target') if 'kernel' in v.name]
	source_bias = [v for v in tf.trainable_variables('source') if 'bias' in v.name]
	target_bias = [v for v in tf.trainable_variables('target') if 'bias' in v.name]
	layer_loss_list = []
	for a, b, sk, tk, sb, tb in list(zip(a_list[:-1], b_list[:-1], source_kernels[:-1], target_kernels[:-1], source_bias[:-1], target_bias[:-1])):
		layer_loss_list.append(tm.exp(tf.nn.l2_loss(tm.scalar_mul(a, sk) + b - tk)) -1)
		layer_loss_list.append(tm.exp(tf.nn.l2_loss(tm.scalar_mul(a, sb) + b - tb)) -1)
	# 	layer_loss_list.append(tm.exp(tf.nn.l2_loss(tm.subtract(tm.add(tm.scalar_mul(a, sb), b), tb)))-1)
	# source bias and target bias
	w_loss = tf.add_n(layer_loss_list)
	total_loss = total_loss + w_loss
	gen_step = tf.train.AdamOptimizer(lr).minimize(total_loss, var_list = target_vars_list + source_vars_list + tf.trainable_variables('weight_regulizer'))
else:
	gen_step = tf.train.AdamOptimizer(lr).minimize(total_loss, var_list = target_vars_list)

D_loss_list = []
sC_loss_list = []
tC_loss_list = []
test_auc_list = []
val_auc_list = []
train_auc_list = []
Example No. 28
def calculate_score_loss(score, y_k):
    x = tf.gather(tf.gather(score, 0), 0)
    return log(1 + exp(-y_k * x))
Example No. 29
def positivity(f_i):
    return tfm.log(tfm.exp(f_i) - 1)
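As written, `tfm.log(tfm.exp(f_i) - 1)` is the inverse of softplus, so composing it with `tf.math.softplus` should return the input (a quick sketch, assuming `positivity` is in scope and using moderate positive values where `exp` does not overflow):

# Sketch: softplus(log(exp(f) - 1)) == f for f > 0.
import tensorflow as tf

f = tf.constant([0.1, 1.0, 5.0])
print(tf.math.softplus(positivity(f)).numpy())   # ~ [0.1, 1.0, 5.0]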
Example No. 30
def calculate_segment_loss(segment, true_mask):
    prod = tf.multiply(segment, true_mask)
    unit_val = log(1 + exp(-prod))
    return tf.reduce_sum(unit_val)
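The score loss and segment loss above both evaluate log(1 + exp(-x)). A small sketch (not the original code) showing that `tf.math.softplus(-x)` computes the same quantity without overflowing `exp` for large negative x:

# Sketch: softplus(-x) is the numerically stable form of log(1 + exp(-x)).
import tensorflow as tf

x = tf.constant([-100.0, 0.0, 100.0])
naive = tf.math.log(1.0 + tf.math.exp(-x))    # inf at x = -100
stable = tf.math.softplus(-x)                 # [100., 0.693..., ~0.]
print(naive.numpy(), stable.numpy())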