Example 1
    def call(self, inputs, mask=None):
        # Builds input
        x, y = tf.split(inputs, num_or_size_splits=2, axis=1)
        x2 = tfm.square(x)
        y2 = tfm.square(y)
        xy = tfm.multiply(x, y)

        quad_inputs = tf.stack(
            [x2, xy, y2, x, y, tf.ones(tf.shape(x))], axis=1)
        quad_outputs = tf.squeeze(tf.matmul(self.coeffs, quad_inputs),
                                  axis=[1])

        return quad_outputs
Example 2
    def call(self, inputs, mask=None):
        # Builds input
        x, y = tf.split(inputs, num_or_size_splits=2, axis=1)
        x2 = tfm.square(x)
        y2 = tfm.square(y)
        xy = tfm.multiply(x, y)

        quad_inputs = tf.stack(
            [x2, xy, y2, x, y, tf.ones(tf.shape(x))], axis=1)
        quad_outputs = tf.reduce_sum(
            tf.multiply(self.coeffs, tf.transpose(quad_inputs,
                                                  perm=(0, 2, 1))), 2)

        return quad_outputs
Example 3
def loss_funcxpos2(y_reco, y_true, re=False):
    from tensorflow.math import sin, cos, acos, abs, reduce_mean, subtract, square
    # Energy loss
    loss_energy = reduce_mean(abs(subtract(y_reco[:,0], y_true[:,0]))) #this works well but could maybe be improved

    zeni = [cos(y_true[:,1]) - y_reco[:,1] , 
            sin(y_true[:,1]) - y_reco[:,2]]

    azi  = [cos(y_true[:,2]) - y_reco[:,3] , 
            sin(y_true[:,2]) - y_reco[:,4]]

    loss_angle = (reduce_mean(square(azi[0])) + reduce_mean(square(azi[1]))
                  + reduce_mean(square(zeni[0])) + reduce_mean(square(zeni[1])))
    if not re:
        return loss_energy + loss_angle
    else:
        return float(loss_energy + loss_angle), [float(loss_energy), float(loss_angle)]
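A minimal usage sketch for the loss above (the column layout is inferred from the indexing, not stated in the source): y_true holds [energy, zenith, azimuth] and y_reco holds [energy, cos(zenith), sin(zenith), cos(azimuth), sin(azimuth)], so a perfect reconstruction yields a loss of zero.

import numpy as np
import tensorflow as tf

# Hypothetical single-event inputs matching the inferred column layout
y_true = tf.constant([[1.0, 0.3, 1.2]], dtype=tf.float32)
y_reco = tf.constant([[1.0, np.cos(0.3), np.sin(0.3),
                       np.cos(1.2), np.sin(1.2)]], dtype=tf.float32)
print(float(loss_funcxpos2(y_reco, y_true)))  # ~0.0 for a perfect reconstruction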
Example 4
def root_sum_squared_error(inputs):

    #if K.ndim(y_true) > 2:
    #	return K.mean(K.sqrt(K.sum(K.square(y_true - y_pred),
    #				axis=K.arange(1, K.ndim(y_true)) )))
    #else:
    return tf_math.sqrt(
        tf_math.reduce_sum(tf_math.square(inputs[0] - inputs[1]),
                           axis=(1, 2, 3, 4)))
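A quick shape check, assuming tf_math above is an alias for tf.math: with 5-D inputs the reduction over axes 1-4 yields one root-sum-squared error per batch element.

import tensorflow as tf
from tensorflow import math as tf_math

a = tf.ones((2, 3, 4, 5, 1))
b = tf.zeros((2, 3, 4, 5, 1))
# 60 unit squared differences per sample -> sqrt(60) ≈ 7.75, output shape (2,)
print(root_sum_squared_error([a, b]))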
Example 5
    def __call__(self, y_true, y_pred):
        from numpy import square, mean, reshape, concatenate, expand_dims

        assert (y_true.shape[-1] == 3 * 2)
        assert (y_pred.shape[-1] == 3 * 2)
        y_true = reshape(y_true, [-1, 3])
        y_pred = reshape(y_pred, [-1, 3])

        if self._use_pxyz:
            y_true = self._convert_to_pxyz(y_true)
            y_pred = self._convert_to_pxyz(y_pred)
            return mean(square(y_true - y_pred))
        else:
            d_sq = square(y_true - y_pred)
            d_phi = square(_delta_phi_np(y_true[:, 2], y_pred[:, 2]))
            d_phi = expand_dims(d_phi, axis=-1)
            diff = concatenate([d_sq[:, 0:2], d_phi], axis=-1)
            return mean(diff)
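The helper _delta_phi_np is not shown in this snippet; a common definition (an assumption, not taken from the original source) wraps the difference of two azimuthal angles into [-pi, pi] so the periodicity of phi does not inflate the loss:

import numpy as np

def _delta_phi_np(phi1, phi2):
    # Wrap the angular difference into [-pi, pi]
    d = phi1 - phi2
    return (d + np.pi) % (2 * np.pi) - np.pi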
Example 6
 def rbf(self, v, l2):
     if self.options.kernel_exponential:
         v = tf.exp(v)
         l2 = tf.exp(l2)
     sq_dist = tf.divide(tfm.square(self.t_dist),
                         tf.reshape(2 * l2, (-1, 1, 1)))
     K = tf.reshape(v, (-1, 1, 1)) * tfm.exp(-sq_dist)
     m = tf.zeros((self.N_p), dtype='float64')
     return m, K
Example 7
def compute_gaussian_kl(z_log_var, z_mean):
    """ Compute the KL divergence between a Gaussian and a Normal distribution. Based on Locatello et al.
    implementation (https://github.com/google-research/disentanglement_lib)

    :param z_log_var: the log variance of the Gaussian
    :param z_mean: the mean of the Gaussian
    :return: the KL divergence
    """
    kl_loss = tfm.square(z_mean) + tfm.exp(z_log_var) - z_log_var - 1
    return 0.5 * tfm.reduce_sum(kl_loss, [1])
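A quick sanity check, assuming tfm is tf.math as in the rest of these snippets: a standard-normal posterior (zero mean, zero log variance) has zero KL divergence from the prior.

import tensorflow as tf
from tensorflow import math as tfm

z_mean = tf.zeros((4, 8))      # batch of 4, latent size 8
z_log_var = tf.zeros((4, 8))   # log variance 0 -> unit variance
print(compute_gaussian_kl(z_log_var, z_mean))  # ~[0. 0. 0. 0.]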
Example 8
def compute_gaussian_log_pdf(z, z_mean, z_log_var):
    """ Compute the log probability density of a Gaussian distribution. Based on Locatello et al. implementation
    (https://github.com/google-research/disentanglement_lib)

    :param z: the sampled values
    :param z_mean: the mean of the Gaussian
    :param z_log_var: the log variance of the Gaussian
    :return: the log probability density
    """
    log2pi = tfm.log(2. * tf.constant(pi))
    return -0.5 * (tfm.square(z - z_mean) * tfm.exp(-z_log_var) + z_log_var + log2pi)
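A minimal check, assuming tfm is tf.math and pi comes from Python's math module: evaluated at its own mean with unit variance, each element reduces to -0.5 * log(2 * pi) ≈ -0.9189.

import tensorflow as tf
from tensorflow import math as tfm
from math import pi

z = tf.zeros((2, 4))
print(compute_gaussian_log_pdf(z, z, tf.zeros_like(z)))  # each element ≈ -0.9189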
Example 9
    def __call__(self, y_true, y_pred):
        from tensorflow.math import square, reduce_mean
        from tensorflow import reshape, concat, expand_dims
        from tensorflow.keras import backend as K

        assert (K.int_shape(y_true)[-1] == 3 * 2)
        assert (K.int_shape(y_pred)[-1] == 3 * 2)
        y_true = reshape(y_true, [-1, 3])
        y_pred = reshape(y_pred, [-1, 3])

        if self._use_pxyz:
            y_true = self._convert_to_pxyz(y_true)
            y_pred = self._convert_to_pxyz(y_pred)
            return reduce_mean(square(y_true - y_pred))
        else:
            d_sq = square(y_true - y_pred)
            d_phi = square(_delta_phi_tf(y_true[:, 2], y_pred[:, 2]))
            d_phi = expand_dims(d_phi, axis=-1)
            diff = concat([d_sq[:, 0:2], d_phi], axis=-1)
            return reduce_mean(diff)
Example 10
    def _genes(self, fbar, kbar, k_fbar, wbar, w_0bar, σ2_m, Δ):
        m_pred = self.predict_m(kbar, k_fbar, wbar, fbar, w_0bar, Δ)
        sq_diff = tfm.square(self.data.m_obs - tf.transpose(
            tf.gather(tf.transpose(m_pred), self.data.common_indices)))

        variance = tf.reshape(σ2_m, (-1, 1))
        if self.preprocessing_variance:
            variance = logit(
                variance) + self.data.σ2_m_pre  # add PUMA variance
        log_lik = -0.5 * tfm.log(2 * PI * variance) - 0.5 * sq_diff / variance
        log_lik = tf.reduce_sum(log_lik)
        return log_lik
Example 11
    def call(self, inputs, mask=None):
        # Saves dimensions to make code nicer
        batch_size = inputs.shape[0]
        height = inputs.shape[1]
        width = inputs.shape[2]
        num_filters = inputs.shape[3]

        # Reshapes so last 2 dimensions are a single filter
        inputs = tf.transpose(inputs, [0, 3, 1, 2])

        # Reshapes into columns for x,y
        inputs = tf.reshape(inputs, [batch_size, num_filters, -1, 2])

        # Transposes to get correct dimensions for matmul
        inputs = tf.transpose(inputs, [0, 1, 3, 2])

        # Splits tensor into x & y
        x, y = tf.split(inputs, 2, 2)

        # Calculates other components of quadratic input
        x2 = tfm.square(x)
        y2 = tfm.square(y)
        xy = tfm.multiply(x, y)

        # Builds quadratic input
        quad_input = tf.concat(
            [x2, xy, y2, x, y, tf.ones(tf.shape(x))], axis=2)

        # Matmul -> Quadratic output
        quad_output = tf.matmul(self.coeffs, quad_input)

        # Reshapes back to original size with width / 2
        quad_output = tf.reshape(
            quad_output, [batch_size, num_filters, height,
                          int(width / 2)])

        # Rearranges axis to have num_filters last (as CONV2D expects)
        quad_output = tf.transpose(quad_output, [0, 2, 3, 1])

        return quad_output
Example 12
 def call(self, x):
     input_shape = x.shape.as_list()
     axis = tuple(
         range(0 if self.batch else 1,
               len(input_shape) if input_shape else 4))
     if self.mean:
         x = x - tfm.reduce_mean(x, axis=axis, keepdims=True)
         if self.std:
             std = tfm.sqrt(
                 tfm.reduce_mean(tfm.square(x), axis=axis, keepdims=True))
     elif self.std:
         std = tfm.reduce_std(x, axis=axis, keepdims=True)
     if self.std:
         return x / (self.eps + std) if self.eps else x / std
     return x
Example 13
    def tfs(self, σ2_f, fbar):
        '''
        Computes log-likelihood of the transcription factors.
        '''
        # assert self.options.tf_mrna_present
        if not self.preprocessing_variance:
            variance = tf.reshape(σ2_f, (-1, 1))
        else:
            variance = self.data.σ2_f_pre
        f_pred = inverse_positivity(fbar)
        sq_diff = tfm.square(self.data.f_obs - tf.transpose(
            tf.gather(tf.transpose(f_pred), self.data.common_indices)))
        log_lik = -0.5 * tfm.log(2 * PI * variance) - 0.5 * sq_diff / variance
        log_lik = tf.reduce_sum(log_lik)

        return log_lik
Example 14
def psnr(y_label, y_pred):
    """
    PSNR is Peak Signal-to-Noise Ratio, which is closely related to the mean squared error.

    It can be calculated as
    PSNR = 20 * log10(MAXp) - 10 * log10(MSE)

    When providing an unscaled input, MAXp = 255, so 20 * log10(255) ≈ 48.1308.
    However, since we are scaling our input, MAXp = 1. Therefore 20 * log10(1) = 0.
    Thus we remove that component completely and only compute the remaining MSE component.
    """
    _result = subtract(y_label, y_pred)
    _result = square(_result)
    _result = tf_mean(_result)
    _result = multiply(-10., log(_result, 10.))
    return _result
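This snippet relies on names (subtract, square, tf_mean, multiply, log) bound elsewhere in the module. Note that tf.math.log takes no base argument, so the two-argument log call above implies a change-of-base helper along these lines (an assumption, not from the original source):

import tensorflow as tf
from tensorflow.math import subtract, square, multiply
from tensorflow.math import reduce_mean as tf_mean

def log(x, base):
    # Change of base: log_b(x) = ln(x) / ln(b)
    return tf.math.log(x) / tf.math.log(tf.cast(base, x.dtype))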
Example 15
    def mappable_mse_loss(self, y_true, y_pred):
        """

        a mappable TensorFlow function that calculates the MSE component of the loss (the non-PC errors)

        :param y_true: true y
        :param y_pred: predicted y
        :return: mse loss of the function
        """

        H, W, C = y_true.shape

        output_channels = C // self.anchor_boxes

        # compute the log loss
        log_error = self.log_error(y_true, y_pred)
        squared_error = math.square(y_pred - y_true)

        # mask out the log error and mse
        log_mask = tf.concat([tf.zeros((H, W, 1)),
                              tf.ones((H, W, 2)),
                              tf.zeros((H, W, 2)),
                              tf.ones((H, W, output_channels - 5))], axis=-1)

        mse_mask = tf.concat([tf.zeros((H, W, 1)),
                              tf.zeros((H, W, 2)),
                              tf.ones((H, W, 2)),
                              tf.zeros((H, W, output_channels - 5))], axis=-1)

        log_mask = tf.concat([log_mask for i in range(self.anchor_boxes)],
                             axis=-1)

        mse_mask = tf.concat([mse_mask for i in range(self.anchor_boxes)],
                             axis=-1)

        error = tf.math.multiply_no_nan(squared_error, mse_mask) + \
                tf.math.multiply_no_nan(log_error, log_mask)

        # remove the first term of the error

        # reshape y
        y_true_first_term = tf.reshape(y_true, shape=y_true.shape + (1,))

        raw_squared_error = tf.multiply(error, y_true_first_term[:, :, 0, :])
        # tf.print(raw_squared_error)

        return self.mse_lambda * raw_squared_error
Example 16
def gaussian_log_likelihood(x, mu_x, log_sig_sq_x, SMALL_CONSTANT=1e-5):
    '''
    Element-wise Gaussian log likelihood
    INPUTS:
        x = points
        mu_x - means of Gaussians
        log_sig_sq_x - log variance of Gaussian
    OPTIONAL INPUTS:
        SMALL_CONSTANT - small constant to avoid taking the log of 0 or dividing by 0
    OUTPUTS:
        log_lik - element-wise log likelihood
    '''

    # -E_q(z|x) log(p(x|z))
    normalising_factor = -0.5 * tfm.log(
        SMALL_CONSTANT + tfm.exp(log_sig_sq_x)) - 0.5 * np.log(2.0 * np.pi)
    square_diff_between_mu_and_x = tfm.square(mu_x - x)
    inside_exp = -0.5 * tfm.divide(square_diff_between_mu_and_x,
                                   SMALL_CONSTANT + tfm.exp(log_sig_sq_x))
    log_lik = normalising_factor + inside_exp

    return log_lik
Example 17
    def __call__(self, x, probes):
        """Propagate forward in time for the length of the input.

        Parameters
        ----------
        x :
            Input sequence(s), batched in first dimension
        probes :
            Coordinates (px, py) of the probes at which the summed squared
            field is read out to form the normalized output
        """
        # hacky way of figuring out if we're on the GPU from inside the model
        device = "cuda" if next(self.parameters()).is_cuda else "cpu"

        # First dim is batch
        batch_size = x.shape[0]

        # init hidden states
        y1 = tf.zeros([batch_size, self.Nx, self.Ny], dtype=tf.dtypes.float32)
        y2 = tf.zeros([batch_size, self.Nx, self.Ny], dtype=tf.dtypes.float32)
        y_all = []

        for xi in x:
            y, y1 = self.time_step(xi, y1, y2)
            y_all.append(y)

        y = tf.stack(y_all, axis=1)
        total_sum = 0
        y_outs = []
        for probe_crd in probes:
            px, py = probe_crd
            y_out = math.reduce_sum(math.square(y[:,:,px,py]))
            total_sum += y_out
            y_outs.append(y_out)

        y_outs = tf.constant(y_outs) / total_sum

        return y_outs
Example 18
def kl_normal(mu_1, log_sig_sq_1, mu_2, log_sig_sq_2):
    '''
    Element-wise KL divergence between two normal distributions
    INPUTS:
        mu_1 - mean of first distribution
        log_sig_sq_1 - log variance of first distribution
        mu_2 - mean of second distribution
        log_sig_sq_2 - log variance of second distribution
    OUTPUTS:
        KL - element-wise KL divergence
    '''

    v_mean = mu_2  #2
    aux_mean = mu_1  #1
    v_log_sig_sq = log_sig_sq_2  #2
    aux_log_sig_sq = log_sig_sq_1  #1
    v_log_sig = tfm.log(tfm.sqrt(tfm.exp(v_log_sig_sq)))  #2
    aux_log_sig = tfm.log(tfm.sqrt(tfm.exp(aux_log_sig_sq)))  #1
    KL = v_log_sig - aux_log_sig + tf.divide(
        tfm.exp(aux_log_sig_sq) + tfm.square(aux_mean - v_mean),
        2.0 * tfm.exp(v_log_sig_sq)) - 0.5

    return KL
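As a quick sanity check (assuming tf and tfm = tf.math as in the snippet), the element-wise KL between identical normal distributions is zero:

import tensorflow as tf
from tensorflow import math as tfm

mu = tf.constant([0.0, 1.0, -2.0])
log_sig_sq = tf.constant([0.0, 0.5, -1.0])
print(kl_normal(mu, log_sig_sq, mu, log_sig_sq))  # ~[0. 0. 0.]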
Example 19
 def m_sq_diff_fn(all_states):
     fbar, k_fbar, kbar, wbar, w_0bar, σ2_m, Δ = self.likelihood.get_parameters_from_state(
         all_states, self.state_indices)
     m_pred = self.likelihood.predict_m(kbar, k_fbar, wbar, fbar, w_0bar, Δ)
     sq_diff = tfm.square(self.data.m_obs - tf.transpose(
         tf.gather(tf.transpose(m_pred), self.data.common_indices)))
     return tf.reduce_sum(sq_diff, axis=0)
Example 20
 def f_sq_diff_fn(all_states):
     f_pred = inverse_positivity(all_states[self.state_indices['latents']][0])
     sq_diff = tfm.square(self.data.f_obs - tf.transpose(
         tf.gather(tf.transpose(f_pred), self.data.common_indices)))
     return tf.reduce_sum(sq_diff, axis=0)
Example 21
def disc_train_step(
    real_batch,
    label,
    noise_batch,
    step,
    eps=EPS,
):
    """
    Discriminator training step. So far only supports the relavg_gp loss.
    # TODO: abstract out loss and support more types of losses.

    Args:
        real_batch: np.array (batch_size, x, y, ch)
            Batch of randomly sampled real images.
        label: (batch_size, n_classes)
            Batch of labels corresponding to the randomly sampled reals above.
        noise_batch: np.array (batch_size, latent_dim)
            Batch of random latents.
        eps: tf float
            Constant to keep the log function happy.
    """
    gp_strength = tf.constant(args.gp_wt, dtype=tf.float32)

    with tf.GradientTape() as disc_tape:
        # Generate fake images to feed discriminator:
        fake_batch = generator([noise_batch, label], training=True)

        # Get discriminator logits on real images and fake images:
        # Could use different labels for fakes too. Doesn't make a
        # noticeable difference.
        disc_opinion_real = discriminator([real_batch, label], training=True)
        disc_opinion_fake = discriminator([fake_batch, label], training=True)

        # Get output for relativistic average losses:
        real_fake_rel_avg_opinion = (disc_opinion_real -
                                     tf.reduce_mean(disc_opinion_fake, axis=0))

        fake_real_rel_avg_opinion = (disc_opinion_fake -
                                     tf.reduce_mean(disc_opinion_real, axis=0))

        # Get loss:
        disc_loss = tf.reduce_mean(-tf.reduce_mean(
            tfm.log(tfm.sigmoid(real_fake_rel_avg_opinion) + eps),
            axis=0,
        ) - tf.reduce_mean(
            tfm.log(1 - tfm.sigmoid(fake_real_rel_avg_opinion) + eps),
            axis=0,
        ))

        # Get gradient penalty:
        new_real_batch = 1.0 * real_batch
        new_label = 1.0 * label
        with tf.GradientTape() as gp_tape:
            gp_tape.watch(new_real_batch)
            disc_opinion_real_new = discriminator(
                [new_real_batch, new_label],
                training=True,
            )

        grad = gp_tape.gradient(disc_opinion_real_new, new_real_batch)
        grad_sqr = tfm.square(grad)
        grad_sqr_sum = tf.reduce_sum(
            grad_sqr,
            axis=np.arange(1, len(grad_sqr.shape)),
        )
        gradient_penalty = (gp_strength / 2.0) * tf.reduce_mean(grad_sqr_sum)
        total_disc_loss = disc_loss + gradient_penalty

    # Get gradients and update discriminator:
    discriminator_gradients = disc_tape.gradient(
        total_disc_loss,
        discriminator.trainable_variables,
    )

    doptim.apply_gradients(
        zip(discriminator_gradients, discriminator.trainable_variables))

    with summary_writer.as_default():
        tf.summary.scalar('losses/d_loss', disc_loss, step=step)
        tf.summary.scalar('regularizers/GP', gradient_penalty, step=step)
Example 22
def cart2pol(x, y):
    theta = tfm.atan2(y, x)
    rho = tfm.sqrt(tfm.add(tfm.square(x), tfm.square(y)))
    return (theta, rho)
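For example (assuming tfm is tf.math), the point (3, 4) maps to rho = 5 and theta = atan2(4, 3) ≈ 0.927 rad:

import tensorflow as tf
from tensorflow import math as tfm

theta, rho = cart2pol(tf.constant(3.0), tf.constant(4.0))
print(float(theta), float(rho))  # ≈ 0.9273 5.0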
Example 23
def snake_(X, beta):

    return X + (1 / beta) * math.square(math.sin(beta * X))
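One possible way to use this snake activation inside a Keras model, assuming math above refers to tf.math (a sketch, not taken from the original source):

import tensorflow as tf
from tensorflow import math  # the `math` the snippet relies on (assumption)

x = tf.linspace(-3.0, 3.0, 7)
y = snake_(x, beta=1.0)  # elementwise x + (1/beta) * sin^2(beta * x)
snake_layer = tf.keras.layers.Lambda(lambda t: snake_(t, 1.0))  # drop-in activation layer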