Example 1
def tf_nse_alpha(true, predicted, name='nse_alpha'):
    """
    Alpha decomposition of the NSE (Nash-Sutcliffe efficiency). See Gupta et al. (2009),
    as used in Kratzert et al. (2018).
    The alpha term is subtracted from 1.0.
    """
    const = tf.constant(1.0, dtype=tf.float32)
    nse_alpha = K.std(predicted) / K.std(true)
    return tf.subtract(const, nse_alpha, name=name + '_LOSS')
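The alpha term compares the spread of the simulated series with that of the observations, so the returned loss is zero when the standard deviations match. A minimal usage sketch, assuming TensorFlow 2.x with `K` being `tensorflow.keras.backend`; the discharge values are made up:

import tensorflow as tf
from tensorflow.keras import backend as K

# Made-up observed and simulated series (float32, matching the constant above).
true = tf.constant([1.0, 2.0, 3.0, 4.0], dtype=tf.float32)
predicted = tf.constant([1.1, 1.9, 3.2, 3.8], dtype=tf.float32)

loss = tf_nse_alpha(true, predicted)   # 1 - std(predicted) / std(true)
print(float(loss))                     # close to 0 when the spread is reproduced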
Example 2
 def plcc_dist_tf(x, y):
     scores = K.constant(scores_array)
     xm = K.sum((x / K.reshape(K.sum(x, 1), [-1, 1])) * scores, 1)
     ym = K.sum((y / K.reshape(K.sum(y, 1), [-1, 1])) * scores, 1)
     x_sd = K.std(xm)
     y_sd = K.std(ym)
     xm_center = xm - K.mean(xm)
     ym_center = ym - K.mean(ym)
     return K.mean(xm_center * ym_center) / (x_sd * y_sd + 1e-3)
Example 3
 def call(self, x, mask=None):
     if self.axis == -1:
         mean = K.mean(x, axis=[3, 2, 1, 0], keepdims=True)
         std = K.std(x, axis=[3, 2, 1, 0], keepdims=True)
     elif self.axis in (0, 1, 2, 3):
         all_dims = [0, 1, 2, 3]
         del all_dims[self.axis]
         mean = K.mean(x, axis=all_dims, keepdims=True)
         std = K.std(x, axis=all_dims, keepdims=True)
     return (x - mean) / (std + self.eps)
Example 4
def pearson_correlation(y_true, y_pred):
    # Pearson's correlation coefficient = covariance(X, Y) / (stdv(X) * stdv(Y))
    fs_pred = y_pred - K.mean(y_pred)
    fs_true = y_true - K.mean(y_true)
    covariance = K.mean(fs_true * fs_pred)
    
    stdv_true = K.std(y_true)
    stdv_pred = K.std(y_pred)
    
    return covariance / (stdv_true * stdv_pred)
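A quick sanity check for the formula in the comment (a minimal sketch, assuming TensorFlow 2.x; the arrays are made up): two perfectly linearly related series should give a coefficient of 1.

import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.constant([1.0, 2.0, 3.0, 4.0])
y_pred = tf.constant([2.0, 4.0, 6.0, 8.0])   # y_pred = 2 * y_true, perfectly correlated

print(float(pearson_correlation(y_true, y_pred)))   # 1.0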
Example 5
def corr(y_true, y_pred):
    #
    # This function calculates the correlation between the true and the predicted outputs
    #
    num1 = y_true - K.mean(y_true, axis=0)
    num2 = y_pred - K.mean(y_pred, axis=0)

    num = K.mean(num1 * num2, axis=0)
    den = K.std(y_true, axis=0) * K.std(y_pred, axis=0)

    return K.mean(num / den)
Example 6
    def call(self, inputs, **kwargs):
        """
        Student t-distribution kernel, probability of assigning encoded sequence i to cluster k.
            q_{ik} = (1 + dist(z_i, m_k)^2)^{-1} / normalization.

        Arguments:
            inputs: encoded input sequences, shape=(n_samples, timesteps, n_features)
        Return:
            q: soft labels for each sample. shape=(n_samples, n_clusters)
        """
        if self.dist_metric == 'eucl':
            distance = K.sum(K.sqrt(
                K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters),
                      axis=2)),
                             axis=-1)
        elif self.dist_metric == 'cid':
            ce_x = K.sqrt(
                K.sum(K.square(inputs[:, 1:, :] - inputs[:, :-1, :]),
                      axis=1))  # shape (n_samples, n_features)
            ce_w = K.sqrt(
                K.sum(K.square(self.clusters[:, 1:, :] -
                               self.clusters[:, :-1, :]),
                      axis=1))  # shape (n_clusters, n_features)
            ce = K.maximum(K.expand_dims(ce_x, axis=1), ce_w) / K.minimum(
                K.expand_dims(ce_x, axis=1),
                ce_w)  # shape (n_samples, n_clusters, n_features)
            ed = K.sqrt(
                K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters),
                      axis=2))  # shape (n_samples, n_clusters, n_features)
            distance = K.sum(ed * ce, axis=-1)  # shape (n_samples, n_clusters)
        elif self.dist_metric == 'cor':
            inputs_norm = (inputs - K.expand_dims(
                K.mean(inputs, axis=1), axis=1)) / K.expand_dims(
                    K.std(inputs, axis=1),
                    axis=1)  # shape (n_samples, timesteps, n_features)
            clusters_norm = (self.clusters - K.expand_dims(
                K.mean(self.clusters, axis=1), axis=1)) / K.expand_dims(
                    K.std(self.clusters, axis=1),
                    axis=1)  # shape (n_clusters, timesteps, n_features)
            pcc = K.mean(K.expand_dims(inputs_norm, axis=1) * clusters_norm,
                         axis=2)  # Pearson correlation coefficients
            distance = K.sum(
                K.sqrt(2.0 * (1.0 - pcc)), axis=-1
            )  # correlation-based similarities, shape (n_samples, n_clusters)
        elif self.dist_metric == 'acf':
            raise NotImplementedError
        else:
            raise ValueError('Available distances are eucl, cid, cor and acf!')
        q = 1.0 / (1.0 + K.square(distance) / self.alpha)
        q **= (self.alpha + 1.0) / 2.0
        q = K.transpose(K.transpose(q) / K.sum(q, axis=1))
        return q
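The last four lines are the part independent of the chosen distance: a Student-t kernel (as in DEC-style clustering) turns the (n_samples, n_clusters) distance matrix into soft assignments whose rows sum to one. A minimal sketch of just that step, with `alpha = 1.0` and a made-up distance matrix:

import tensorflow as tf
from tensorflow.keras import backend as K

alpha = 1.0
distance = tf.constant([[0.5, 2.0, 4.0],
                        [3.0, 0.1, 1.5]])   # shape (n_samples, n_clusters), made up

q = 1.0 / (1.0 + K.square(distance) / alpha)
q = q ** ((alpha + 1.0) / 2.0)
q = K.transpose(K.transpose(q) / K.sum(q, axis=1))   # normalize each row
print(q.numpy())   # rows sum to 1; the closest cluster gets the largest weight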
Example 7
def _normalize(image,normalize_type=1):
    # normalize_type = 1: global zero-mean / unit-std normalization
    # normalize_type = 2 (or anything else): per-image, per-channel zero-mean / unit-std normalization
    #
    new_img = image
    if normalize_type == 1:
        new_img = new_img - tf.reduce_mean(new_img)
        new_img = new_img / K.std(new_img)
    else:
        new_img = new_img - tf.reduce_mean(new_img,axis=(1,2),keepdims=True) #new_img.mean(axis=(1, 2), keepdims=True)
        new_img = new_img / K.std(new_img,axis=(1,2),keepdims=True) #new_img.std(axis=(1, 2), keepdims=True)
        
    return new_img
Example 8
def content_loss(y_true, y_pred, y_rep, sample_weight = None):
    #Normalize prediction
    mean = K.mean(y_pred, axis = [1, 2], keepdims = True)
    std = K.std(y_pred, axis = [1, 2], keepdims = True) + 1e-7
    yp = (y_pred - mean) / std

    #Normalize representation
    mean = K.mean(y_rep, axis = [1, 2], keepdims = True)
    std = K.std(y_rep, axis = [1, 2], keepdims = True) + 1e-7
    yr = (y_rep - mean) / std

    #Find difference in normalized representations
    return K.mean(K.square(yp - yr))
Example 9
def ccc_a(y_true, y_pred):
    """
    Concordance Correlation Coefficient for arousal
    """
    x = y_true[:, 1]
    y = y_pred[:, 1]
    mx = K.mean(x, axis=0)
    my = K.mean(y, axis=0)
    xm, ym = x - mx, y - my
    rho = K.sum(xm * ym) / (K.sqrt(K.sum(xm**2)) * K.sqrt(K.sum(ym**2)))
    x_s = K.std(x)
    y_s = K.std(y)
    ccc = 2 * rho * x_s * y_s / (x_s**2 + y_s**2 + (mx - my)**2)
    return ccc
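For reference, the quantity computed above is Lin's concordance correlation coefficient,

\[
\mathrm{CCC} = \frac{2\,\rho\,\sigma_x \sigma_y}{\sigma_x^2 + \sigma_y^2 + (\mu_x - \mu_y)^2},
\]

where \(\rho\) is the Pearson correlation between the arousal targets and predictions. It equals 1 only when the two series agree in correlation, scale, and mean.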
Example 10
def cc(y_true, y_pred):
    total_prediction = batch_size
    total_cc = 0
    # print(total_prediction);exit()
    for i in range(total_prediction):
        pred = y_pred[i]
        true = y_true[i]

        s_map_norm = (pred - K.mean(pred)) / K.std(pred)
        gt_norm = (true - K.mean(true)) / K.std(true)
        # Pearson correlation: sum(xy) / sqrt(sum(x^2) * sum(y^2))
        r = K.sum(s_map_norm * gt_norm) / K.sqrt(
            K.sum(s_map_norm * s_map_norm) * K.sum(gt_norm * gt_norm))
        total_cc += r

    return total_cc / total_prediction
Example 11
    def call(self, inputs, training=None):
        """Call instance normalization."""
        input_shape = K.int_shape(inputs)
        reduction_axes = list(range(0, len(input_shape)))

        if self.axis is not None:
            del reduction_axes[self.axis]

        del reduction_axes[0]

        mean = K.mean(inputs, reduction_axes, keepdims=True)
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev

        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed
Example 12
    def call(self, y_true, y_pred):
        """ Return the Gradient Magnitude Similarity Deviation Loss.


        Parameters
        ----------
        y_true: tensor or variable
            The ground truth value
        y_pred: tensor or variable
            The predicted value

        Returns
        -------
        tensor
            The loss value
        """
        true_edge = self._scharr_edges(y_true, True)
        pred_edge = self._scharr_edges(y_pred, True)
        ephsilon = 0.0025
        upper = 2.0 * true_edge * pred_edge
        lower = K.square(true_edge) + K.square(pred_edge)
        gms = (upper + ephsilon) / (lower + ephsilon)
        gmsd = K.std(gms, axis=(1, 2, 3), keepdims=True)
        gmsd = K.squeeze(gmsd, axis=-1)
        return gmsd
Example 13
def nss(y_true, y_pred):
    max_y_pred = K.repeat_elements(K.expand_dims(
        K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)),
                          shape_r_out,
                          axis=-1)),
                                   shape_c_out,
                                   axis=-1)
    y_pred /= max_y_pred
    y_pred_flatten = K.batch_flatten(y_pred)

    y_mean = K.mean(y_pred_flatten, axis=-1)
    y_mean = K.repeat_elements(K.expand_dims(
        K.repeat_elements(K.expand_dims(K.expand_dims(y_mean)),
                          shape_r_out,
                          axis=-1)),
                               shape_c_out,
                               axis=-1)

    y_std = K.std(y_pred_flatten, axis=-1)
    y_std = K.repeat_elements(K.expand_dims(
        K.repeat_elements(K.expand_dims(K.expand_dims(y_std)),
                          shape_r_out,
                          axis=-1)),
                              shape_c_out,
                              axis=-1)

    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    return -(K.sum(K.sum(y_true * y_pred, axis=2), axis=2) /
             K.sum(K.sum(y_true, axis=2), axis=2))
Example 14
def gmsd_loss(y_true, y_pred):
    """ Gradient Magnitude Similarity Deviation Loss.

    Improved image quality metric over MS-SSIM with easier calculations

    Parameters
    ----------
    y_true: tensor or variable
        The ground truth value
    y_pred: tensor or variable
        The predicted value

    Returns
    -------
    tensor
        The loss value

    References
    ----------
    http://www4.comp.polyu.edu.hk/~cslzhang/IQA/GMSD/GMSD.htm
    https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf

    """
    true_edge = scharr_edges(y_true, True)
    pred_edge = scharr_edges(y_pred, True)
    ephsilon = 0.0025
    upper = 2.0 * true_edge * pred_edge
    lower = K.square(true_edge) + K.square(pred_edge)
    gms = (upper + ephsilon) / (lower + ephsilon)
    gmsd = K.std(gms, axis=(1, 2, 3), keepdims=True)
    gmsd = K.squeeze(gmsd, axis=-1)
    return gmsd
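The heart of the metric is the last four lines: given per-pixel gradient magnitudes of the two images, the gradient magnitude similarity map is pooled by its standard deviation, so identical gradients give a constant map and zero deviation. A minimal sketch of that pooling step on made-up edge maps (assuming TensorFlow 2.x; `scharr_edges` itself is not needed here):

import tensorflow as tf
from tensorflow.keras import backend as K

true_edge = tf.random.uniform((1, 8, 8, 1))   # made-up gradient magnitudes
pred_edge = true_edge                         # identical gradients

ephsilon = 0.0025
gms = (2.0 * true_edge * pred_edge + ephsilon) / (
    K.square(true_edge) + K.square(pred_edge) + ephsilon)
print(float(K.std(gms)))   # ~0.0: no deviation, i.e. perfect similarity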
Example 15
 def call(self, inputs, **kwargs):
     pi = []
     for i in range(self.time_steps):
         # slice
         block = tf.strided_slice(inputs,
                                  begin=[0, i, 0],
                                  end=[
                                      self.batch_size,
                                      i + self.norm_window_size,
                                      self.nb_features
                                  ],
                                  strides=[1, 1, 1])
         # compute mean & standard deviation
         mean = K.mean(block, axis=1, keepdims=False)
         std = K.std(inputs[:, i:i + self.norm_window_size],
                     axis=1,
                     keepdims=False)
         # normalization
         input = tf.strided_slice(
             inputs,
             begin=[0, i + self.norm_window_size - 1, 0],
             end=[
                 self.batch_size, i + self.norm_window_size,
                 self.nb_features
             ],
             strides=[1, 1, 1])
         res = (K.squeeze(input, axis=1) - mean) / (std + K.epsilon())
         pi.append(res)
     return K.stack(pi, axis=1)
Example 16
    def train_step(self, images, style, noise, perform_gp=True, perform_pl=False):

        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            # Get style information
            w_space = []
            pl_lengths = self.pl_mean
            for i in range(len(style)):
                w_space.append(self.GAN.S(style[i]))

            # Generate images
            generated_images = self.GAN.G(w_space + [noise])

            # Discriminate
            real_output = self.GAN.D(images, training=True)
            fake_output = self.GAN.D(generated_images, training=True)

            # Hinge loss function
            gen_loss = K.mean(fake_output)
            divergence = K.mean(K.relu(1 + real_output) +
                                K.relu(1 - fake_output))
            disc_loss = divergence

            if perform_gp:
                # R1 gradient penalty
                disc_loss += gradient_penalty(images, real_output, 10)

            if perform_pl:
                # Slightly adjust W space
                w_space_2 = []
                for i in range(len(style)):
                    std = 0.1 / \
                        (K.std(w_space[i], axis=0, keepdims=True) + 1e-8)
                    w_space_2.append(
                        w_space[i] + K.random_normal(tf.shape(w_space[i])) / (std + 1e-8))

                # Generate from slightly adjusted W space
                pl_images = self.GAN.G(w_space_2 + [noise])

                # Get distance after adjustment (path length)
                delta_g = K.mean(
                    K.square(pl_images - generated_images), axis=[1, 2, 3])
                pl_lengths = delta_g

                if self.pl_mean > 0:
                    gen_loss += K.mean(K.square(pl_lengths - self.pl_mean))

        # Get gradients for respective areas
        gradients_of_generator = gen_tape.gradient(
            gen_loss, self.GAN.GM.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(
            disc_loss, self.GAN.D.trainable_variables)

        # Apply gradients
        self.GAN.GMO.apply_gradients(
            zip(gradients_of_generator, self.GAN.GM.trainable_variables))
        self.GAN.DMO.apply_gradients(
            zip(gradients_of_discriminator, self.GAN.D.trainable_variables))

        return disc_loss, gen_loss, divergence, pl_lengths
Example 17
    def call(self, batch):
        batch_shape = K.shape(batch)[:-1]
        batch_shape = K.concatenate([batch_shape, (1, )])

        batch_std = K.mean(
            K.std(batch, axis=0, keepdims=True), axis=-1, keepdims=True) + 1e-2

        return tf.zeros(batch_shape) + batch_std
Example 18
def tf_nse_beta(true, predicted, name='nse_beta'):
    """
    Beta decomposition of the NSE (Nash-Sutcliffe efficiency). See Gupta et al. (2009),
    as used in Kratzert et al. (2018).
    """
    const = tf.constant(1.0, dtype=tf.float32)
    nse_beta = (K.mean(predicted) - K.mean(true)) / K.std(true)
    return tf.subtract(const, nse_beta, name=name + '_LOSS')
Example 19
def ccc_error(y_true, y_pred):
    true_mean = K.mean(y_true)
    true_variance = K.var(y_true)
    pred_mean = K.mean(y_pred)
    pred_variance = K.var(y_pred)

    x = y_true - true_mean
    y = y_pred - pred_mean
    rho = K.sum(x * y) / (K.sqrt(K.sum(x**2) * K.sum(y**2)) + K.epsilon())

    std_predictions = K.std(y_pred)
    std_gt = K.std(y_true)

    ccc = 2 * rho * std_gt * std_predictions / (std_predictions**2 +
                                                std_gt**2 +
                                                (pred_mean - true_mean)**2)
    return 1 - ccc
Example 20
def ssim(y_true, y_pred, data_range=50):
    """structural similarity measurement system."""
    K1 = 0.01
    K2 = 0.03

    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)

    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = cov(y_true, y_pred)

    L = data_range
    C1 = (K1 * L)**2
    C2 = (K2 * L)**2

    return ((2 * mu_x * mu_y + C1) * (2 * sig_xy + C2) /
            ((mu_x**2 + mu_y**2 + C1) * (sig_x**2 + sig_y**2 + C2)))
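For reference, the expression implemented above is the single-window SSIM index,

\[
\mathrm{SSIM}(x, y) = \frac{(2\mu_x\mu_y + C_1)(2\sigma_{xy} + C_2)}{(\mu_x^2 + \mu_y^2 + C_1)(\sigma_x^2 + \sigma_y^2 + C_2)},
\]

with \(C_1 = (K_1 L)^2\), \(C_2 = (K_2 L)^2\) and \(L\) the dynamic range of the data; `cov` is assumed to be a covariance helper defined elsewhere in the module.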
Example 21
 def _dice(self, inputs, epsilon=1e-8):
     """Dice Adaptive Activation."""
     mean = K.mean(inputs, axis=0)
     var = K.std(inputs, axis=0)
     indicator = (inputs - mean) / (K.sqrt(var + epsilon))
     indicator = K.sigmoid(indicator)
     pos = K.relu(inputs)
     neg = -self._alpha * K.relu(-inputs)
     return indicator * pos + (1. - indicator) * neg
Example 22
    def _standartize(self, x, axes=[1, 2], std_eps=1e-9):
        '''Standardize the input to have mean 0 and std 1'''
        s = K.shape(x)
        N = K.prod(K.gather(s, axes))

        x = x - K.mean(x, axis=axes, keepdims=True)
        stds = K.std(x, axis=axes, keepdims=True)
        stds = tf.where(stds < std_eps, tf.fill(K.shape(stds), np.inf), stds)
        x = x / (stds * K.sqrt(tf.cast(N, K.floatx())))
        return x
Example 23
 def call(self, inputs, **kwargs) -> KTensor:
     """
     :param inputs: a Keras tensor
     :param kwargs:
     :return:
     """
     x = inputs
     mean = K.mean(x, axis=-1, keepdims=True)
     std = K.std(x, axis=-1, keepdims=True)
     return self.gain * (x - mean) / (std + self.eps) + self.bias
Example 24
def _normalize(image, normalize_type=1):
    # normalize_type = 1: global zero-mean / unit-std normalization
    # normalize_type = 2: per-image, per-channel zero-mean / unit-std normalization
    # normalize_type = 3: scale from [0, 255] to [-1, 1]; anything else: scale to [0, 1]
    new_img = image
    if normalize_type == 1:
        new_img = new_img - tf.reduce_mean(new_img)
        new_img = new_img / K.std(new_img)
    elif normalize_type == 2:
        new_img = new_img - tf.reduce_mean(new_img, axis=(
            1, 2), keepdims=True)  #new_img.mean(axis=(1, 2), keepdims=True)
        new_img = new_img / K.std(new_img, axis=(
            1, 2), keepdims=True)  #new_img.std(axis=(1, 2), keepdims=True)
    elif normalize_type == 3:
        new_img = (new_img - 127.5) / 127.5
        # new_img = new_img / K.std(new_img,axis=(1,2),keepdims=True) #new_img.std(axis=(1, 2), keepdims=True)
    else:
        new_img = new_img / 255.0
    return new_img
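A minimal usage sketch, assuming the function above is in scope and TensorFlow 2.x is installed; the image batch is made up:

import tensorflow as tf
from tensorflow.keras import backend as K

# Made-up batch of two 8x8 RGB images with pixel values in [0, 255].
images = tf.random.uniform((2, 8, 8, 3), maxval=255.0)

standardized = _normalize(images, normalize_type=2)   # zero mean / unit std per image and channel
scaled = _normalize(images, normalize_type=3)         # linearly mapped from [0, 255] to [-1, 1]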
Example 25
    def call(self, inputs):
        if not isinstance(inputs, list):
            raise TypeError('Need to be list for residual.')

        # y = keras.layers.Add()([inputs[0], inputs[1]])
        y = inputs[0] + inputs[1]  # fix [None, None] above

        mean = K.mean(y, axis=-1, keepdims=True)
        std = K.std(y, axis=-1, keepdims=True)
        y = self.gamma * (y - mean) / (std + self.epsilon) + self.beta  # scale, then shift
        return y
Example 26
    def call(self, inputs):
        """This is where the layer's logic lives.

        Parameters
        ----------
        inputs: tensor
            Input tensor, or list/tuple of input tensors
        kwargs: dict
            Additional keyword arguments

        Returns
        -------
        tensor
            A tensor or list/tuple of tensors
        """
        if self.data_format == 'channels_last':
            pooled = K.std(inputs, axis=[1, 2])
        else:
            pooled = K.std(inputs, axis=[2, 3])
        return pooled
Example 27
def total_variation_loss(y_true, y_pred):
    x = y_pred
    mean, sd = K.mean(x), K.std(x) + 1e-5
    x = (x - mean) / sd

    y_ij = x[:, :-1, :-1, :]
    y_i1j = x[:, 1:, :-1, :]
    y_ij1 = x[:, :-1, 1:, :]

    err = (K.square(y_ij - y_i1j) + K.square(y_ij - y_ij1)) / 2
    return K.mean(err)
Example 28
    def call(self, inputs):

        (x, scale, bias) = inputs
        mean = K.mean(x, axis=self.axis, keepdims=True)
        std = K.std(x, axis=self.axis, keepdims=True) + self.epsilon

        for i in range(self.n_dims_to_add):
            scale = K.expand_dims(scale, axis=self.expansion_axis)
            bias = K.expand_dims(bias, axis=self.expansion_axis)

        return (x - mean) / std * scale + bias
Example 29
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs[0])

        beta = inputs[1]
        gamma = inputs[2]

        reduction_axes = [0, 1, 2]
        mean = K.mean(inputs[0], reduction_axes, keepdims=True)
        stddev = K.std(inputs[0], reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs[0] - mean) / stddev

        return normed * gamma + beta
Example 30
 def __call__(self, x, output):
     """
     :param x: residual input
     :param output: sublayer output for input
     :return:
     """
     mean = K.mean(output, axis=-1, keepdims=True)
     std = K.std(output, axis=-1, keepdims=True)
     output = self.gamma * (x - mean) / (std +
                                         self.eps) + self.beta  # normalized
     output = keras.layers.Add()([output, x])  # Add, residual
     return output