def pearson_correlation(y_true, y_pred):
    # Pearson's correlation coefficient = covariance(X, Y) / (stdv(X) * stdv(Y))
    fs_pred = y_pred - K.mean(y_pred)
    fs_true = y_true - K.mean(y_true)
    covariance = K.mean(fs_true * fs_pred)

    stdv_true = K.std(y_true)
    stdv_pred = K.std(y_pred)

    return covariance / (stdv_true * stdv_pred)
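A minimal usage sketch (the model and data here are hypothetical, and tf.keras is assumed): because the function follows the (y_true, y_pred) signature, it can be passed directly to model.compile as a metric.

import numpy as np
from tensorflow import keras

model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='adam', loss='mse', metrics=[pearson_correlation])

x = np.random.rand(64, 4).astype('float32')
y = np.random.rand(64, 1).astype('float32')
model.fit(x, y, epochs=1, verbose=0)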
Example #2
def mvn(tensor):
    """Per-row mean-variance normalization."""
    epsilon = 1e-6
    mean = K.mean(tensor, axis=1, keepdims=True)
    std = K.std(tensor, axis=1, keepdims=True)
    return (tensor - mean) / (std + epsilon)
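A sketch of how such a function is typically attached to a model through a Lambda layer (the surrounding layers are illustrative assumptions, not part of the original snippet):

from tensorflow import keras
from tensorflow.keras.layers import Dense, Input, Lambda

inp = Input(shape=(16,))
normed = Lambda(mvn)(inp)  # per-row mean-variance normalization
out = Dense(1)(normed)
model = keras.Model(inp, out)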
Example #3
    def call(self, inputs):
        if not self.norm_method:
            outputs = inputs

        elif self.norm_method == 'whole_image':
            reduce_axes = [3, 4] if self.data_format == 'channels_first' else [2, 3]
            outputs = inputs - tf.reduce_mean(
                inputs, axis=reduce_axes, keepdims=True)
            outputs /= K.std(inputs, axis=reduce_axes, keepdims=True)

        elif self.norm_method == 'std':
            outputs = inputs - self._average_filter(inputs)
            outputs /= self._window_std_filter(outputs)

        elif self.norm_method == 'max':
            outputs = inputs / tf.reduce_max(inputs)
            outputs -= self._average_filter(outputs)

        elif self.norm_method == 'median':
            reduce_axes = list(range(len(inputs.shape)))[1:]
            reduce_axes.remove(self.channel_axis)
            # mean = self._reduce_median(inputs, axes=reduce_axes)
            # Note: the global percentile below ignores the reduce_axes
            # computed above, and tf.contrib was removed in TensorFlow 2.x
            # (tfp.stats.percentile is the closest replacement).
            mean = tf.contrib.distributions.percentile(inputs, 50.)
            outputs = inputs / mean
            outputs -= self._average_filter(outputs)
        else:
            raise NotImplementedError('"{}" is not a valid norm_method'.format(
                self.norm_method))

        return outputs
Example #4
    def call(self, inputs, training=None):
        # inputs = [features, class_labels]; per-class gamma/beta rows are
        # gathered below according to the class label.
        class_labels = K.squeeze(inputs[1], axis=1)
        inputs = inputs[0]
        input_shape = K.int_shape(inputs)
        reduction_axes = list(range(0, len(input_shape)))

        if self.axis is not None:
            del reduction_axes[self.axis]

        del reduction_axes[0]

        mean = K.mean(inputs, reduction_axes, keepdims=True)
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev

        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[0] = K.shape(inputs)[0]
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.scale:
            broadcast_gamma = K.reshape(K.gather(self.gamma, class_labels),
                                        broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(K.gather(self.beta, class_labels),
                                       broadcast_shape)
            normed = normed + broadcast_beta
        return normed
Example #5
    def sample_variance(self, x, y):
        # Decode 1000 random latent samples and return the mean
        # per-dimension standard deviation of the reconstructions.
        _, _, decoder_lambda, decoder_dense = self.decoder.layers
        input1 = K.random_normal(shape=(1000, self.latent_dim))
        input2 = K.random_normal(shape=(1000, self.latent_dim))

        z = decoder_lambda([input1, input2])
        x_decoded_mean = decoder_dense(z)

        return K.mean(K.std(x_decoded_mean, axis=0))
Example #6
    def _merge_function(self, inputs):
        # Validate inputs.
        x = inputs
        if not isinstance(x, list) or len(x) != 2:
            raise ValueError('Input must be a list of two tensors.')
        assert K.int_shape(x[0]) == K.int_shape(x[1])

        if self.axis < 0:
            assert K.ndim(x[0]) + self.axis >= 0
            self.axis = K.ndim(x[0]) + self.axis  # Convert to a positive index.

        # Reduce over every non-batch axis except the channel axis.
        reduce_axes = tuple(
            [i for i in range(1, K.ndim(x[0])) if i != self.axis])

        c = x[0]  # Content image tensor.
        s = x[1]  # Style image tensor.

        # Calculate mean and standard deviation.
        c_mean = K.mean(c, axis=reduce_axes, keepdims=True)
        c_std = K.std(c, axis=reduce_axes, keepdims=True) + self.epsilon
        s_mean = K.mean(s, axis=reduce_axes, keepdims=True)
        s_std = K.std(s, axis=reduce_axes, keepdims=True)

        # AdaIN: standardize the content features, then re-scale them to
        # the style statistics; keepdims=True makes this broadcast cleanly.
        return s_std * ((c - c_mean) / c_std) + s_mean
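A standalone NumPy sanity check of the same arithmetic (the shapes and the spatial reduce axes are illustrative assumptions): after the AdaIN transform, the output's per-channel mean matches the style tensor's.

import numpy as np

c = np.random.rand(1, 8, 8, 3).astype('float32')  # content features
s = np.random.rand(1, 8, 8, 3).astype('float32')  # style features
axes = (1, 2)  # reduce over spatial axes, keep channels

c_mean = c.mean(axis=axes, keepdims=True)
c_std = c.std(axis=axes, keepdims=True) + 1e-5
s_mean = s.mean(axis=axes, keepdims=True)
s_std = s.std(axis=axes, keepdims=True)

out = s_std * ((c - c_mean) / c_std) + s_mean
assert np.allclose(out.mean(axis=axes, keepdims=True), s_mean, atol=1e-4)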
Example #7
    def call(self, x, mask=None):
        # Reduce over the spatial axes only, keeping the batch axis and
        # channel axis 3 (instance-style normalization).
        input_shape = K.int_shape(x)
        reduction_axes = list(range(0, len(input_shape)))
        del reduction_axes[3]
        del reduction_axes[0]
        mean = K.mean(x, reduction_axes, keepdims=True)
        stddev = K.std(x, reduction_axes, keepdims=True) + 1e-3
        normed = (x - mean) / stddev
        # Reshape gamma/beta so they broadcast along the channel axis.
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[3] = input_shape[3]
        broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
        normed = normed * broadcast_gamma
        broadcast_beta = K.reshape(self.beta, broadcast_shape)
        normed = normed + broadcast_beta
        return normed
Example #8
    def call(self, inputs):
        if not self.norm_method:
            outputs = inputs

        elif self.norm_method == 'whole_image':
            axes = [3, 4] if self.channel_axis == 1 else [2, 3]
            outputs = inputs - K.mean(inputs, axis=axes, keepdims=True)
            outputs = outputs / K.std(inputs, axis=axes, keepdims=True)

        elif self.norm_method == 'std':
            outputs = inputs - self._average_filter(inputs)
            outputs = outputs / self._window_std_filter(outputs)

        elif self.norm_method == 'max':
            outputs = inputs / K.max(inputs)
            outputs = outputs - self._average_filter(outputs)

        else:
            raise NotImplementedError('"{}" is not a valid norm_method'.format(
                self.norm_method))

        return outputs
Example #9
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)
        reduction_axes = list(range(0, len(input_shape)))

        if self.axis is not None:
            del reduction_axes[self.axis]

        del reduction_axes[0]

        mean = K.mean(inputs, reduction_axes, keepdims=True)
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev

        def noised():
            # Add Gaussian noise whose stddev is itself drawn uniformly
            # from [0, alpha).
            eps = K.random_uniform(shape=[1], maxval=self.alpha)
            return inputs + K.random_normal(
                shape=K.shape(inputs), mean=0., stddev=eps)

        get_noised = K.in_train_phase(noised, normed, training=training)

        # At inference time this is the identity transform, since
        # stddev * normed + mean reconstructs the inputs exactly.
        retrieved = stddev * get_noised + mean
        return retrieved
Example #10
    def call(self, inputs):
        # Validate inputs.
        x = inputs
        if not isinstance(x, list) or len(x) != 2:
            raise ValueError('Input must be a list of two tensors.')
        assert len(K.int_shape(x[1])) == 2

        if self.axis < 0:
            assert K.ndim(x[0]) + self.axis >= 0
            self.axis = K.ndim(x[0]) + self.axis  # Convert to a positive index.

        reduce_axes = tuple(
            [i for i in range(1, K.ndim(x[0])) if i != self.axis])

        c = x[0]  # Content image tensor.
        s = x[1]  # Style dlatent tensor.

        # Calculate mean and standard deviation of the content.
        c_mean = K.mean(c, axis=reduce_axes, keepdims=True)
        c_std = K.std(c, axis=reduce_axes, keepdims=True) + self.epsilon
        # Split the dlatent into a per-channel scale (index 0) and bias
        # (index 1); (s[:, 0] + 1) keeps the identity transform at s = 0.
        s = K.reshape(s, [-1, 2, 1, 1, c.shape[-1]])

        return (s[:, 0] + 1) * ((c - c_mean) / c_std) + s[:, 1]
Example #11
    def call(self, x):
        mean = K.mean(x, axis=self.axis, keepdims=True)
        std = K.std(x, axis=self.axis, keepdims=True)
        out = self.gamma * (x - mean) / (std + self.eps) + self.beta
        return out
Example #12
def corr_loss(act, pred):
    # 1 - Pearson correlation between actual and predicted values.
    cov = K.mean((act - K.mean(act)) * (pred - K.mean(pred)))
    return 1 - cov / (K.std(act) * K.std(pred) + K.epsilon())
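This is one minus the Pearson coefficient from Example #1, with K.epsilon() guarding against a zero denominator, so minimizing it maximizes correlation; it can be passed directly as loss=corr_loss in model.compile.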
Example #13
    def call(self, inputs):
        mean = K.mean(inputs, axis=self.axis, keepdims=True)
        std = K.std(inputs, axis=self.axis, keepdims=True)
        return self.gamma * (inputs - mean) / (std + self.eps) + self.beta
Example #14
    def call(self, x):
        mean = K.mean(x, axis=-1, keepdims=True)
        std = K.std(x, axis=-1, keepdims=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta
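Examples #11, #13, and #14 all show only the call method of a layer-normalization layer. For context, here is a minimal self-contained version built around the same call logic; the build step and initializers are assumptions, since the original snippets omit them.

from tensorflow import keras
from tensorflow.keras import backend as K

class LayerNorm(keras.layers.Layer):
    """Layer normalization over the last axis."""

    def __init__(self, eps=1e-6, **kwargs):
        super().__init__(**kwargs)
        self.eps = eps

    def build(self, input_shape):
        # One scale and one shift parameter per feature.
        self.gamma = self.add_weight(
            name='gamma', shape=(input_shape[-1],), initializer='ones')
        self.beta = self.add_weight(
            name='beta', shape=(input_shape[-1],), initializer='zeros')
        super().build(input_shape)

    def call(self, x):
        mean = K.mean(x, axis=-1, keepdims=True)
        std = K.std(x, axis=-1, keepdims=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta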
Example #15
    def data_variance(self, x, y):
        # Encode the data and return the mean per-dimension standard
        # deviation of the latent means.
        _, encoder_mean, encoder_logvar = self.encoder.layers
        z_mean = encoder_mean(x)
        z_logvar = encoder_logvar(x)

        return K.mean(K.std(z_mean, axis=0))
Example #16
    def std_if_not_int(val):
        # Integer tensors have no meaningful std here; treat it as zero.
        if val.dtype.is_integer:
            return 0
        else:
            # stop_gradient treats the std as a constant during backprop.
            return tf.stop_gradient(K.std(val, keepdims=True))
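A quick check of the dtype branch, assuming the function is available at module scope (the constants are illustrative):

import tensorflow as tf

print(std_if_not_int(tf.constant([1, 2, 3])))      # 0 for integer dtypes
print(std_if_not_int(tf.constant([1., 2., 3.])))   # stddev tensor otherwise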