Example #1
    def test_mode_1(self):
        norm_m1 = normalization.BatchNormalization(input_shape=(10,), mode=1)

        for inp in [self.input_1, self.input_2, self.input_3]:
            norm_m1.input = K.variable(inp)
            out = (norm_m1.get_output(train=True) - norm_m1.beta) / norm_m1.gamma
            self.assertAlmostEqual(K.eval(K.mean(out)), 0.0)
            if inp.std() > 0.:
                self.assertAlmostEqual(K.eval(K.std(out)), 1.0, places=2)
            else:
                self.assertAlmostEqual(K.eval(K.std(out)), 0.0, places=2)
def linear_correlation_loss(y_true, y_pred):
    mean_y_true = K.mean(y_true)
    mean_y_pred = K.mean(y_pred)
    std_y_true = K.std(y_true)+1e-6
    std_y_pred = K.std(y_pred)+1e-6
    nSamples = K.cast(K.shape(y_true)[0], K.floatx())  # cast so the division below stays floating point
    firstTerm = (y_true - mean_y_true)/std_y_true
    secondTerm = (y_pred - mean_y_pred)/std_y_pred
    pearsonCorr = K.sum(firstTerm*secondTerm)/(nSamples-1)
    maeLoss = K.abs(y_true-y_pred)
    return maeLoss*(1-K.maximum(0.,pearsonCorr))
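A quick sanity check of the loss above on a toy batch (a sketch; the values and the eager evaluation via K.eval are only for illustration, with K being the Keras backend as in the snippet):

import numpy as np
from keras import backend as K

y_t = K.variable(np.array([[1.0], [2.0], [3.0], [4.0]]))
y_p = K.variable(np.array([[1.1], [1.9], [3.2], [3.8]]))
# Element-wise MAE, scaled down when the Pearson correlation is high.
print(K.eval(linear_correlation_loss(y_t, y_p)))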
Example #3
def test_batchnorm_mode_1():
    norm_m1 = normalization.BatchNormalization(input_shape=(10,), mode=1)
    norm_m1.build(input_shape=(None, 10))

    for inp in [input_1, input_2, input_3]:
        out = (norm_m1.call(K.variable(inp)) - norm_m1.beta) / norm_m1.gamma
        assert_allclose(K.eval(K.mean(out)), 0.0, atol=1e-1)
        if inp.std() > 0.:
            assert_allclose(K.eval(K.std(out)), 1.0, atol=1e-1)
        else:
            assert_allclose(K.eval(K.std(out)), 0.0, atol=1e-1)
def test_batchnorm_mode_1():
    np.random.seed(1337)
    norm_m1 = normalization.BatchNormalization(input_shape=(10,), mode=1)

    for inp in [input_1, input_2, input_3]:
        norm_m1.input = K.variable(inp)
        out = (norm_m1.get_output(train=True) - norm_m1.beta) / norm_m1.gamma
        assert_allclose(K.eval(K.mean(out)), 0.0, atol=1e-1)
        if inp.std() > 0.0:
            assert_allclose(K.eval(K.std(out)), 1.0, atol=1e-1)
        else:
            assert_allclose(K.eval(K.std(out)), 0.0, atol=1e-1)
def linear_correlation_loss(y_true, y_pred):
    mean_y_true = K.mean(y_true)
    mean_y_pred = K.mean(y_pred)
    std_y_true = K.std(y_true)+1e-6
    std_y_pred = K.std(y_pred)+1e-6
    nSamples = K.cast(K.shape(y_true)[0], K.floatx())  # cast so the division below stays floating point
    firstTerm = (y_true - mean_y_true)/std_y_true
    secondTerm = (y_pred - mean_y_pred)/std_y_pred
    pearsonCorr = K.sum(firstTerm*secondTerm)/(nSamples-1)
    pearsonCorr = K.clip(pearsonCorr,-1.,1.)
    maeLoss = K.mean(K.abs(y_true-y_pred))
    # loss  = 1./(0.1+K.exp(-0.5*K.log(maeLoss+(1-pearsonCorr))))
    loss = (1./(0.1+K.exp(-0.5*K.log(maeLoss))))*(2-pearsonCorr)
    return loss
Example #6
def CCC_V(actual, predicted):
    predicted = predicted[:, 1]
    actual = actual[:, 1]
    # rescale
    ref_std = K.std(actual)
    pred_std = K.std(predicted)
    predicted = predicted * (ref_std / pred_std)

    pred_mean = K.mean(predicted, axis=0)
    ref_mean = K.mean(actual, axis=0)
    pred_var = K.var(predicted, axis=0)
    ref_var = K.var(actual, axis=0)
    covariance = K.mean((predicted - pred_mean) * (actual - ref_mean), axis=0)
    CCC = (2 * covariance) / (pred_var + ref_var + K.pow((pred_mean - ref_mean), 2))
    return CCC
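CCC is usually maximized, so for training it is typically negated. A minimal sketch of such a wrapper (the name ccc_v_loss and the compile call are assumptions, not part of the snippet above):

def ccc_v_loss(y_true, y_pred):
    # Minimize 1 - CCC of the valence column to maximize concordance.
    return 1.0 - CCC_V(y_true, y_pred)

# e.g. model.compile(optimizer='adam', loss=ccc_v_loss) for a two-column arousal/valence output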
    def call(self, x):
        mean = K.mean(x, axis=-1)
        std = K.std(x, axis=-1)

        if len(x.shape) == 3:
            mean = K.permute_dimensions(
                K.repeat(mean, x.shape.as_list()[-1]),
                [0,2,1]
            )
            std = K.permute_dimensions(
                K.repeat(std, x.shape.as_list()[-1]),
                [0,2,1] 
            )
            
        elif len(x.shape) == 2:
            mean = K.reshape(
                K.repeat_elements(mean, x.shape.as_list()[-1], 0),
                (-1, x.shape.as_list()[-1])
            )
            std = K.reshape(
                K.repeat_elements(std, x.shape.as_list()[-1], 0),
                (-1, x.shape.as_list()[-1])
            )
        
        return self._g * (x - mean) / (std + self._epsilon) + self._b
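This call method relies on weights created elsewhere in the layer. A minimal sketch of a matching build method, assuming the attribute names _g and _b from the snippet and standard Keras Layer.add_weight behavior:

    def build(self, input_shape):
        # Per-feature gain and bias over the last (feature) dimension;
        # self._epsilon is assumed to be set in __init__ (e.g. 1e-6).
        dim = input_shape[-1]
        self._g = self.add_weight(name='gain', shape=(dim,),
                                  initializer='ones', trainable=True)
        self._b = self.add_weight(name='bias', shape=(dim,),
                                  initializer='zeros', trainable=True)
        super().build(input_shape)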
Example #8
    def call(self, inputs, training=None):  # pylint:disable=unused-argument,arguments-differ
        """This is where the layer's logic lives.

        Parameters
        ----------
        inputs: tensor
            Input tensor, or list/tuple of input tensors

        Returns
        -------
        tensor
            A tensor or list/tuple of tensors
        """
        input_shape = K.int_shape(inputs[0])
        reduction_axes = list(range(0, len(input_shape)))

        beta = inputs[1]
        gamma = inputs[2]

        if self.axis is not None:
            del reduction_axes[self.axis]

        del reduction_axes[0]
        mean = K.mean(inputs[0], reduction_axes, keepdims=True)
        stddev = K.std(inputs[0], reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs[0] - mean) / stddev

        return normed * gamma + beta
Example #9
def gmsd_loss(y_true, y_pred):
    """ Gradient Magnitude Similarity Deviation Loss.

    An image-quality metric that improves on MS-SSIM while being simpler to compute.

    Parameters
    ----------
    y_true: tensor or variable
        The ground truth value
    y_pred: tensor or variable
        The predicted value

    Returns
    -------
    tensor
        The loss value

    References
    ----------
    http://www4.comp.polyu.edu.hk/~cslzhang/IQA/GMSD/GMSD.htm
    https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf

    """
    true_edge = scharr_edges(y_true, True)
    pred_edge = scharr_edges(y_pred, True)
    ephsilon = 0.0025
    upper = 2.0 * true_edge * pred_edge
    lower = K.square(true_edge) + K.square(pred_edge)
    gms = (upper + ephsilon) / (lower + ephsilon)
    gmsd = K.std(gms, axis=(1, 2, 3), keepdims=True)
    gmsd = K.squeeze(gmsd, axis=-1)
    return gmsd
Example #10
    def call(self, inputs, **kwargs):
        # In layer normalization, instances are handled independently of each other:
        # normalize over the feature (last) dimension of each instance.
        # Masks are not handled for now.
        mean = K.mean(inputs, axis=-1, keepdims=True)
        std = K.std(inputs, axis=-1, keepdims=True)
        return self._gamma * (inputs - mean) / (std + self._eps) + self._beta
Example #11
def std(y_true, y_pred):
    dx = (y_true[..., 0] - y_pred[..., 0]) * 128
    dy = (y_true[..., 1] - y_pred[..., 1]) * 64

    distance = K.sqrt(dx * dx + dy * dy)
    print(distance)
    return K.std(distance)
Example #12
def kaggle_sliced_accuracy(y_true, y_pred, slice_weights=[1.] * 11):
    question_slices = [
        slice(0, 3),
        slice(3, 5),
        slice(5, 7),
        slice(7, 9),
        slice(9, 13),
        slice(13, 15),
        slice(15, 18),
        slice(18, 25),
        slice(25, 28),
        slice(28, 31),
        slice(31, 37)
    ]

    accuracy_slices = [
        categorical_accuracy(y_true[:, question_slices[i]],
                             y_pred[:, question_slices[i]]) * slice_weights[i]
        for i in range(len(question_slices))
    ]
    accuracy_slices = T.cast(accuracy_slices, 'float32')
    return {
        'sliced_accuracy_mean': T.mean(accuracy_slices),
        'sliced_accuracy_std': T.std(accuracy_slices)
    }
Example #13
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)
        reduction_axes = list(range(0, len(input_shape)))

        if self.axis is not None:
            del reduction_axes[self.axis]

        del reduction_axes[0]

        mean = K.mean(inputs, reduction_axes, keepdims=True)
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev

        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.sw is None:
            if self.scale:
                broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
                normed = normed * broadcast_gamma
            if self.center:
                broadcast_beta = K.reshape(self.beta, broadcast_shape)
                normed = normed + broadcast_beta
        else:
            betaW = K.dot(self.sw, self.beta)
            gammaW = K.dot(self.sw, self.gamma)

            broadcast_gamma = K.reshape(gammaW, broadcast_shape)
            broadcast_beta = K.reshape(betaW, broadcast_shape)
            normed = normed * broadcast_gamma
            normed = normed + broadcast_beta

        return normed
def nss(y_true, y_pred):

    ax = 1

    if K.sum(K.sum(y_true, axis=ax), axis=ax) == 0:
        return 0

    max_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=ax), axis=ax), axis=ax+1),
                                                                   shape_r_out, axis=ax), axis=ax+1), shape_c_out, axis=ax+1)

    y_pred /= max_y_pred


    y_pred_flatten = K.batch_flatten(y_pred)

    y_mean = K.mean(y_pred_flatten, axis=-1)
    y_mean = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_mean)),
                                                               shape_r_out, axis=ax)), shape_c_out, axis=ax+1)

    y_std = K.std(y_pred_flatten, axis=-1)
    y_std = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_std)),
                                                              shape_r_out, axis=ax)), shape_c_out, axis=ax+1)

    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    den = K.sum(K.sum(y_true * y_pred, axis=ax), axis=ax)
    nom = K.sum(K.sum(y_true, axis=ax), axis=ax) + K.epsilon()

    nss_out = den/nom

    return nss_out
Example #15
    def batch_std(x):
        # Minibatch standard deviation: per-feature std over the batch, averaged to a
        # single scalar and tiled back over the (hard-coded) 32x32 feature map.
        # shape = K.shape(x)
        # dims = [shape[i] for i in range(len(x.shape)-1)]+[1]
        s = K.std(x, keepdims=True, axis=[0])
        s = K.mean(s, keepdims=True)
        s = K.tile(s, [K.shape(x)[0], 32, 32, 1])
        return K.concatenate([x, s], axis=-1)
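batch_std looks like the minibatch-stddev trick used in GAN discriminators, with the spatial size hard-coded to 32x32. A sketch of wiring it in through a Lambda layer (the input shape and variable names are assumptions):

from keras.layers import Input, Lambda

features = Input(shape=(32, 32, 64))      # must match the hard-coded 32x32 tiling
with_std = Lambda(batch_std)(features)    # output shape (32, 32, 65): one extra stddev channel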
def nss_time(y_true, y_pred):
    if len(y_true.shape) == 5:
        ax = 2
    else:
        ax = 1

    maxi = K.max(K.max(y_pred, axis=ax), axis=ax)
    first_rep = K.repeat_elements(K.expand_dims(maxi, axis=ax),shape_r_out, axis=ax)
    max_y_pred = K.repeat_elements(K.expand_dims(first_rep, axis=ax+1), shape_c_out, axis=ax+1)
    y_pred /= max_y_pred

    if len(y_true.shape) == 5:
        y_pred_flatten = K.reshape(y_pred, (K.shape(y_pred)[0],K.shape(y_pred)[1],K.shape(y_pred)[2]*K.shape(y_pred)[3]*K.shape(y_pred)[4])) #K.batch_flatten(y_pred)
    else:
        y_pred_flatten = K.batch_flatten(y_pred)

    y_mean = K.mean(y_pred_flatten, axis=-1)
    y_mean = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_mean)),
                                                               shape_r_out, axis=ax)), shape_c_out, axis=ax+1)

    y_std = K.std(y_pred_flatten, axis=-1)
    y_std = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_std)),
                                                              shape_r_out, axis=ax)), shape_c_out, axis=ax+1)

    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    num = K.sum(K.sum(y_true * y_pred, axis=ax), axis=ax)
    den = K.sum(K.sum(y_true, axis=ax), axis=ax) + K.epsilon()

    if len(y_true.shape) == 5:
        nss_out = K.mean(num/den, axis = 1)
    else:
        nss_out = num/den

    return nss_out
Example #17
def activations(model, filters=['activation', 'dropout']):
    """Compute activation statistics (mean and stddev) for specified layers

    Parameters
    ----------
    model : keras model
    filters : list of str; layer-name prefixes selecting which layers to monitor

    """
    tensors = [
        layer for layer in model.layers if any(
            layer.name.startswith(filter) for filter in filters)
    ]
    names = [tensor.name for tensor in tensors]

    mean_tensors, std_tensors = [0] * len(tensors), [0] * len(tensors)
    for i, tensor in enumerate(tensors):
        means = K.mean(K.cast(K.equal(tensor.output, 0), K.floatx()), axis=1)  # fraction of zero activations per example

        mean_tensors[i] = K.mean(means)
        std_tensors[i] = K.std(means)

    names = [name + '_mu'
             for name in names] + [name + '_std' for name in names]
    tensors = mean_tensors + std_tensors

    return names, tensors
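The returned tensors are still symbolic; evaluating them against a concrete batch needs a backend function. A sketch of doing that, assuming model is a built Keras model and x_batch is a matching numpy array:

names, stat_tensors = activations(model)
stat_fn = K.function(model.inputs + [K.learning_phase()], stat_tensors)
stat_values = stat_fn([x_batch, 0])   # 0 = test phase
for name, value in zip(names, stat_values):
    print(name, float(value))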
Example #18
        def normalized():
            initial_m = K.mean(inputs)
            initial_std = K.std(inputs)
            m = 0.0
            std = 1.0

            return (inputs - initial_m) * (self.stddev / initial_std)
def _do_saliency_calculations(
        model_object, loss_tensor, list_of_input_matrices):
    """Does saliency calculations.

    T = number of input tensors to the model
    E = number of examples (storm objects)

    :param model_object: Instance of `keras.models.Model`.
    :param loss_tensor: Keras tensor defining the loss function.
    :param list_of_input_matrices: length-T list of numpy arrays, comprising one
        or more examples (storm objects).  list_of_input_matrices[i] must have
        the same dimensions as the [i]th input tensor to the model.
    :return: list_of_saliency_matrices: length-T list of numpy arrays,
        comprising the saliency map for each example.
        list_of_saliency_matrices[i] has the same dimensions as
        list_of_input_matrices[i] and defines the "saliency" of each value x,
        which is the gradient of the loss function with respect to x.
    """

    if isinstance(model_object.input, list):
        list_of_input_tensors = model_object.input
    else:
        list_of_input_tensors = [model_object.input]

    list_of_gradient_tensors = K.gradients(loss_tensor, list_of_input_tensors)
    num_input_tensors = len(list_of_input_tensors)

    for i in range(num_input_tensors):
        list_of_gradient_tensors[i] /= K.maximum(
            K.std(list_of_gradient_tensors[i]), K.epsilon()
        )

    inputs_to_gradients_function = K.function(
        list_of_input_tensors + [K.learning_phase()], list_of_gradient_tensors
    )

    # list_of_saliency_matrices = None
    # num_examples = list_of_input_matrices[0].shape[0]
    #
    # for i in range(num_examples):
    #     these_input_matrices = [a[[i], ...] for a in list_of_input_matrices]
    #     these_saliency_matrices = inputs_to_gradients_function(
    #         these_input_matrices + [0])
    #
    #     if list_of_saliency_matrices is None:
    #         list_of_saliency_matrices = these_saliency_matrices + []
    #     else:
    #         for i in range(num_input_tensors):
    #             list_of_saliency_matrices[i] = numpy.concatenate(
    #                 (list_of_saliency_matrices[i], these_saliency_matrices[i]),
    #                 axis=0)

    list_of_saliency_matrices = inputs_to_gradients_function(
        list_of_input_matrices + [0]
    )

    for i in range(num_input_tensors):
        list_of_saliency_matrices[i] *= -1

    return list_of_saliency_matrices
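A sketch of calling the helper above, assuming model_object is a compiled single-input classifier and input_matrix is a numpy batch; the particular loss tensor (squared error of the class-0 probability against 1) is only an illustration:

# Hypothetical loss tensor defined on the model's output.
loss_tensor = K.mean((model_object.layers[-1].output[..., 0] - 1.) ** 2)

saliency_matrices = _do_saliency_calculations(
    model_object, loss_tensor, [input_matrix])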
Example #20
def separation_loss(y_true, y_pred):

    y_true = tf.squeeze(y_true)
    env_id, _ = tf.unique(y_true)

    mu = []
    sigma = []
    for i in range(EPI.NUM_OF_ENVS):
        idx = tf.where(tf.equal(y_true, env_id[i]))
        traj = tf.gather(y_pred, idx)
        mu.append(tf.squeeze(K.mean(traj, axis=0)))
        this_sigma = tf.maximum(K.mean(K.std(traj, axis=0)) - 0.1, 0)
        sigma.append(this_sigma)

    mu = tf.stack(mu)
    r = tf.reduce_sum(mu * mu, 1)
    r = tf.reshape(r, [-1, 1])
    D = (r - 2 * tf.matmul(mu, tf.transpose(mu)) +
         tf.transpose(r)) / tf.constant(EPI.EMBEDDING_DIMENSION,
                                        dtype=tf.float32)
    D = tf.sqrt(D + tf.eye(EPI.NUM_OF_ENVS, dtype=tf.float32))
    distance = K.mean(tf.reduce_sum(0.1 - tf.minimum(D, 0.1)))

    sigma = tf.stack(sigma)

    return (distance + K.mean(sigma)) * 0.01
Example #21
def get_std_3d(input):
    return K.repeat_elements(K.expand_dims(
        K.repeat_elements(K.expand_dims(K.std(input, axis=[2, 3])),
                          shape_r_out,
                          axis=2)),
                             shape_c_out,
                             axis=3)
Example #22
    def call(self, y_true, y_pred):
        """ Return the Gradient Magnitude Similarity Deviation Loss.


        Parameters
        ----------
        y_true: tensor or variable
            The ground truth value
        y_pred: tensor or variable
            The predicted value

        Returns
        -------
        tensor
            The loss value
        """
        true_edge = self._scharr_edges(y_true, True)
        pred_edge = self._scharr_edges(y_pred, True)
        ephsilon = 0.0025
        upper = 2.0 * true_edge * pred_edge
        lower = K.square(true_edge) + K.square(pred_edge)
        gms = (upper + ephsilon) / (lower + ephsilon)
        gmsd = K.std(gms, axis=(1, 2, 3), keepdims=True)
        gmsd = K.squeeze(gmsd, axis=-1)
        return gmsd
Example #23
    def call(self, x, mask=None):
        # mean of the hidden activations
        mean = K.mean(x, axis=-1, keepdims=True)  # [None, steps, hidden]
        # std of the hidden activations
        std = K.std(x, axis=-1, keepdims=True)  # [None, steps, hidden]
        norm = (x - mean) / (std + self.epsilon)
        return norm * self.scale + self.bias  # [None, steps, hidden]
Example #24
def nss(y_true, y_pred):
    max_y_pred = K.expand_dims(
        K.repeat_elements(K.expand_dims(
            K.repeat_elements(K.expand_dims(K.max(y_pred, axis=[2, 3, 4])),
                              y_pred.shape[2],
                              axis=2)),
                          y_pred.shape[3],
                          axis=3))
    y_pred /= max_y_pred
    # y_pred_flatten = K.batch_flatten(y_pred)

    # max_y_true = K.expand_dims(K.repeat_elements(
    #     K.expand_dims(K.repeat_elements(K.expand_dims(K.max(y_true, axis=[2, 3, 4])), shape_r_out, axis=2)),
    #     shape_c_out, axis=3))
    max_y_true = K.max(y_true, axis=[2, 3, 4])
    y_bool = K.cast(K.greater(max_y_true, 0.1), 'float32')

    y_mean = K.mean(y_pred, axis=[2, 3, 4])
    y_mean = K.expand_dims(
        K.repeat_elements(K.expand_dims(
            K.repeat_elements(K.expand_dims(y_mean), y_pred.shape[2], axis=2)),
                          y_pred.shape[3],
                          axis=3))

    y_std = K.std(y_pred, axis=[2, 3, 4])
    y_std = K.expand_dims(
        K.repeat_elements(K.expand_dims(
            K.repeat_elements(K.expand_dims(y_std), y_pred.shape[2], axis=2)),
                          y_pred.shape[3],
                          axis=3))

    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    return -K.sum(y_bool * ((K.sum(y_true * y_pred, axis=[2, 3, 4])) /
                            (K.sum(y_true, axis=[2, 3, 4]))))
Example #25
def resample(x):
    mean = K.mean(x, axis=0)
    std = K.std(x, axis=0)

    std_norm = K.random_normal(shape=K.shape(x), mean=0, stddev=1)

    return mean + std_norm * std
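resample draws new values that share the per-feature batch mean and standard deviation of its input. A minimal sketch of using it as a Lambda layer (the input shape and variable names are assumptions):

from keras.layers import Input, Lambda

x_in = Input(shape=(16,))
x_resampled = Lambda(resample)(x_in)   # same shape as x_in, redrawn around the batch statistics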
def mvn(tensor):
    """Per row mean-variance normalization."""
    epsilon = 1e-6
    mean = K.mean(tensor, axis=1, keepdims=True)
    std = K.std(tensor, axis=1, keepdims=True)
    mvn = (tensor - mean) / (std + epsilon)
    return mvn
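A quick check of mvn on a toy batch (a sketch; the values are made up):

import numpy as np
from keras import backend as K

batch = K.variable(np.array([[1., 2., 3.], [10., 20., 30.]]))
print(K.eval(mvn(batch)))   # each row now has roughly zero mean and unit std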
Example #27
def correlation_multi(y_true, y_pred):
    mean_true = K.expand_dims(K.mean(y_true, axis=-2), axis=-2)
    mean_pred = K.expand_dims(K.mean(y_pred, axis=-2), axis=-2)
    std_true = K.expand_dims(K.std(y_true, axis=-2), axis=-2)
    std_pred = K.expand_dims(K.std(y_pred, axis=-2), axis=-2)
    sts_true = (y_true - mean_true) / std_true
    sts_pred = (y_pred - mean_pred) / std_pred
    # sts_true = (y_true - mean_true)
    # sts_pred = (y_pred - mean_pred)
    # cent_true = y_true - K.expand_dims(mean_true, axis=-2)
    # cent_pred = y_pred - K.expand_dims(mean_pred, axis=-2)
    # norm_true = K.l2_normalize(cent_true, axis=-2)
    # norm_pred = K.l2_normalize(cent_pred, axis=-2)
    corrs = K.mean(sts_true * sts_pred, axis=-2)
    # print(corrs) ####
    return K.mean(corrs, axis=-1)
Example #28
    def __call__(self, y_true, y_pred):
        """ Return the Gradient Magnitude Similarity Deviation Loss.

        Parameters
        ----------
        y_true: tensor or variable
            The ground truth value
        y_pred: tensor or variable
            The predicted value

        Returns
        -------
        tensor
            The loss value
        """
        raise FaceswapError("GMSD Loss is not currently compatible with PlaidML. Please select a "
                            "different Loss method.")

        true_edge = self._scharr_edges(y_true, True)
        pred_edge = self._scharr_edges(y_pred, True)
        ephsilon = 0.0025
        upper = 2.0 * true_edge * pred_edge
        lower = K.square(true_edge) + K.square(pred_edge)
        gms = (upper + ephsilon) / (lower + ephsilon)
        gmsd = K.std(gms, axis=(1, 2, 3), keepdims=True)
        gmsd = K.squeeze(gmsd, axis=-1)
        return gmsd
Example #29
def NSS(y_true, y_pred):
    max_y_pred = K.repeat_elements(K.expand_dims(
        K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)),
                          shape_r_out,
                          axis=-1)),
                                   shape_c_out,
                                   axis=-1)
    y_pred /= max_y_pred
    y_pred_flatten = K.batch_flatten(y_pred)

    y_mean = K.mean(y_pred_flatten, axis=-1)
    y_mean = K.repeat_elements(K.expand_dims(
        K.repeat_elements(K.expand_dims(K.expand_dims(y_mean)),
                          shape_r_out,
                          axis=-1)),
                               shape_c_out,
                               axis=-1)

    y_std = K.std(y_pred_flatten, axis=-1)
    y_std = K.repeat_elements(K.expand_dims(
        K.repeat_elements(K.expand_dims(K.expand_dims(y_std)),
                          shape_r_out,
                          axis=-1)),
                              shape_c_out,
                              axis=-1)

    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    return -(K.sum(K.sum(y_true * y_pred, axis=2), axis=2) /
             K.sum(K.sum(y_true, axis=2), axis=2))
    def SMD_D2_HAT(self, x1, x2, k):
        t = 3
        beta = 2
        order = 5
        gamma = 1
        sing_item = 0
        EXs = K.mean(x1, axis=0)
        EXt = K.mean(x2, axis=0)
        DXs_std = K.std(x1, axis=0)
        DXt_std = K.std(x2, axis=0)
        for n in range(1, (order + 1)):
            EXs_n_hat = K.mean(K.pow((x1 - EXs) / DXs_std, n), axis=0)
            EXt_n_hat = K.mean(K.pow((x2 - EXt) / DXt_std, n), axis=0)
            numerator = gamma * K.exp(K.abs(EXs_n_hat - EXt_n_hat))
            sing_item += math.pow(1 / t, n) * (numerator / (numerator + 0.05))
        return K.sum(sing_item)
def CCC4Keras(y_pred, y_true):
    y_true = K.print_tensor(y_true, message='y_true = ')  # use the returned tensor so the print actually fires
    pc = PearsonCorrelation4keras(y_true, y_pred)
    devP = K.std(y_pred, axis=0)
    devT = K.std(y_true, axis=0)
    meanP = K.mean(y_pred, axis=0)
    meanT = K.mean(y_true, axis=0)
    powMeans = K.pow(meanP-meanT,2)

    varP = K.var(y_pred, axis=0)
    varT = K.var(y_true, axis=0)

    numerator = 2*pc*devP*devT
    denominator = varP+varT+powMeans
    CCC = numerator/denominator
    return K.sum(CCC)
def DNS_update(cur_iter, gamma, crate, power, kernel, T, learning_phase):
    # The paper isn't too clear on this, so this has been dug out of their C++ source code
    probThreshold = (1 + gamma * cur_iter) ** power

    # Determine which filters shall be updated this iteration
    random_number = K.random_uniform(shape=(1, 1, 1, int(T.shape[-1])))
    random_number = K.cast(random_number < probThreshold, dtype='float32')

    # Based on the mean & standard deviation of the weights, determine a weight significancy threshold
    mu_vec = K.mean(x=kernel, axis=(0, 1, 2), keepdims=True)
    std_vec = K.std(x=kernel, axis=(0, 1, 2), keepdims=True)
    threshold_vec = mu_vec + crate * std_vec  # weights this many std. deviations from the mean are 'significant'

    # Incorporate hysteresis into the threshold
    alpha_vec = 0.9 * threshold_vec
    beta_vec = 1.1 * threshold_vec

    # Update the significant weight mask by applying the threshold to the unmasked weights
    abs_kernel = K.abs(x=kernel)
    new_T = T - K.cast(abs_kernel < alpha_vec, dtype='float32') * random_number
    new_T = new_T + K.cast(abs_kernel > beta_vec, dtype='float32') * random_number
    new_T = K.clip(x=new_T, min_value=0., max_value=1.)

    # Only apply DNS when training and when activated via the current iteration variable
    new_T = K.switch(cur_iter >= 0., new_T, T)
    new_T = K.switch(learning_phase, new_T, T)

    return new_T
Example #33
    def __call__(self, y_true: plaidml.tile.Value,
                 y_pred: plaidml.tile.Value) -> plaidml.tile.Value:
        """ Return the Gradient Magnitude Similarity Deviation Loss.

        Parameters
        ----------
        y_true: :class:`plaidml.tile.Value`
            The ground truth value
        y_pred: :class:`plaidml.tile.Value`
            The predicted value

        Returns
        -------
        :class:`plaidml.tile.Value`
            The loss value
        """
        image_shape = K.int_shape(y_pred)
        true_edge = self._scharr_edges(y_true, True, image_shape)
        pred_edge = self._scharr_edges(y_pred, True, image_shape)
        ephsilon = 0.0025
        upper = 2.0 * true_edge * pred_edge
        lower = K.square(true_edge) + K.square(pred_edge)
        gms = (upper + ephsilon) / (lower + ephsilon)
        gmsd = K.std(gms, axis=(1, 2, 3), keepdims=True)
        gmsd = K.squeeze(gmsd, axis=-1)
        return gmsd
Example #34
    def call(self, x):

        mean = K.mean(x, axis=-1, keepdims=True)

        std = K.std(x, axis=-1, keepdims=True)

        return self.gamma * (x - mean) / (std + self.eps) + self.beta
Example #35
    def compute_style_loss(self, gen_img, style_img):
        gen_feats = self.style_layers(gen_img)
        style_feats = self.style_layers(style_img)
        style_loss = []
        axis = [1, 2]
        for i in range(len(style_feats)):
            gmean = K.mean(gen_feats[i], axis=axis)
            gstd = K.std(gen_feats[i], axis=axis)

            smean = K.mean(style_feats[i], axis=axis)
            sstd = K.std(style_feats[i], axis=axis)

            style_loss.append(
                K.sum(K.square(gmean - smean)) + K.sum(K.square(gstd - sstd)))

        return Reduction()(style_loss)
Example #36
    def test_weight_init(self):
        """
        Test weight initialization
        """
        norm_m1 = normalization.BatchNormalization(input_shape=(10,), mode=1,
                                                   weights=[np.ones(10), np.ones(10), np.zeros(10), np.zeros(10)])

        for inp in [self.input_1, self.input_2, self.input_3]:
            norm_m1.input = K.variable(inp)
            out = (norm_m1.get_output(train=True) - np.ones(10)) / 1.
            self.assertAlmostEqual(K.eval(K.mean(out)), 0.0)
            if inp.std() > 0.:
                self.assertAlmostEqual(K.eval(K.std(out)), 1.0, places=2)
            else:
                self.assertAlmostEqual(K.eval(K.std(out)), 0.0, places=2)

        assert_allclose(K.eval(norm_m1.gamma), np.ones(10))
        assert_allclose(K.eval(norm_m1.beta), np.ones(10))
Example #37
def test_batchnorm_weight_init():
    """
    Test weight initialization
    """
    np.random.seed(1337)
    norm_m1 = normalization.BatchNormalization(input_shape=(10,), mode=1,
                                               weights=[np.ones(10), np.ones(10), np.zeros(10), np.zeros(10)])

    for inp in [input_1, input_2, input_3]:
        norm_m1.input = K.variable(inp)
        out = (norm_m1.get_output(train=True) - np.ones(10)) / 1.
        assert_allclose(K.eval(K.mean(out)), 0.0, atol=1e-1)
        if inp.std() > 0.:
            assert_allclose(K.eval(K.std(out)), 1.0, atol=1e-1)
        else:
            assert_allclose(K.eval(K.std(out)), 0.0, atol=1e-1)

    assert_allclose(K.eval(norm_m1.gamma), np.ones(10), atol=1e-1)
    assert_allclose(K.eval(norm_m1.beta), np.ones(10), atol=1e-1)
Example #38
def ssim(y_true, y_pred):
    """structural similarity measurement system."""
    ## K1, K2 are two constants, much smaller than 1
    K1 = 0.04
    K2 = 0.06
    
    ## mean, std, correlation
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = (sig_x * sig_y) ** 0.5  # stands in for the covariance term (geometric mean of the two stds)

    ## L: dynamic range of the pixel values; C1, C2: stabilizing constants
    L = 33
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2

    ssim = ((2 * mu_x * mu_y + C1) * (2 * sig_xy + C2)) / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
    return ssim
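SSIM is typically tracked as a metric rather than optimized directly. A sketch of wiring the function above into compile (the surrounding model and optimizer are assumptions):

# Hypothetical: train on an ordinary pixel loss and monitor SSIM.
model.compile(optimizer='adam', loss='mae', metrics=[ssim])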
Example #39
def test_batchnorm_mode_0_convnet():
    model = Sequential()
    norm_m0 = normalization.BatchNormalization(mode=0, axis=1, input_shape=(3, 4, 4))
    model.add(norm_m0)
    model.compile(loss='mse', optimizer='sgd')

    # centered on 5.0, variance 10.0
    X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    model.fit(X, X, nb_epoch=5, verbose=0)
    norm_m0.input = K.variable(X)
    out = (norm_m0.get_output(train=True) - K.reshape(norm_m0.beta, (1, 3, 1, 1))) / K.reshape(norm_m0.gamma, (1, 3, 1, 1))

    assert_allclose(K.eval(K.mean(out, axis=(0, 2, 3))), 0.0, atol=1e-1)
    assert_allclose(K.eval(K.std(out, axis=(0, 2, 3))), 1.0, atol=1e-1)
Example #40
    def test_mode_0(self):
        model = Sequential()
        norm_m0 = normalization.BatchNormalization(input_shape=(10,))
        model.add(norm_m0)
        model.compile(loss='mse', optimizer='sgd')

        # centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, nb_epoch=5, verbose=0)
        norm_m0.input = K.variable(X)
        out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma

        self.assertAlmostEqual(K.eval(K.mean(out)), 0.0, places=1)
        self.assertAlmostEqual(K.eval(K.std(out)), 1.0, places=1)
def test_batchnorm_mode_0():
    np.random.seed(1337)
    model = Sequential()
    norm_m0 = normalization.BatchNormalization(input_shape=(10,))
    model.add(norm_m0)
    model.compile(loss="mse", optimizer="sgd")

    # centered on 5.0, variance 10.0
    X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
    model.fit(X, X, nb_epoch=5, verbose=0)
    norm_m0.input = K.variable(X)
    out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma

    assert_allclose(K.eval(K.mean(out)), 0.0, atol=1e-1)
    assert_allclose(K.eval(K.std(out)), 1.0, atol=1e-1)
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)
        reduction_axes = list(range(0, len(input_shape)))

        if (self.axis is not None):
            del reduction_axes[self.axis]

        del reduction_axes[0]

        mean = K.mean(inputs, reduction_axes, keepdims=True)
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev

        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed
def center_normalize(x):
    return (x - K.mean(x)) / K.std(x)
def center_normalize(x):
    """
    Custom activation for online sample-wise center and std. normalization
    """
    return (x - K.mean(x)) / K.std(x)
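Because this is written as an activation, it can be dropped into a model through Activation (which accepts a callable) or Lambda. A minimal sketch; the surrounding model is an assumption:

from keras.models import Sequential
from keras.layers import Dense, Activation

model = Sequential()
model.add(Dense(64, input_dim=20))
model.add(Activation(center_normalize))   # center and scale by the mean/std of the whole tensor
model.add(Dense(1))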