Example #1
    def channel(self, zMean):
        batch = K.shape(zMean)[0]
        # Generate Laplace r.v.
        # Source: https://en.wikipedia.org/wiki/Laplace_distribution#Generating_random_variables_according_to_the_Laplace_distribution
        # Code: https://stackoverflow.com/questions/56691436/how-can-one-add-laplacian-noise-to-a-tensor-in-keras
        u = K.random_uniform((batch, self.latent_dim), minval=-0.5, maxval=0.5)
        epsilon = K.sign(u) * K.log(1 - 2 * K.abs(u) + K.epsilon())

        return zMean + self.n0 / self.latent_dim * epsilon
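The inverse-CDF trick above (uniform noise on (-0.5, 0.5) mapped through sign(u) * log(1 - 2|u|)) can be checked outside the class. The sketch below is standalone; `latent_dim` and `n0` are hypothetical stand-ins for the layer attributes.

import tensorflow as tf
from tensorflow.keras import backend as K

latent_dim, n0 = 8, 0.1           # hypothetical stand-ins for self.latent_dim / self.n0
z_mean = tf.zeros((4, latent_dim))

# If U ~ Uniform(-0.5, 0.5), then sign(U) * log(1 - 2|U|) is Laplace(0, 1) distributed.
u = K.random_uniform(K.shape(z_mean), minval=-0.5, maxval=0.5)
epsilon = K.sign(u) * K.log(1 - 2 * K.abs(u) + K.epsilon())

noisy = z_mean + n0 / latent_dim * epsilon
print(noisy.shape)  # (4, 8)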
Example #2
 def call(self, inputs, weights_mode=False):
     image, noise_std = inputs
     threshold = self.alpha * noise_std
     if not weights_mode:
         threshold = tf.expand_dims(threshold, axis=1)
         threshold = tf.expand_dims(threshold, axis=1)
     input_sign = K.sign(image)
     soft_thresh_unsigned = ReLU()(input_sign * image - threshold)
     soft_thresh = soft_thresh_unsigned * input_sign
     return soft_thresh
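For reference, the soft-thresholding above can be reproduced on a plain tensor; the threshold value below is arbitrary.

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import ReLU

image = tf.constant([[-2.0, -0.3, 0.0, 0.4, 3.0]])
threshold = 0.5  # arbitrary; in the layer above it is alpha * noise_std

# Soft-thresholding: shrink magnitudes by `threshold`, zeroing anything below it.
input_sign = K.sign(image)
soft_thresh = ReLU()(input_sign * image - threshold) * input_sign
print(soft_thresh.numpy())  # [[-1.5  0.   0.   0.   2.5]]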
Example #3
def mean_percentage_error(y_true, y_pred):
    """
    Return the absolute mean percentage error (MPE), in percent.
    """
    y_true = tf.convert_to_tensor(y_true)
    y_pred = tf.convert_to_tensor(y_pred)
    MPE_model = K.mean(
        (y_true - y_pred) /
        (K.sign(y_true) * K.clip(K.abs(y_true), K.epsilon(), None)))
    return 100. * K.abs(MPE_model)
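A quick eager-mode check of the function above with hand-picked values (illustrative only):

import tensorflow as tf

y_true = tf.constant([100.0, 200.0, 50.0])
y_pred = tf.constant([110.0, 190.0, 50.0])
# mean((y_true - y_pred) / y_true) = (-0.1 + 0.05 + 0.0) / 3, so the result is about 1.67 (%)
print(float(mean_percentage_error(y_true, y_pred)))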
Example #4
def off_loss(y_true, y_pred):
    mask = K.batch_flatten(K.sign(y_true[..., 4]))
    N = K.sum(mask)
    offy_pred = K.batch_flatten(y_pred[..., 2])
    offx_pred = K.batch_flatten(y_pred[..., 3])
    offy_true = K.batch_flatten(y_true[..., 2])
    offx_true = K.batch_flatten(y_true[..., 3])
    offloss = K.abs(offx_pred * mask - offx_true) + K.abs(offy_pred * mask -
                                                          offy_true)
    offloss = K.sum(offloss) / N
    return offloss
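The `K.sign(...)` call above turns a non-negative ground-truth channel into a 0/1 mask, and `N` counts the annotated positions; a toy illustration with a hypothetical 1x2x2 map:

import tensorflow as tf
from tensorflow.keras import backend as K

# Hypothetical ground-truth channel: nonzero only where an object is annotated.
gt_channel = tf.constant([[[0.0, 2.3], [0.0, 5.0]]])  # shape (batch=1, 2, 2)

mask = K.batch_flatten(K.sign(gt_channel))
print(mask.numpy())        # [[0. 1. 0. 1.]]
print(float(K.sum(mask)))  # 2.0 -> N, the number of annotated positions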
Example #5
@tf.custom_gradient  # needed so the identity gradient defined below is used
def mask_rs(x):
    y = K.sign(tf.keras.activations.relu(x))
    # Rescale the surviving mask entries (see the commented copy of this function below).
    scalefactor = tf.compat.v1.size(y, out_type=tf.dtypes.float32) / (
        1 + tf.math.count_nonzero(y, dtype=tf.dtypes.float32))
    y *= scalefactor

    def grad(dy):
        return dy

    return y, grad
Example #6
 def closs(y_true, y_pred):
     mask = K.batch_flatten(K.sign(y_true[..., 6]))
     N = K.sum(mask)
     heatmaps = K.batch_flatten(y_true[..., 0])
     cls_pred = K.batch_flatten(y_pred[..., 1])
     cls_pred = K.clip(cls_pred, 1e-7, 1 - 1e-7)
     cls_true = K.batch_flatten(y_true[..., 1])
     cls_loss = K.sum(
         focal_loss(gamma1, gamma2, cls_true, cls_pred, heatmaps)) / N
     return cls_loss
Example #7
@tf.custom_gradient  # straight-through estimator: hard sign forward, identity gradient backward
def flip(x):
    # y = K.sign(tf.keras.activations.relu(tf.keras.activations.tanh(x)))
    # y = K.sign(tf.keras.activations.relu(x))
    # y = K.sign(tf.keras.activations.tanh(x))
    y = K.sign(x)

    def grad(dy):
        return dy

    return y, grad
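Returning `(y, grad)` only takes effect when the function is wrapped with `tf.custom_gradient`; a self-contained sketch of the same straight-through sign estimator:

import tensorflow as tf
from tensorflow.keras import backend as K

@tf.custom_gradient
def straight_through_sign(x):
    # Forward pass: hard sign. Backward pass: identity gradient.
    y = K.sign(x)

    def grad(dy):
        return dy

    return y, grad

x = tf.Variable([[-1.3, 0.2, 2.0]])
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(straight_through_sign(x))
print(tape.gradient(loss, x).numpy())  # [[1. 1. 1.]]: gradients pass straight through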
Example #8
 def channel(self, zMean):
     batch = K.shape(zMean)[0]
     # Generate Laplace r.v.
     # Source: https://en.wikipedia.org/wiki/Laplace_distribution#Generating_random_variables_according_to_the_Laplace_distribution
     # Code: https://stackoverflow.com/questions/56691436/how-can-one-add-laplacian-noise-to-a-tensor-in-keras
     u = K.random_uniform((batch, self.latent_dim), minval=-0.5, maxval=0.5)
     epsilon = K.sign(u) * K.log(1 - 2 * K.abs(u) + K.epsilon())
     # Because BatchNormalization produces z vector with signal power 'latentDim',
     # we should not scale the noise power here.
     return zMean + np.sqrt(self.train_noisepow) * epsilon
Example #9
 def sloss(y_true, y_pred):
     mask = K.batch_flatten(K.sign(y_true[..., 6]))
     N = K.sum(mask)
     sizey_true = K.batch_flatten(y_true[..., 4])
     sizey_pred = K.batch_flatten(y_pred[..., 4])
     sizex_true = K.batch_flatten(y_true[..., 5])
     sizex_pred = K.batch_flatten(y_pred[..., 5])
     size_loss = K.sum(
         K.abs(sizex_pred * mask - sizex_true) +
         K.abs(sizey_pred * mask - sizey_true)) / N
     return size_loss
Example #10
    def all_loss(self, y_true, y_pred):
        mask = K.sign(y_true[..., 2 * self.__category_n + 2])
        N = K.sum(mask)
        alpha = 2.
        beta = 4.

        heat_loss = self.__calculate_heatmap_loss(y_true, y_pred, alpha, beta)
        offset_loss = self.__calculate_offset_loss(y_true, y_pred, mask)
        size_loss = self.__calculate_size_loss(y_true, y_pred, mask)

        return (heat_loss + 1.0 * offset_loss + 5.0 * size_loss) / N
Example #11
def continuous_dice_coef(y_true, y_pred, smooth=1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    # this is a workaround to allow a boolean check for continuous dice
    if tf.cond(tf.greater(intersection, 0.), lambda: 1, lambda: 0) == 1:
        c = K.sum(y_true_f * y_pred_f) / K.sum(y_true_f * K.sign(y_pred_f))
    else:
        c = 1
    continuous_union = c * K.sum(y_true_f) + K.sum(y_pred_f)
    return (2. * intersection + smooth) / (continuous_union + smooth)
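A quick eager-mode check of the coefficient above (the Python `if` on a tensor only behaves as intended with eager execution); the tensors are illustrative.

import tensorflow as tf

y_true = tf.constant([1.0, 1.0, 0.0, 0.0])
y_pred = tf.constant([0.8, 0.4, 0.1, 0.0])
# intersection = 1.2, c = 1.2 / 2 = 0.6, continuous_union = 0.6 * 2 + 1.3 = 2.5
print(float(continuous_dice_coef(y_true, y_pred)))  # (2 * 1.2 + 1) / (2.5 + 1) ~ 0.971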
Example #12
 def call(self, x):
     # Apply sigmoid to the trainable params; K.sign(K.abs(x) + 1) is always 1,
     # so the multiplication just keeps the layer input x in the computation graph.
     x = K.sigmoid(self.image_params) * K.sign(K.abs(x) + 1)
     # [0,1] -> [0, 255]
     x = 255 * x
     # RGB -> BGR
     x = x[:, :, :, ::-1]
     # Convert to caffe color scale
     mean = K.variable(
         np.array([103.939, 116.779, 123.68],
                  np.float32).reshape(1, 1, 1, 3))
     return x - mean
Example #13
 def call(self, Inputs):
     Reshaped_Inputs = K.reshape(Inputs, [
         -1,
         Inputs.get_shape().as_list()[1] * Inputs.get_shape().as_list()[2],
         Inputs.get_shape().as_list()[3]
     ])
     Bilinear_Pooling = K.batch_dot(Reshaped_Inputs,
                                    Reshaped_Inputs,
                                    axes=[1, 1])
     Signed_Sqrt = K.sign(Bilinear_Pooling) * K.sqrt(
         K.abs(Bilinear_Pooling) + 1e-9)
     return K.l2_normalize(Signed_Sqrt, axis=-1)
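The signed square root followed by L2 normalization above is the usual normalization step for bilinear pooling; a standalone check with an arbitrary feature-map shape:

import tensorflow as tf
from tensorflow.keras import backend as K

feats = tf.random.normal((2, 4, 4, 8))   # (batch, height, width, channels)
flat = K.reshape(feats, (-1, 4 * 4, 8))  # merge the spatial dimensions

# Bilinear pooling: contract over the 16 spatial positions -> (batch, channels, channels).
bilinear = K.batch_dot(flat, flat, axes=[1, 1])

# Signed square root compresses magnitudes while keeping signs, then L2-normalize.
signed_sqrt = K.sign(bilinear) * K.sqrt(K.abs(bilinear) + 1e-9)
print(K.l2_normalize(signed_sqrt, axis=-1).shape)  # (2, 8, 8)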
Example #14
def cdc_of_true_positive(y_true, y_pred, smooth=1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    # this is a workaround to allow a boolean check for continuous dice
    if tf.cond(tf.greater(intersection, 0.), lambda: 1, lambda: 0) == 1:
        c = K.sum(y_true_f * y_pred_f) / K.sum(y_true_f * K.sign(y_pred_f))
    else:
        c = 1
    # this differs here from normal dice in the second term of the union
    continuous_union = c * K.sum(y_true_f) + K.sum(intersection)
    return (2. * intersection + smooth) / (continuous_union + smooth)
Example #15
def heaviside(x):
    """ Définis la fonction de Heaviside qui n'est pas défini
	par défaut dans keras. A utiliser comme fonction 
	d'activiation lors de la définition d'une couche par exemple
	    modele.add(Dense(4,activation=heaviside))
	Attention il n'y a pas de guillemet ici.
	Astuce de la formule : H(x) = 1/2 (1+|x|) """

    # return (1+x/K.abs(x))/2
    # return (1+K.sign(x))/2

    z = K.zeros_like(x)
    return 1 - K.maximum(z, K.sign(-x))
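Following the docstring's own suggestion, a minimal usage sketch (layer sizes are arbitrary; note that the hard step has zero gradient almost everywhere, so it is mostly useful for fixed or inference-style layers):

import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

modele = Sequential()
modele.add(Dense(4, input_shape=(3,), activation=heaviside))  # the function itself, not a string
modele.add(Dense(1))
modele.summary()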
Example #16
 def call(self, inputs):
     output = K.dot(inputs, self.kernel)
     if self.activation is not None:
         output = self.activation(output)
     b = 0.5 * K.sign(output) + 1
     Sw = b - self.mw
     Sb = b - self.mb
     F = Sw - Sb +  \
         1e3 * ((b - 0.5) - output)**2 + \
         1e3 * ((b - 0.5))**2 - \
         1e6 * ((b - K.mean(output, axis = -1))**2)
     self.add_loss(F)
     return b
Example #17
@tf.custom_gradient
def mask_rs(x):
    y = K.sign(tf.keras.activations.relu(x))

    # some papers (arXiv:1905.01067v4 and arXiv:1911.13299v1) do a
    # rescaling of the weights/masks while backpropagating, we can do it here as well
    scalefactor = tf.compat.v1.size(y, out_type=tf.dtypes.float32) / (
        1 + tf.math.count_nonzero(y, dtype=tf.dtypes.float32))
    y *= scalefactor

    def grad(dy):
        return dy

    return y, grad
Example #18
    def call(self, inputs):

        binary_kernel = self.kernel + K.stop_gradient(
            K.sign(self.kernel) - self.kernel)
        binary_kernel = binary_kernel + K.stop_gradient(
            binary_kernel * self.multiplier - binary_kernel)

        outputs = K.conv2d(inputs,
                           binary_kernel,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)

        return outputs
Example #19
def false_positive_continuous_dice_coef_loss(y_true, y_pred, smooth=1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)

    # this is a workaround to allow a boolean check for continuous dice
    if tf.cond(tf.greater(intersection, 0.), lambda: 1, lambda: 0) == 1:
        c = K.sum(y_true_f * y_pred_f) / K.sum(y_true_f * K.sign(y_pred_f))
    else:
        c = 1

    continuous_union = c * K.sum(y_true_f) + K.sum(y_pred_f)
    false_positive_augmented_union = continuous_union - intersection

    return -(2. * intersection + smooth) / (false_positive_augmented_union +
                                            smooth) + 1
Example #20
        def rank_loss(y_true, y_pred):
            y_true_p1 = y_true[:, 0:window_size]
            y_true_p2 = y_true[:, window_size:]

            y_true_p1 = K.reshape(y_true_p1, [-1, window_size])
            y_true_p2 = K.reshape(y_true_p2, [-1, window_size])
            y_pred = K.reshape(y_pred, [-1, window_size])

            R = tf.matmul(y_true_p2, K.transpose(y_true_p1))

            P = tf.matmul(y_true_p2, K.transpose(y_pred))
            diag_P = tf.linalg.tensor_diag_part(P)
            temp = -K.sign(P - K.reshape(diag_P, [-1, 1])) * R

            loss = K.sum(temp) / K.sum(R * 1.0)

            return loss
Example #21
    def update_neurons(self):
        """Update neurons according to activation function."""

        # Update membrane potentials.
        new_mem = self.get_new_mem()

        # Generate spikes.
        output_spikes = self.get_spikes(new_mem)

        if self.spiketrain is not None:
            self.add_update([(self.spiketrain,
                              self.time * k.sign(output_spikes))])

        # Compute post-synaptic potential.
        psp = self.get_psp(output_spikes)

        return psp
Example #22
@tf.custom_gradient  # straight-through estimator with a small dead zone around zero
def mask_flip(x):
    # y = K.sign(tf.keras.activations.relu(tf.keras.activations.tanh(x)))
    # y = K.sign(tf.keras.activations.relu(x))
    # y = K.sign(tf.keras.activations.tanh(x))
    # y = K.sign(x)
    a = 0.005
    y = K.sign(
        tf.keras.activations.relu(x - a) - tf.keras.activations.relu(-x - a))

    # y = K.sign(tf.keras.activations.relu(tf.keras.activations.tanh(x) - a) - tf.keras.activations.relu(-tf.keras.activations.tanh(x) - a))

    # scalefactor = tf.compat.v1.size(y, out_type=tf.dtypes.float32) / (1+tf.math.count_nonzero(y, dtype=tf.dtypes.float32))
    # y *= scalefactor

    def grad(dy):
        return dy

    return y, grad
Example #23
def calculate_binary_weights(conv_layer, M):
    '''
    conv_layer: original layer's W
    '''
    mean = BK.mean(BK.reshape(conv_layer, shape=(-1, )), axis=0)
    variance = BK.var(BK.reshape(conv_layer, shape=(-1, )), axis=0)

    shifted_stddev = -1 + np.array(range(M)) * (2 / (M - 1))
    shifted_stddev = BK.constant(shifted_stddev,
                                 dtype="float32",
                                 name="shifted_stddev")
    shifted_stddev *= BK.sqrt(variance)
    shifted_stddev = BK.reshape(shifted_stddev,
                                shape=[M] + [1] * len(conv_layer.get_shape()))
    binary_weights = conv_layer - mean
    binary_weights = BK.tile(BK.expand_dims(binary_weights, 0),
                             n=[M] + [1] * len(conv_layer.get_shape()))
    binary_weights = BK.sign(binary_weights + shifted_stddev)
    return binary_weights
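A standalone check of the M-level binarization above, assuming `BK` is `tensorflow.keras.backend`; the kernel shape and M are illustrative.

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as BK

conv_layer = tf.random.normal((3, 3, 2, 4))  # fake 3x3 kernel, 2 in / 4 out channels
M = 3                                        # number of binary bases

binary_weights = calculate_binary_weights(conv_layer, M)
print(binary_weights.shape)  # (M, 3, 3, 2, 4): one +/-1 copy of the kernel per shifted threshold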
Example #24
    def binary_focal_loss_fixed(y_true, y_pred):
        pt_1 = tf.where(tf.equal(y_true[..., 0], 1), y_pred[..., 0],
                        tf.ones_like(y_pred[..., 0]))
        pt_0 = tf.where(tf.equal(y_true[..., 0], 0), y_pred[..., 0],
                        tf.zeros_like(y_pred[..., 0]))

        epsilon = K.epsilon()
        # clip to prevent NaN's and Inf's
        pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)
        pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)

        loss =  -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
               -K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))

        mask = K.sign(y_true[..., 0] - 1 + K.epsilon())
        mask = K.clip(mask, 0, 1)
        N = K.sum(mask) + 1

        return loss / N
Example #25
  def call(self, inputs):
    inputs, weights = inputs

    weights = weights/tf.reduce_sum(weights)
    weights_expand = tf.expand_dims(weights, axis=1)

    mean, variance = tf.nn.weighted_moments(inputs, [0], weights_expand)

    counter = K.update_add(self.counter, K.ones_like(self.counter))
    init = K.sign(counter-K.ones_like(counter))

    mean = K.update(self.mean, init*self.mean+(1.0-init)*mean)
    variance = K.update(self.variance, init*self.variance+(1.0-init)*variance)

    mean_expand = tf.expand_dims(mean, axis=0)
    variance_expand = tf.expand_dims(variance, axis=0)

    outputs = (inputs-mean_expand)/tf.sqrt(variance_expand+self.epsilon)

    return outputs
Example #26
def off_loss(y_true, y_pred):
    mask = K.batch_flatten(K.sign(y_true[..., 6]))
    N = K.sum(mask)
    offy_pred = K.batch_flatten(y_pred[..., 2])
    offx_pred = K.batch_flatten(y_pred[..., 3])
    offy_true = K.batch_flatten(y_true[..., 2])
    offx_true = K.batch_flatten(y_true[..., 3])
    offloss1 = K.abs(offx_pred * mask - offx_true) + K.abs(offy_pred * mask -
                                                           offy_true)
    offloss1 = K.sum(offloss1) / N

    offh_pred = K.batch_flatten(y_pred[..., 4])
    offw_pred = K.batch_flatten(y_pred[..., 5])
    offh_true = K.batch_flatten(y_true[..., 4])
    offw_true = K.batch_flatten(y_true[..., 5])
    offloss2 = K.abs(offw_pred * mask - offw_true) + K.abs(offh_pred * mask -
                                                           offh_true)
    offloss2 = K.sum(offloss2) / N
    offloss = (offloss1 + 2 * offloss2)
    return offloss
Example #27
    def get_gradients(self, tape, loss, var_list, grad_loss=None):
        """Called in `minimize` to compute gradients from loss."""
        grads = tape.gradient(loss, var_list, grad_loss)
        if not (hasattr(self.e2efs_layer, 'regularization_loss')):
            return list(zip(grads, var_list))
        with tf.GradientTape() as e2efs_tape:
            e2efs_loss = self.e2efs_layer.regularization_func(self.e2efs_layer.kernel)
        e2efs_grad = grads[0]
        e2efs_regularizer_grad = e2efs_tape.gradient(e2efs_loss, [self.e2efs_layer.kernel])[0]
        # tf.print(e2efs_regularizer_grad)
        e2efs_regularizer_grad_corrected = e2efs_regularizer_grad / (tf.norm(e2efs_regularizer_grad) + K.epsilon())
        e2efs_grad_corrected = e2efs_grad / (tf.norm(e2efs_grad) + K.epsilon())
        combined_e2efs_grad = (1. - self.e2efs_layer.moving_factor) * e2efs_grad_corrected + \
                              self.e2efs_layer.moving_factor * e2efs_regularizer_grad_corrected
        combined_e2efs_grad = K.sign(
            self.e2efs_layer.moving_factor) * K.minimum(self.th, K.max(
            K.abs(combined_e2efs_grad))) * combined_e2efs_grad / K.max(
            K.abs(combined_e2efs_grad) + K.epsilon())
        grads[0] = combined_e2efs_grad
        return list(zip(grads, var_list))
Example #28
    def final_loss(y_true, y_pred):
        mask = K.batch_flatten(K.sign(y_true[..., 6]))
        N = K.sum(mask)

        heatmaps = K.batch_flatten(y_true[..., 0])
        cls_pred = K.batch_flatten(y_pred[..., 1])
        cls_pred = K.clip(cls_pred, 1e-7, 1 - 1e-7)
        cls_true = K.batch_flatten(y_true[..., 1])

        cls_loss = K.sum(
            focal_loss(gamma1, gamma2, cls_true, cls_pred, heatmaps)) / N

        offy_pred = K.batch_flatten(y_pred[..., 2])
        offx_pred = K.batch_flatten(y_pred[..., 3])
        offy_true = K.batch_flatten(y_true[..., 2])
        offx_true = K.batch_flatten(y_true[..., 3])
        offloss1 = K.abs(offx_pred * mask -
                         offx_true) + K.abs(offy_pred * mask - offy_true)
        offloss1 = K.sum(offloss1) / N

        offh_pred = K.batch_flatten(y_pred[..., 4])
        offw_pred = K.batch_flatten(y_pred[..., 5])
        offh_true = K.batch_flatten(y_true[..., 4])
        offw_true = K.batch_flatten(y_true[..., 5])
        offloss2 = K.abs(offw_pred * mask -
                         offw_true) + K.abs(offh_pred * mask - offh_true)
        offloss2 = K.sum(offloss2) / N
        offloss = (offloss1 + 2 * offloss2)

        sizey_true = K.batch_flatten(y_true[..., 6])
        sizey_pred = K.batch_flatten(y_pred[..., 6])
        sizex_true = K.batch_flatten(y_true[..., 7])
        sizex_pred = K.batch_flatten(y_pred[..., 7])

        size_loss = K.sum(
            K.abs(sizex_pred * mask - sizex_true) +
            K.abs(sizey_pred * mask - sizey_true)) / N
        loss = (1 * cls_loss + 0.8 * offloss + 1 * size_loss)
        return loss
Example #29
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [state_ops.assign_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr = lr * (  # pylint: disable=g-no-augmented-assignment
                1. / (1. + self.decay * math_ops.cast(self.iterations,
                                                    K.dtype(self.decay))))

        t = math_ops.cast(self.iterations, K.floatx()) + 1
        lr_t = lr * (
            K.sqrt(1. - math_ops.pow(self.beta_2, t)) /
            (1. - math_ops.pow(self.beta_1, t)))

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        if self.amsgrad:
            vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        else:
            vhats = [K.zeros(1) for _ in params]
        self.weights = [self.iterations] + ms + vs + vhats

        for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            #v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g) # from amsgrad
            v_t = v - (1-self.beta_2)*K.sign(v-math_ops.square(g))*math_ops.square(g)
            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append(state_ops.assign(m, m_t))
            self.updates.append(state_ops.assign(v, v_t))
            new_p = p_t

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(state_ops.assign(p, new_p))
        return self.updates
Example #30
    def call(self, inputs):

        #***************************************************************************************************
        # Binary layer as in https://arxiv.org/abs/1802.08530
        # M. D. McDonnell, "Training wide residual networks for deployment using a single bit for each weight",
        # ICLR, 2018
        #
        # This code sets the full-precision weights to binary for forward and backward propagation,
        # but enables gradients to update the full-precision weights that are used only during training.
        #
        binary_kernel = self.kernel + K.stop_gradient(
            K.sign(self.kernel) - self.kernel)
        binary_kernel = binary_kernel + K.stop_gradient(
            binary_kernel * self.multiplier - binary_kernel)
        #***************************************************************************************************

        outputs = K.conv2d(inputs,
                           binary_kernel,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)

        return outputs
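The two `stop_gradient` identities above implement "binarize in the forward pass, pass gradients to the full-precision kernel in the backward pass"; a small check on a plain variable, with an arbitrary `multiplier`:

import tensorflow as tf
from tensorflow.keras import backend as K

kernel = tf.Variable([[0.3, -1.2], [2.0, -0.1]])
multiplier = 0.05  # arbitrary scaling constant for this sketch

with tf.GradientTape() as tape:
    binary_kernel = kernel + K.stop_gradient(K.sign(kernel) - kernel)
    binary_kernel = binary_kernel + K.stop_gradient(
        binary_kernel * multiplier - binary_kernel)
    loss = tf.reduce_sum(binary_kernel)

print(binary_kernel.numpy())        # sign(kernel) * multiplier in the forward pass
print(tape.gradient(loss, kernel))  # all ones: gradients bypass the binarization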