Example #1
 def vae_loss(y_true, y_pred):
     # Reconstruction term: per-pixel binary cross-entropy scaled by the image size.
     # x, x_decoded, img_rows, img_cols, z_mean and z_var come from the enclosing scope.
     generation_loss = img_rows * img_cols \
         * metrics.binary_crossentropy(x, x_decoded)
     # KL divergence between the approximate posterior N(z_mean, z_var^2) and N(0, 1);
     # the 1e-8 keeps the log finite when z_var is close to zero.
     kl_loss = 0.5 * tf.reduce_sum(K.square(z_mean)
             + K.square(z_var) - K.log(K.square(z_var) + 1e-8) - 1,
             axis=1)
     return tf.reduce_mean(generation_loss + kl_loss)
Example #2
def triplet_loss(y_true, y_pred, alpha=0.2):
    """
    Implementation of the triplet loss as defined by formula (3)

    Arguments:
    y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
    y_pred -- python list containing three objects:
            anchor -- the encodings for the anchor images, of shape (None, 128)
            positive -- the encodings for the positive images, of shape (None, 128)
            negative -- the encodings for the negative images, of shape (None, 128)

    Returns:
    loss -- real number, value of the loss
    """

    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]

    # Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1
    pos_dist = K.sum(K.square(anchor - positive), axis=-1)
    # Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1
    neg_dist = K.sum(K.square(anchor - negative), axis=-1)
    # Step 3: subtract the two previous distances and add alpha.
    basic_loss = pos_dist - neg_dist + alpha
    # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
    loss = K.sum(K.maximum(basic_loss, 0))

    return loss
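A quick way to sanity-check the function above is to feed it random encodings. The shapes and the use of K.eval below are illustrative assumptions, not part of the original snippet.

import numpy as np
from keras import backend as K

anchor = K.constant(np.random.rand(4, 128))
positive = K.constant(np.random.rand(4, 128))
negative = K.constant(np.random.rand(4, 128))
# y_true is ignored by triplet_loss, so None is fine here.
print(K.eval(triplet_loss(None, [anchor, positive, negative])))  # a single non-negative scalar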
Example #3
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        accumulators = [K.zeros(shape) for shape in shapes]
        delta_accumulators = [K.zeros(shape) for shape in shapes]
        self.weights = accumulators + delta_accumulators
        self.updates = []

        for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
            # update accumulator
            new_a = self.rho * a + (1. - self.rho) * K.square(g)
            self.updates.append(K.update(a, new_a))

            # use the new accumulator and the *old* delta_accumulator
            update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)

            new_p = p - get_learing_rate(p, self.lr) * update  # get_learing_rate (name as in the original source) supplies the learning rate for this parameter
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))

            # update delta_accumulator
            new_d_a = self.rho * d_a + (1 - self.rho) * K.square(update)
            self.updates.append(K.update(d_a, new_d_a))
        return self.updates
Example #4
    def call(self, x, mask=None):
        x = K.permute_dimensions(x, (0, 2, 1))
        x = K.reshape(x, (-1, self.input_length))
        x = K.expand_dims(x, 1)
        x = K.expand_dims(x, -1)
        if self.real_filts is not None:
            conv_out_r = K.conv2d(x, self.W_r, strides=self.subsample,
                                  border_mode=self.border_mode,
                                  dim_ordering='th')
        else:
            conv_out_r = x

        if self.complex_filts is not None:
            conv_out_c1 = K.conv2d(x, self.W_c1, strides=self.subsample,
                                   border_mode=self.border_mode,
                                   dim_ordering='th')
            conv_out_c2 = K.conv2d(x, self.W_c2, strides=self.subsample,
                                   border_mode=self.border_mode,
                                   dim_ordering='th')
            conv_out_c = K.sqrt(K.square(conv_out_c1) + K.square(conv_out_c2) + K.epsilon())
            output = K.concatenate((conv_out_r, conv_out_c), axis=1)
        else:
            output = conv_out_r

        output_shape = self.get_output_shape_for((None, self.input_length, self.input_dim))
        output = K.squeeze(output, 3)  # remove the dummy 3rd dimension
        output = K.permute_dimensions(output, (2, 1, 0))
        output = K.reshape(output, (-1, output_shape[1], output.shape[1]*output.shape[2]))
        return output
Example #5
def eigen_loss(y_true, y_pred):
    y_true = tf.Print(y_true, [y_true], message='y_true', summarize=30)
    y_pred = tf.Print(y_pred, [y_pred], message='y_pred', summarize=30)

    y_true_clipped = K.clip(y_true, K.epsilon(), None)
    y_pred_clipped = K.clip(y_pred, K.epsilon(), None)

    first_log = K.log(y_pred_clipped + 1.)
    second_log = K.log(y_true_clipped + 1.)
    w_x = K.variable(np.array([[-1., 0., 1.],
                                [-1., 0., 1.],
                                [-1., 0., 1.]]).reshape(3, 3, 1, 1))

    grad_x_pred = K.conv2d(first_log, w_x, padding='same')
    grad_x_true = K.conv2d(second_log, w_x, padding='same')

    w_y = K.variable(np.array([[-1., -1., -1.],
                                [0., 0., 0.],
                                [1., 1., 1.]]).reshape(3, 3, 1, 1))

    grad_y_pred = K.conv2d(first_log, w_y, padding='same')
    grad_y_true = K.conv2d(second_log, w_y, padding='same')
    diff_x = grad_x_pred - grad_x_true
    diff_y = grad_y_pred - grad_y_true

    log_term = K.mean(K.square((first_log - second_log)), axis=-1)
    sc_inv_term = K.square(K.mean((first_log - second_log),axis=-1))
    grad_loss = K.mean(K.square(diff_x) + K.square(diff_y), axis=-1)

    return log_term - (0.5 * sc_inv_term) + grad_loss
Example #6
def gradient_penalty_loss(y_true, y_pred, averaged_samples, gradient_penalty_weight):
    """Calculates the gradient penalty loss for a batch of "averaged" samples.

    In Improved WGANs, the 1-Lipschitz constraint is enforced by adding a term to the loss function
    that penalizes the network if the gradient norm moves away from 1. However, it is impossible to evaluate
    this function at all points in the input space. The compromise used in the paper is to choose random points
    on the lines between real and generated samples, and check the gradients at these points. Note that it is the
    gradient w.r.t. the input averaged samples, not the weights of the discriminator, that we're penalizing!

    In order to evaluate the gradients, we must first run samples through the generator and evaluate the loss.
    Then we get the gradients of the discriminator w.r.t. the input averaged samples.
    The l2 norm and penalty can then be calculated for this gradient.

    Note that this loss function requires the original averaged samples as input, but Keras only supports passing
    y_true and y_pred to loss functions. To get around this, we make a partial() of the function with the
    averaged_samples argument, and use that for model training."""
    # first get the gradients:
    #   assuming: - that y_pred has dimensions (batch_size, 1)
    #             - averaged_samples has dimensions (batch_size, nbr_features)
    # gradients afterwards has dimension (batch_size, nbr_features), basically
    # a list of nbr_features-dimensional gradient vectors
    gradients = K.gradients(y_pred, averaged_samples)[0]
    # compute the euclidean norm by squaring ...
    gradients_sqr = K.square(gradients)
    #   ... summing over the rows ...
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))
    #   ... and sqrt
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    # compute lambda * (1 - ||grad||)^2 still for each single sample
    gradient_penalty = gradient_penalty_weight * K.square(1 - gradient_l2_norm)
    # return the mean as loss over all the batch samples
    return K.mean(gradient_penalty)
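The partial() trick described in the docstring can be sketched as follows; the tensor and weight values here are placeholders, and only functools.partial and the __name__ attribute are the point.

from functools import partial
from keras.layers import Input

averaged_samples = Input(shape=(128,))  # stand-in for the random interpolates between real and fake
partial_gp_loss = partial(gradient_penalty_loss,
                          averaged_samples=averaged_samples,
                          gradient_penalty_weight=10)
partial_gp_loss.__name__ = 'gradient_penalty'  # Keras losses need a __name__
# discriminator_model.compile(optimizer=..., loss=[wasserstein_loss, wasserstein_loss, partial_gp_loss])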
Example #7
def contrastive_loss(y_true, y_pred):
    '''Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    '''
    margin = 1
    return K.mean(y_true * K.square(y_pred) +
                  (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
Example #8
 def __call__(self, loss):
     x = self.layer.get_output(True)
     assert K.ndim(x) == 4
     a = K.square(x[:, :, 1:, :-1] - x[:, :, :-1, :-1])
     b = K.square(x[:, :, :-1, 1:] - x[:, :, :-1, :-1])
     loss += self.weight * K.mean(K.sum(K.pow(a + b, 1.25), axis=(1,2,3)))
     return loss
Example #9
def total_variation_loss(x):
    assert K.ndim(x) == 4
    # Squared differences between neighbouring pixels along the height (a) and width (b) axes.
    a = K.square(x[:, :, 1:, :img_width - 1] - x[:, :, :img_height - 1, :img_width - 1])
    b = K.square(x[:, :, :img_height - 1, 1:] - x[:, :, :img_height - 1, :img_width - 1])
    #a = K.square(x[:, :, :img_width-1, :img_height-1] - x[:, :, 1:, :img_height-1])
    #b = K.square(x[:, :, :img_width-1, :img_height-1] - x[:, :, :img_width-1, 1:])
    return K.sum(K.pow(a + b, 1.25))
Example #10
def contrastive_loss(y_true, y_pred):
    '''Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    from https://github.com/fchollet/keras/blob/master/examples/mnist_siamese_graph.py
    '''
    margin = 1
    return K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
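A tiny numeric check of the contrastive loss above (the keras.backend import is an assumption): with margin 1, similar pairs (y_true = 1) contribute d^2 and dissimilar pairs contribute max(1 - d, 0)^2.

from keras import backend as K

d = K.constant([0.2, 0.2])  # predicted distances
y = K.constant([1., 0.])    # 1 = similar pair, 0 = dissimilar pair
print(K.eval(contrastive_loss(y, d)))  # mean of [0.04, 0.64] = 0.34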
Example #11
def huber_loss(y_true, y_pred, clip_value):
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.

    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity, since TensorFlow has problems
        # with the comparison `K.abs(x) < np.inf`.
        return .5 * K.square(x)

    condition = K.abs(x) < clip_value
    squared_loss = .5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        if hasattr(tf, 'select'):
            return tf.select(condition, squared_loss, linear_loss)  # condition, true, false
        else:
            return tf.where(condition, squared_loss, linear_loss)  # condition, true, false
    elif K.backend() == 'theano':
        from theano import tensor as T
        return T.switch(condition, squared_loss, linear_loss)
    else:
        raise RuntimeError('Unknown backend "{}".'.format(K.backend()))
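For reference, a small numeric check of huber_loss (assuming numpy is imported and the TensorFlow backend is active): errors inside the clip value stay quadratic, errors outside it grow linearly.

import numpy as np
from keras import backend as K

y_true = K.constant([0., 0., 0.])
y_pred = K.constant([0.5, 2., -3.])
print(K.eval(huber_loss(y_true, y_pred, clip_value=1.)))  # [0.125, 1.5, 2.5]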
Example #12
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5):
    '''Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(T, 2), wh
    num_classes: integer
    ignore_thresh: float, the iou threshold whether to ignore object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)

    '''
    yolo_outputs = args[:3]
    y_true = args[3:]
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]
    input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(3)]
    loss = 0
    m = K.shape(yolo_outputs[0])[0]

    for l in range(3):
        object_mask = y_true[l][..., 4:5]
        true_class_probs = y_true[l][..., 5:]

        pred_xy, pred_wh, pred_confidence, pred_class_probs = yolo_head(yolo_outputs[l],
             anchors[anchor_mask[l]], num_classes, input_shape)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet box loss.
        xy_delta = (y_true[l][..., :2]-pred_xy)*grid_shapes[l][::-1]
        wh_delta = K.log(y_true[l][..., 2:4]) - K.log(pred_wh)
        # Avoid log(0)=-inf.
        wh_delta = K.switch(object_mask, wh_delta, K.zeros_like(wh_delta))
        box_delta = K.concatenate([xy_delta, wh_delta], axis=-1)
        box_delta_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')
        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])
            iou = box_iou(pred_box[b], true_box)
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))
            return b+1, ignore_mask
        _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        box_loss = object_mask * K.square(box_delta*box_delta_scale)
        confidence_loss = object_mask * K.square(1-pred_confidence) + \
            (1-object_mask) * K.square(0-pred_confidence) * ignore_mask
        class_loss = object_mask * K.square(true_class_probs-pred_class_probs)
        loss += K.sum(box_loss) + K.sum(confidence_loss) + K.sum(class_loss)
    return loss / K.cast(m, K.dtype(loss))
Example #13
    def vae_loss(x, x_decoded_mean):
        #     xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
        #     bott_loss = objectives.binary_crossentropy(sp, z)
        #     print(sp.shape)
        kl_loss = - 0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1)

        return K.sqrt(K.sum(K.square(x - x_decoded_mean), axis=-1)) + l * K.sqrt(
            K.sum(K.square(z - sp), axis=-1)) + beta * kl_loss
Example #14
def keras_dice_loss(labels, probas, epsilon=1e-6):
    # https://www.jeremyjordan.me/semantic-segmentation/

    axes = tuple(range(1, len(probas.shape) - 1))
    numerator = 2. * K.sum(probas * labels, axes)
    denominator = K.sum(K.square(probas) + tf.cast(K.square(labels), probas.dtype), axes)

    return 1 - K.mean(numerator / (denominator + epsilon))  # average over classes and batch
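A sanity check for the dice loss above (the tensorflow and keras.backend imports are assumed to match the snippet): identical one-hot masks should give a loss of roughly zero.

import numpy as np
import tensorflow as tf
from keras import backend as K

labels = K.constant(np.eye(3)[np.array([[0, 1, 2, 1]])])  # shape (1, 4, 3): 4 pixels, 3 classes
print(K.eval(keras_dice_loss(labels, labels)))            # ~0.0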
Example #15
def total_variation_loss(y_pred):
    if K.image_data_format() == 'channels_first':
        a = K.square(y_pred[:, :, :m - 1, :n - 1] - y_pred[:, :, 1:, :n - 1])
        b = K.square(y_pred[:, :, :m - 1, :n - 1] - y_pred[:, :, :m - 1, 1:])
    else:
        a = K.square(y_pred[:, :m - 1, :n - 1, :] - y_pred[:, 1:, :n - 1, :])
        b = K.square(y_pred[:, :m - 1, :n - 1, :] - y_pred[:, :m - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #16
def chopra_loss(y_true, y_pred):
    '''Chopra et al. '05 loss: (1-Y) * (2/Q) * Ew^2 + Y * 2Q * e^(-2.77/Q * Ew)
       (kept below as a commented-out alternative); the active code uses the
       margin-based contrastive loss of Hadsell et al. '06. Uses keras.backend as K.'''
    #Q = 500.
    #return (1 - y_true) * 2 / Q * K.square(y_pred) + y_true * 2 * Q * K.exp(-2.77 / Q * y_pred)
    margin = 1
    loss = K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
    return loss
Example #17
def dice_coef(y_true, y_pred, smooth=1):
    """
    Dice = (2*|X & Y|)/ (|X|+ |Y|)
         =  2*sum(|A*B|)/(sum(A^2)+sum(B^2))
    ref: https://arxiv.org/pdf/1606.04797v1.pdf
    """
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
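A quick check of dice_coef (hypothetical tensors, keras.backend assumed): identical masks score 1, disjoint masks fall back to smooth / (|X| + smooth).

import numpy as np
from keras import backend as K

mask = K.constant(np.array([[1., 0., 1., 1.]]))
print(K.eval(dice_coef(mask, mask)))       # [1.0]
print(K.eval(dice_coef(mask, 0. * mask)))  # [0.25] = (0 + 1) / (3 + 0 + 1)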
Example #18
 def __call__(self, x):
     xshape = K.int_shape(x)
     if self.division_idx is None:
         self.division_idx = xshape[-1] // 2  # integer division: used as a slice index below
     x = K.reshape(x, (-1, xshape[-1]))
     x /= K.sqrt(K.sum(K.square(x), axis=0, keepdims=True))
     xx = K.sum(x[:,:self.division_idx] * x[:,self.division_idx:], axis=0)
     return self.gamma * K.sqrt(K.sum(K.square(xx)) + K.epsilon())
Example #19
 def __call__(self, loss):
     output = self.layer.get_output(True)
     batch_size = K.shape(output)[0] // 2
     generated = output[:batch_size, :, :, :]
     loss += self.weight * K.mean(
         K.sum(K.square(gram_matrix(self.target) - gram_matrix(generated)), axis=(1,2))
     ) / (4.0 * K.square(K.prod(K.shape(generated)[1:])))
     return loss
Example #20
def variation_loss(comb):
    if K.image_dim_ordering() == "th":
        dx = K.square(comb[:, :, :RESIZED_WH-1, :RESIZED_WH-1] - comb[:, :, 1:, :RESIZED_WH-1])
        dy = K.square(comb[:, :, :RESIZED_WH-1, :RESIZED_WH-1] - comb[:, :, :RESIZED_WH-1, 1:])
    else:
        dx = K.square(comb[:, :RESIZED_WH-1, :RESIZED_WH-1, :] - comb[:, 1:, :RESIZED_WH-1, :])
        dy = K.square(comb[:, :RESIZED_WH-1, :RESIZED_WH-1, :] - comb[:, :RESIZED_WH-1, 1:, :])
    return K.sum(K.pow(dx + dy, 1.25))
Example #21
def yoloxyloss(y_true, y_pred, t):
    # real_y_true = tf.where(t, y_true, K.zeros_like(y_true))
    lo = K.square(y_true - y_pred) + 0.05 * K.square(0.5 - y_pred)
    value_if_true = lo
    value_if_false = K.zeros_like(y_true)
    loss1 = tf.where(t, value_if_true, value_if_false)
    objsum = K.sum(y_true)
    return K.sum(loss1)/(objsum+0.0000001)
Example #22
    def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
        error = y_true - y_pred
        cond = K.abs(error) <= clip_delta

        squared_loss = 0.5 * K.square(error)
        # Linear branch of the Huber loss (despite the variable name).
        quadratic_loss = 0.5 * K.square(clip_delta) + clip_delta * (K.abs(error) - clip_delta)

        return K.mean(tf.where(cond, squared_loss, quadratic_loss))
Example #23
def continuity_loss(x):
    assert K.ndim(x) == 4
    if K.image_dim_ordering() == "th":
        a = K.square(x[:, :, : img_width - 1, : img_height - 1] - x[:, :, 1:, : img_height - 1])
        b = K.square(x[:, :, : img_width - 1, : img_height - 1] - x[:, :, : img_width - 1, 1:])
    else:
        a = K.square(x[:, : img_width - 1, : img_height - 1, :] - x[:, 1:, : img_height - 1, :])
        b = K.square(x[:, : img_width - 1, : img_height - 1, :] - x[:, : img_width - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #24
def total_variation_loss(x):
    assert K.ndim(x) == 4
    if K.image_dim_ordering() == 'th':
        a = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, 1:, :img_height - 1])
        b = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, :img_width - 1, 1:])
    else:
        a = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, 1:, :img_height - 1, :])
        b = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, :img_width - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #25
def pearson_loss(y_true_rank, y_pred_rank, eps=1e-10):
    # 1 - Pearson correlation between the two rank vectors.
    y_true_mean = K.mean(y_true_rank)
    y_pred_mean = K.mean(y_pred_rank)
    u1 = y_true_rank - y_true_mean
    u2 = y_pred_rank - y_pred_mean
    u = K.sum(u1 * u2)
    d = K.sqrt(K.sum(K.square(u1)) * K.sum(K.square(u2)))
    rho = u / (d + eps)
    return 1. - rho
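A quick check of pearson_loss (keras.backend assumed): perfectly correlated inputs give a loss near 0, perfectly anti-correlated inputs a loss near 2.

from keras import backend as K

a = K.constant([1., 2., 3., 4.])
print(K.eval(pearson_loss(a, 2. * a)))  # ~0.0
print(K.eval(pearson_loss(a, -a)))      # ~2.0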
Example #26
 def __call__(self, x):
     regularization = 0
     dimorder = self.axes + list(set(range(K.ndim(x))) - set(self.axes))
     p = K.permute_dimensions(x, dimorder)
     if self.TV:
         regularization += self.TV*K.sum(K.sqrt(K.square(diffr(p)) + K.square(diffc(p)) + K.epsilon()))
     if self.TV2:
         regularization += self.TV2*K.sum(K.sqrt(K.square(diffrr(p)) + K.square(diffcc(p)) + 2*K.square(diffrc(p)) + K.epsilon()))
     return regularization
Example #27
def total_variation_loss(x):
    assert K.ndim(x) == 4
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1])
        b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:])
    else:
        a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :])
        b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #28
def total_variation_loss(x):
    assert 4 == K.ndim(x)
    if K.image_dim_ordering() == 'th':
        a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1])
        b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:])
    else:
        a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :])
        b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
Example #29
    def setup(self):
        distorted_A, fake_A, fake_sz64_A, mask_A, self.path_A, self.path_mask_A, self.path_abgr_A, self.path_bgr_A = self.cycle_variables(self.model.netGA)
        distorted_B, fake_B, fake_sz64_B, mask_B, self.path_B, self.path_mask_B, self.path_abgr_B, self.path_bgr_B = self.cycle_variables(self.model.netGB)
        real_A = Input(shape=self.model.img_shape)
        real_B = Input(shape=self.model.img_shape)

        if self.use_lsgan:
            self.loss_fn = lambda output, target : K.mean(K.abs(K.square(output-target)))
        else:
            self.loss_fn = lambda output, target : -K.mean(K.log(output+1e-12)*target+K.log(1-output+1e-12)*(1-target))

        # ========== Define Perceptual Loss Model==========
        if self.use_perceptual_loss:
            from keras.models import Model
            from keras_vggface.vggface import VGGFace
            vggface = VGGFace(include_top=False, model='resnet50', input_shape=(224, 224, 3))
            vggface.trainable = False
            out_size55 = vggface.layers[36].output
            out_size28 = vggface.layers[78].output
            out_size7 = vggface.layers[-2].output
            vggface_feat = Model(vggface.input, [out_size55, out_size28, out_size7])
            vggface_feat.trainable = False
        else:
            vggface_feat = None

        loss_DA, loss_GA = self.define_loss(self.model.netDA, real_A, fake_A, fake_sz64_A, distorted_A, vggface_feat)
        loss_DB, loss_GB = self.define_loss(self.model.netDB, real_B, fake_B, fake_sz64_B, distorted_B, vggface_feat)

        if self.use_mask_refinement:
            loss_GA += 1e-3 * K.mean(K.square(mask_A))
            loss_GB += 1e-3 * K.mean(K.square(mask_B))
        else:
            loss_GA += 3e-3 * K.mean(K.abs(mask_A))
            loss_GB += 3e-3 * K.mean(K.abs(mask_B))

        w_fo = 0.01
        loss_GA += w_fo * K.mean(self.first_order(mask_A, axis=1))
        loss_GA += w_fo * K.mean(self.first_order(mask_A, axis=2))
        loss_GB += w_fo * K.mean(self.first_order(mask_B, axis=1))
        loss_GB += w_fo * K.mean(self.first_order(mask_B, axis=2))

        weightsDA = self.model.netDA.trainable_weights
        weightsGA = self.model.netGA.trainable_weights
        weightsDB = self.model.netDB.trainable_weights
        weightsGB = self.model.netGB.trainable_weights

        # Adam(..).get_updates(...)
        training_updates = Adam(lr=self.lrD, beta_1=0.5).get_updates(weightsDA,[],loss_DA)
        self.netDA_train = K.function([distorted_A, real_A],[loss_DA], training_updates)
        training_updates = Adam(lr=self.lrG, beta_1=0.5).get_updates(weightsGA,[], loss_GA)
        self.netGA_train = K.function([distorted_A, real_A], [loss_GA], training_updates)

        training_updates = Adam(lr=self.lrD, beta_1=0.5).get_updates(weightsDB,[],loss_DB)
        self.netDB_train = K.function([distorted_B, real_B],[loss_DB], training_updates)
        training_updates = Adam(lr=self.lrG, beta_1=0.5).get_updates(weightsGB,[], loss_GB)
        self.netGB_train = K.function([distorted_B, real_B], [loss_GB], training_updates)
Example #30
 def total_variation_loss(self):
     """
     Total variation loss, designed to keep the generated image locally coherent
     :return: total variation loss
     """
     x = self.input_tensor
     assert K.ndim(x) == 4
     a = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, 1:, :img_height - 1])
     b = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, :img_width - 1, 1:])
     return K.sum(K.pow(a + b, 1.25))
Example #31
def customloss(y_true, y_pred):
    return K.mean(K.square(y_pred - y_true), axis=-1)
Example #32
		def customLoss(yTrue, yPred):
			return K.mean(K.sqrt(K.square(K.sin(yTrue) - K.sin(yPred)) + K.square(K.cos(yTrue) - K.cos(yPred))))
Example #33
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):
    '''Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(N, 2), wh
    num_classes: integer
    ignore_thresh: float, the iou threshold whether to ignore object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)

    '''
    num_layers = len(anchors) // 3  # default setting
    yolo_outputs = args[:num_layers]
    y_true = args[num_layers:]
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]
                   ] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]
    input_shape = K.cast(
        K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [
        K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0]))
        for l in range(num_layers)
    ]
    loss = 0
    m = K.shape(yolo_outputs[0])[0]  # batch size, tensor
    mf = K.cast(m, K.dtype(yolo_outputs[0]))

    for l in range(num_layers):
        object_mask = y_true[l][..., 4:5]
        true_class_probs = y_true[l][..., 5:]

        grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
                                                     anchors[anchor_mask[l]],
                                                     num_classes,
                                                     input_shape,
                                                     calc_loss=True)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet raw box to calculate loss.
        raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid
        raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] *
                            input_shape[::-1])
        raw_true_wh = K.switch(object_mask, raw_true_wh,
                               K.zeros_like(raw_true_wh))  # avoid log(0)=-inf
        box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]),
                                     size=1,
                                     dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')

        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b, ..., 0:4],
                                       object_mask_bool[b, ..., 0])
            iou = box_iou(pred_box[b], true_box)
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(
                b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
            return b + 1, ignore_mask

        _, ignore_mask = K.control_flow_ops.while_loop(lambda b, *args: b < m,
                                                       loop_body,
                                                       [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        # K.binary_crossentropy is helpful to avoid exp overflow.
        xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(
            raw_true_xy, raw_pred[..., 0:2], from_logits=True)
        wh_loss = object_mask * box_loss_scale * 0.5 * K.square(
            raw_true_wh - raw_pred[..., 2:4])
        confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \
            (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask
        class_loss = object_mask * K.binary_crossentropy(
            true_class_probs, raw_pred[..., 5:], from_logits=True)

        xy_loss = K.sum(xy_loss) / mf
        wh_loss = K.sum(wh_loss) / mf
        confidence_loss = K.sum(confidence_loss) / mf
        class_loss = K.sum(class_loss) / mf
        loss += xy_loss + wh_loss + confidence_loss + class_loss
        if print_loss:
            loss = tf.Print(loss, [
                loss, xy_loss, wh_loss, confidence_loss, class_loss,
                K.sum(ignore_mask)
            ],
                            message='loss: ')
    return loss
Example #34
 def normalize(x):
     return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())
Example #35
 def f(y, d):
     return K.mean(y * K.square(d) +
                   (1 - y) * K.square(K.maximum(margin - d, 0)))
Example #36
def vae_loss(x, x_decoded_mean):
    xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)  #
    kl_loss = -0.5 * K.mean(
        1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
    return K.mean(xent_loss + kl_loss)
Example #37
def squash(vectors, axis=-1):
    s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
    scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm)
    return scale * vectors
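The squashing above keeps the direction of each vector and maps its norm n to n^2 / (1 + n^2). A quick check with a hypothetical vector (keras.backend assumed):

import numpy as np
from keras import backend as K

v = K.constant(np.array([[3., 4.]]))  # norm 5
print(K.eval(squash(v)))              # ~[[0.577, 0.769]], norm ~0.96 = 25/26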
Example #38
 def vae_loss(x, x_decoded_mean):
     xent_loss = input_dim * objectives.binary_crossentropy(
         x, x_decoded_mean)
     kl_loss = -0.5 * K.sum(
         1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
     return xent_loss + kl_loss
Example #39
def coeff_determination(y_true, y_pred):
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return (1 - SS_res / (SS_tot + K.epsilon()))
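coeff_determination is the usual R^2 metric; a minimal check (keras.backend assumed): a perfect prediction gives 1, predicting the mean gives roughly 0.

from keras import backend as K

y = K.constant([1., 2., 3.])
print(K.eval(coeff_determination(y, y)))                         # ~1.0
print(K.eval(coeff_determination(y, K.constant([2., 2., 2.]))))  # ~0.0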
Example #40
 def clipped_mse(y_true, y_pred):
     delta = K.clip(y_true - y_pred, self.delta_range[0],
                    self.delta_range[1])
     return K.mean(K.square(delta), axis=-1)
Example #41
 def _huber_loss(self, target, prediction):
     # sqrt(1+error^2)-1
     error = prediction - target
     return K.mean(K.sqrt(1 + K.square(error)) - 1, axis=-1)
Example #42
    plt.grid(False)
    plt.imshow(display_grid, aspect='auto', cmap='viridis')
    plt.show()

from keras.applications import VGG16
from keras import backend as K

model = VGG16(weights='imagenet', include_top=False)
layer_name = 'block3_conv1'
filter_index = 0
layer_output = model.get_layer(layer_name).output
loss = K.mean(layer_output[:, :, :, filter_index])

# Gradient of the filter-activation loss with respect to the input image.
grads = K.gradients(loss, model.input)[0]

# Normalize the gradient so the size of the ascent step is well behaved.
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

# Function mapping an input image to the loss value and the normalized gradient.
iterate = K.function([model.input], [loss, grads])

import numpy as np
loss_value, grads_value = iterate([np.zeros((1, 150, 150, 3))])

# Gradient ascent: start from a gray image with noise and maximize the filter response.
input_img_data = np.random.random((1, 150, 150, 3)) * 20 + 128.
step = 1.
for i in range(40):
    loss_value, grads_value = iterate([input_img_data])
    input_img_data += grads_value * step


def deprocess_image(x):
    # Normalize the tensor: center on 0 with std 0.1, shift into [0, 1], then scale to a uint8 image.
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    x += 0.5
    x = np.clip(x, 0, 1)
    x *= 255
    x = np.clip(x, 0, 255).astype('uint8')
    return x
Example #43
def squeeze(s):
    sq = K.sum(K.square(s), axis=-1, keepdims=True)
    return (sq / (1 + sq)) * (s / K.sqrt(sq + K.epsilon()))
Example #44
 def _total_variation_loss(self, x):
     a = backend.square(x[:, :self.height - 1, :self.width - 1, :] - x[:, 1:, :self.width - 1, :])
     b = backend.square(x[:, :self.height - 1, :self.width - 1, :] - x[:, :self.height - 1, 1:, :])
     return backend.sum(backend.pow(a + b, 1.25))
Example #45
 def call(self, inputs, **kwargs):
     return K.sqrt(K.sum(K.square(inputs), -1) + K.epsilon())
Example #46
 def call(self, inputs):
     return K.sqrt(K.sum(K.square(inputs + K.epsilon()), axis=-1))
Example #47
 def _style_loss(self, style, combination):
     S = self._gram_matrix(style)
     C = self._gram_matrix(combination)
     channels = 3
     size = self.height * self.width
     return backend.sum(backend.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
Example #48
 def _eucl_loss(x, y):
     return K.sum(K.square(x - y)) / batch_size / 2
Example #49
 def squash(x, axis=-1):
     s_squared_norm = K.sum(K.square(x), axis, keepdims=True)
     scale = K.sqrt(s_squared_norm + K.epsilon())
     return x / scale
Example #50
def root_mean_squared_error(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
Example #51
 def call(self, x):
     mean = K.mean(x, axis=-1, keepdims=True)
     var = K.mean(K.square(x - mean), axis=-1, keepdims=True)
     res = (x - mean) / K.sqrt(var + self.epsilon)
     return self.gamma * res + self.beta
Example #52
def total_variation_loss(x):  # to smooth the generated image
    a = K.square(x[:, :img_h - 1, :img_w - 1, :] - x[:, 1:, :img_w - 1, :])
    b = K.square(x[:, :img_h - 1, :img_w - 1, :] - x[:, :img_h - 1, 1:, :])

    return K.sum(K.pow(a + b, 1.25))
Example #53
 def ortho_reg(weight_matrix):
     ### orthogonal regularization for topic embedding matrix ###
     w_n = weight_matrix / K.cast(K.epsilon() + K.sqrt(K.sum(K.square(weight_matrix), axis=-1, keepdims=True)),
                                  K.floatx())
     reg = K.sum(K.square(K.dot(w_n, K.transpose(w_n)) - K.eye(w_n.shape[0].value)))
     return args.ortho_reg * reg
Example #54
 def masked_mse(y_true, y_pred):
     mask_true = K.cast(K.not_equal(y_true, y_pred), K.floatx())
     masked_squared_error = K.square(mask_true * (y_true - y_pred))
     r = K.sum(masked_squared_error,
               axis=-1)  # / K.sum(mask_true, axis=-1)
     return r
Example #55
def content_loss(base, combination):
    return K.sum(K.square(combination - base))
Example #56
def pixelwise_l2_loss(y_true, y_pred):
    y_true /= 255.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    return K.mean(K.square(y_true_f - y_pred_f))
Example #57
def total_variation(y):
    # assert K.ndim(y) == 4
    a = K.square(y[:, :res - 1, :res - 1, :] - y[:, 1:, :res - 1, :])
    b = K.square(y[:, :res - 1, :res - 1, :] - y[:, :res - 1, 1:, :])

    return K.pow(K.sum(a + b), 0.5)  # tweak the power?
Example #58
def sobelNorm(y):
    filt = expandedSobel(y)
    sobel = K.depthwise_conv2d(y, filt, padding='same')
    return K.mean(K.square(sobel))
Example #59
def euclidean_distance(vects):
    x, y = vects
    #return K.sqrt(K.sum(K.square(x - y), axis=-1, keepdims=True))
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))
Example #60
def content_loss(content, gen):
    return K.sum(K.square(gen - content))