Example #1
def margin_loss(y_true, y_pred):
    """
    Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this loss should work too. Not test it.
    :param y_true: [None, n_classes]
    :param y_pred: [None, num_capsule]
    :return: a scalar loss value.
    """
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))

    return K.mean(K.sum(L, 1))
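A quick sanity check for margin_loss above, assuming the TensorFlow Keras backend (tf.keras); the toy tensors are illustrative only:

import numpy as np
from tensorflow.keras import backend as K

# Confident, correct predictions incur zero margin loss.
y_true = K.constant(np.array([[1., 0.], [0., 1.]], dtype='float32'))
y_pred = K.constant(np.array([[0.95, 0.05], [0.02, 0.98]], dtype='float32'))
print(K.eval(margin_loss(y_true, y_pred)))  # 0.0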
Example #2
def contrastive_loss(y, d):
    """ Contrastive loss from Hadsell-et-al.'06
        http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    """
    margin = 1
    return K.mean(y * K.square(d) +
                  (1 - y) * K.square(K.maximum(margin - d, 0)))
Example #3
def contrastive_loss_over_distance(labels, distances):
    '''
    :param labels: 1D tensor containing 0 or 1 for each example
    :param distances: 1D tensor of distances between paired embeddings
    :return: scalar contrastive loss
    '''
    margin = 1
    # loss = K.mean((distances + K.maximum(margin-shifted_distances, 0)))
    print(K.eval(distances))
    right = margin - distances
    print(K.eval(right))
    right = K.maximum(right, 0)
    print(K.eval(right))
    right = K.square(right)
    print(K.eval(right))

    print ""

    print(K.eval(distances))
    left = distances
    print(K.eval(left))
    left = K.square(left)
    print(K.eval(left))

    left = labels * left
    print(K.eval(left))
    right = (1 - labels) * right
    print(K.eval(right))

    loss = K.mean(left + right)
    print(K.eval(loss))

    # loss = K.mean(distances - shifted_distances)
    return loss
Example #4
def triplet_loss(y_true, y_pred):
    y_pred = K.flatten(y_pred)
    y_true = K.flatten(y_true)
    pos = y_pred[::2]
    neg = y_pred[1::2]
    margin = y_true[::2] - y_true[1::2]
    delta = K.maximum(margin + neg - pos, 0)
    return K.mean(delta, axis=-1)
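This variant assumes scores for each pair are interleaved along the batch axis (positive at even indices, negative at odd ones), with y_true carrying per-pair margins in the same layout. A small check under that assumption, using the TensorFlow Keras backend:

import numpy as np
from tensorflow.keras import backend as K

# Scores laid out as [pos0, neg0, pos1, neg1]; margins likewise.
y_pred = K.constant(np.array([0.9, 0.2, 0.7, 0.6], dtype='float32'))
y_true = K.constant(np.array([1.0, 0.5, 1.0, 0.5], dtype='float32'))
# Pair 0 satisfies its 0.5 margin (hinge is 0); pair 1 violates it by
# 0.4, so the mean is 0.2.
print(K.eval(triplet_loss(y_true, y_pred)))  # 0.2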
Example #5
def contrastive_loss(y_true, y_pred):
    '''Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    '''
    margin = 1
    square_pred = K.square(y_pred)
    margin_square = K.square(K.maximum(margin - y_pred, 0))
    return K.mean(y_true * square_pred + (1 - y_true) * margin_square)
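One way to use a loss like this is on the scalar distance output of a Siamese pair. A minimal wiring sketch, assuming tensorflow.keras; the input and layer sizes are illustrative only:

from tensorflow.keras import layers, Model, backend as K

inp_a = layers.Input(shape=(16,))
inp_b = layers.Input(shape=(16,))
encoder = layers.Dense(8, activation='relu')  # shared weights for both branches
emb_a, emb_b = encoder(inp_a), encoder(inp_b)
# Euclidean distance between the two embeddings, one scalar per pair.
distance = layers.Lambda(lambda t: K.sqrt(K.maximum(
    K.sum(K.square(t[0] - t[1]), axis=1, keepdims=True),
    K.epsilon())))([emb_a, emb_b])
model = Model([inp_a, inp_b], distance)
model.compile(optimizer='adam', loss=contrastive_loss)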
Example #6
    def contrastive_loss_contr_data(labels, output):
        # `other_output` and `margin` come from the enclosing scope.
        distances = K.sqrt(K.sum(K.square(output - other_output), axis=0))
        #loss = K.mean((distances + K.maximum(margin-shifted_distances, 0)))
        #loss = K.mean(K.square(distances) + K.square(K.maximum(margin-shifted_distances, 0)))
        #loss = K.mean(distances - shifted_distances)
        loss = K.mean((labels) * K.square(distances) + (1 - labels) *
                      K.square(K.maximum(margin - distances, 0)))

        return loss
Example #7
def categorical_squared_hinge(y_true, y_pred):
    """
    hinge with 0.5*W^2 ,SVM
    """
    y_true = 2. * y_true - 1  # trans [0,1] to [-1,1],注意这个,svm类别标签是-1和1
    vvvv = K.maximum(1. - y_true * y_pred,
                     0.)  # hinge loss,参考keras自带的hinge loss
    #    vvv = K.square(vvvv) # 文章《Deep Learning using Linear Support Vector Machines》有进行平方
    vv = K.sum(vvvv, 1, keepdims=False)  #axis=len(y_true.get_shape()) - 1
    v = K.mean(vv, axis=-1)
    return v
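A quick check of the label mapping, assuming the TensorFlow Keras backend; the raw scores are illustrative only:

import numpy as np
from tensorflow.keras import backend as K

# One-hot labels become {-1, +1}; scores on the right side of the
# margin for both classes give zero loss.
y_true = K.constant(np.array([[1., 0.]], dtype='float32'))
y_pred = K.constant(np.array([[2.0, -2.0]], dtype='float32'))  # raw SVM scores
print(K.eval(categorical_squared_hinge(y_true, y_pred)))  # 0.0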
Example #8
def contrastive_loss_over_distance(labels, distances):
    '''
    :param labels: 1D tensor containing 0 or 1 for each example
    :param distances: 1D tensor of distances between paired embeddings
    :return: per-example contrastive loss (the mean reduction is commented out)
    '''
    margin = 1
    # loss = K.mean((distances + K.maximum(margin-shifted_distances, 0)))
    loss = labels * K.square(distances) + (1 - labels) * K.square(
        K.maximum(margin - distances, 0))
    #loss = K.mean(loss)
    # loss = K.mean(distances - shifted_distances)
    return loss
Example #9
def triplet_loss(y_true, y_pred):
    y_pred = K.l2_normalize(y_pred, axis=1)
    batch = BAT_SIZE  # BAT_SIZE is a global batch-size constant
    # print(batch)
    ref1 = y_pred[0:batch, :]
    pos1 = y_pred[batch:batch + batch, :]
    neg1 = y_pred[batch + batch:3 * batch, :]
    dis_pos = K.sum(K.square(ref1 - pos1), axis=1, keepdims=True)
    dis_neg = K.sum(K.square(ref1 - neg1), axis=1, keepdims=True)
    dis_pos = K.sqrt(dis_pos)
    dis_neg = K.sqrt(dis_neg)
    a1 = 0.6
    d1 = dis_pos + K.maximum(0.0, dis_pos - dis_neg + a1)
    return K.mean(d1)
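This function reads embeddings stacked as [anchors; positives; negatives] along the batch axis and relies on the global BAT_SIZE. A toy invocation under those assumptions, with the TensorFlow Keras backend:

import numpy as np
from tensorflow.keras import backend as K

BAT_SIZE = 2  # the global assumed by triplet_loss above

# 3 * BAT_SIZE rows: anchors, then positives, then negatives.
emb = K.constant(np.random.rand(3 * BAT_SIZE, 4).astype('float32'))
print(K.eval(triplet_loss(None, emb)))  # y_true is ignored by this loss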
Example #10
    def triplet_loss(y_true, y_pred):
        """
        Triplet Loss的损失函数
        """
        anc, pos, neg = y_pred[:, 0:O_DIM], y_pred[:, O_DIM:O_DIM * 2], y_pred[:, O_DIM * 2:]

        # Squared Euclidean distances (no sqrt is taken)
        pos_dist = K.sum(K.square(anc - pos), axis=-1, keepdims=True)
        neg_dist = K.sum(K.square(anc - neg), axis=-1, keepdims=True)
        basic_loss = pos_dist - neg_dist + TripletModel.MARGIN

        loss = K.maximum(basic_loss, 0.0)

        print "[INFO] model - triplet_loss shape: %s" % str(loss.shape)
        return loss
Example #11
    def contrastive_loss_2(labels, im_outputs):
        # `text_outputs` and `margin` come from the enclosing scope.
        distances = K.sqrt(K.sum(K.square(im_outputs - text_outputs), axis=-1))

        first_text = text_outputs[0:1, :]
        last_texts = text_outputs[1:, :]
        shifted_texts = K.concatenate([last_texts, first_text], axis=0)

        shifted_distances = K.sqrt(
            K.sum(K.square(im_outputs - shifted_texts), axis=-1))

        #loss = K.mean((distances + K.maximum(margin-shifted_distances, 0)))
        loss = K.mean((K.square(distances) +
                       K.square(K.maximum(margin - shifted_distances, 0))))
        #loss = K.mean(distances - shifted_distances)

        return loss
Example #12
    def contrastive_loss_on_distance(labels, distances_a):
        # `margin` and `distances_b` come from the enclosing scope;
        # `labels` is unused here.
        # loss = K.mean((distances + K.maximum(margin-shifted_distances, 0)))
        loss = K.mean((K.square(distances_a) +
                       K.square(K.maximum(margin - distances_b, 0))))
        # loss = K.mean(distances - shifted_distances)
        return loss
Example #13
def contrastive_loss(y_true, y_pred):
    margin = 1
    square_pred = K.square(y_pred)
    margin_square = K.square(K.maximum(margin - y_pred, 0))
    return K.mean(y_true * square_pred + (1 - y_true) * margin_square)
Example #14
def euclidean_distance(vects):
    x, y = vects
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))

def margin_loss(y, pred):
    """
    The first part of the loss (classification margin loss).
    """
    return K.mean(K.sum(y * K.square(K.maximum(0.9 - pred, 0)) +
                        0.5 * K.square((1 - y) * K.maximum(pred - 0.1, 0)),
                        axis=1))
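The K.epsilon() clamp in euclidean_distance above keeps the sqrt gradient finite at zero distance. A small check, assuming the TensorFlow Keras backend:

import numpy as np
from tensorflow.keras import backend as K

# Identical vectors: the distance is sqrt(K.epsilon()) (~3.2e-4 by
# default) rather than an exact zero, whose gradient through sqrt
# would be undefined.
x = K.constant(np.ones((2, 3), dtype='float32'))
print(K.eval(euclidean_distance([x, x])))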