Example #1
from keras import backend as K


def contrastive_loss_old(labels, dists):

    label_first = labels[0:1, :]
    other_labels = labels[1:, :]

    labels_shifted = K.concatenate(
        [labels, other_labels, label_first],
        axis=0)  #   [ l1 ........ ln  | l2 ... ln   l1 ]
    labels_orig = K.concatenate(
        [labels, labels], axis=0)  #   [ l1 ........ ln  | l1 ... ln-1 ln ]
    zeros = K.zeros_like(labels_orig)  #   [ 0  ........  0  | 0  ...   0   0 ]
    h = K.cast(K.equal(labels_orig - labels_shifted, zeros),
               dtype='float32')  #   [ 1  1 ......  1  | 0  ...   1   0 ]
    # h:   ALL ONES       |    MOSTLY ZEROS
    # h[i] = 1  where labels_orig[i] == labels_shifted[i]  (in the second half: the i-th image is paired with the (i+1)-th image and both show the same artwork)
    # h[i] = 0  where labels_orig[i] != labels_shifted[i]

    first_dist = dists[0:1]
    other_dists = dists[1:]
    shifted_dists = K.concatenate(
        [dists, other_dists, first_dist],
        axis=0)  # [ d1 ........ dn  | d2 ... dn   d1 ]

    # equation:  Lcon = (1/2N) * SUM_i [ h(i) * d(i)^2 + (1 - h(i)) * max(1 - d(i), 0)^2 ]
    Z = K.zeros_like(shifted_dists)
    max_z_sd = K.maximum(1 - shifted_dists, Z)  # element-wise max(1 - d, 0)
    #max_z_sd = K.sqrt(K.cast(K.shape(shifted_dists)[0], dtype='float32')) - shifted_dists

    first_operand = h * K.square(shifted_dists)  # positive pairs: penalize distance
    second_operand = (1 - h) * K.square(max_z_sd)  # negative pairs: penalize margin violations
    tensor_sum = first_operand + second_operand
    loss_sum = K.sum(tensor_sum, axis=0) / K.cast(K.shape(shifted_dists)[0],
                                                  dtype='float32')

    return K.mean(loss_sum)
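
A quick sanity check of the shift-and-compare trick, evaluated on a toy batch. The shapes and values below are hypothetical; the sketch assumes the function above is in scope and a TensorFlow backend:

import numpy as np
from keras import backend as K

# Toy batch of 4 items; items 0 and 1 share a label (same artwork).
labels = K.constant(np.array([[0], [0], [1], [2]], dtype='float32'))
dists = K.constant(np.array([[0.1], [0.9], [0.8], [0.7]], dtype='float32'))

print(K.eval(contrastive_loss_old(labels, dists)))  # prints a scalar loss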
Example #2
def bias_initializer(shape, *args, **kwargs):
    # Gate order follows Keras's LSTM: [input | forget | cell | output].
    # Keep the user's initializer for the input gate, force the forget-gate
    # bias to ones (the unit_forget_bias trick), and initialize the
    # remaining two gate sections normally.
    return K.concatenate([
        self.bias_initializer((self.units,), *args, **kwargs),
        initializers.Ones()((self.units,), *args, **kwargs),
        self.bias_initializer((self.units * 2,), *args, **kwargs),
    ])
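
This closure is the pattern Keras's own LSTM uses for unit_forget_bias. Below is a minimal sketch of a hypothetical layer wiring it into add_weight; the class name and structure are illustrative, not from the original source:

from keras import backend as K, initializers
from keras.layers import Layer


class MyLSTMLikeCell(Layer):
    def __init__(self, units, **kwargs):
        super(MyLSTMLikeCell, self).__init__(**kwargs)
        self.units = units
        self.bias_initializer = initializers.get('zeros')

    def build(self, input_shape):
        def bias_initializer(shape, *args, **kwargs):
            return K.concatenate([
                self.bias_initializer((self.units,), *args, **kwargs),
                initializers.Ones()((self.units,), *args, **kwargs),
                self.bias_initializer((self.units * 2,), *args, **kwargs),
            ])

        # One bias section per gate: 4 * units entries in total.
        self.bias = self.add_weight(shape=(self.units * 4,),
                                    name='bias',
                                    initializer=bias_initializer)
        super(MyLSTMLikeCell, self).build(input_shape)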
Example #3
def avg_batch_mse_loss(y_true, y_pred):
    # Equivalent per-sample loop, kept for reference:
    # batch_size = K.int_shape(y_pred)[0]
    # loss = 0
    # for i in range(0, batch_size):
    #     loss += mean_squared_error(y_pred[i], y_pred[i - 1])
    # loss /= batch_size

    # Note: the maximum distance in a Euclidean space whose coordinates lie
    # in [0, 1] is the square root of the number of dimensions
    # (1 in 1D, 1.414... in 2D, 1.73... in 3D, 2 in 4D, ...).

    # Rotate the batch by one row so each prediction is compared with the
    # next one.
    y_pred_first_row = y_pred[0:1, :]
    y_pred_other_rows = y_pred[1:, :]
    y_pred_shifted = K.concatenate([y_pred_other_rows, y_pred_first_row],
                                   axis=0)

    # Negated, so minimizing this loss pushes the batch predictions apart.
    return -K.mean(K.square(y_pred - y_pred_shifted))
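
As a usage sketch, this term can be compiled directly as a Keras loss; since it ignores y_true, any dummy target works. The model shape and names below are hypothetical:

import numpy as np
from keras.layers import Dense, Input
from keras.models import Model

inp = Input(shape=(16,))
out = Dense(8, activation='sigmoid')(inp)
model = Model(inp, out)

model.compile(optimizer='adam', loss=avg_batch_mse_loss)
x = np.random.rand(32, 16)
dummy = np.zeros((32, 8))  # ignored by the loss
model.fit(x, dummy, batch_size=32, epochs=1)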
Example #4
    def contrastive_loss_2(labels, im_outputs):
        # Distance between each image embedding and its matching text
        # embedding (text_outputs and margin come from the enclosing scope).
        distances = K.sqrt(K.sum(K.square(im_outputs - text_outputs), axis=-1))

        # Rotate the texts by one row so every image is also paired with a
        # non-matching text.
        first_text = text_outputs[0:1, :]
        last_texts = text_outputs[1:, :]
        shifted_texts = K.concatenate([last_texts, first_text], axis=0)

        shifted_distances = K.sqrt(
            K.sum(K.square(im_outputs - shifted_texts), axis=-1))

        # Pull matching pairs together; push mismatched pairs beyond the
        # margin. Earlier variants kept for reference:
        #loss = K.mean((distances + K.maximum(margin - shifted_distances, 0)))
        loss = K.mean((K.square(distances) +
                       K.square(K.maximum(margin - shifted_distances, 0))))
        #loss = K.mean(distances - shifted_distances)

        return loss
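
The nested definition implies a closure: text_outputs and margin are captured from an enclosing scope. A minimal sketch of that factory pattern, with hypothetical names, so the returned function matches Keras's (y_true, y_pred) loss signature:

from keras import backend as K


def make_contrastive_loss_2(text_outputs, margin=1.0):
    def contrastive_loss_2(labels, im_outputs):
        distances = K.sqrt(
            K.sum(K.square(im_outputs - text_outputs), axis=-1))
        shifted_texts = K.concatenate(
            [text_outputs[1:, :], text_outputs[0:1, :]], axis=0)
        shifted_distances = K.sqrt(
            K.sum(K.square(im_outputs - shifted_texts), axis=-1))
        return K.mean(K.square(distances) +
                      K.square(K.maximum(margin - shifted_distances, 0)))
    return contrastive_loss_2

# image_model.compile(optimizer='adam',
#                     loss=make_contrastive_loss_2(text_branch_output, margin=0.5))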
Example #5
def my_distance_shifted(tensors):
    # Rotate tensors[0] by one row (rows 2..n, then row 1) so each row is
    # compared against the next row's partner in tensors[1].
    first_row = tensors[0][0:1, :]
    last_rows = tensors[0][1:, :]
    shifted_a = K.concatenate([last_rows, first_row], axis=0)
    return my_distance([shifted_a, tensors[1]])
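
A hedged sketch of using this inside a Lambda layer; my_distance is not shown in the excerpt, so a plausible row-wise Euclidean distance is assumed here:

from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model


def my_distance(tensors):
    # Assumed implementation: row-wise Euclidean distance.
    return K.sqrt(K.sum(K.square(tensors[0] - tensors[1]), axis=-1))


a = Input(shape=(8,))
b = Input(shape=(8,))
dist = Lambda(my_distance_shifted)([a, b])
model = Model([a, b], dist)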