Example #1
def quadratic_weighted_kappa_cm(conf_mat, num_ratings, cost_matrix):
    """
	Compute QWK function using confusion matrix.

	:param conf_mat: confusion matrix.
	:param min_rating: lowest rating.
	:param max_rating: highest rating.
	:param cost_matrix: cost_matrix.
	:return: QWK value.
	"""
    conf_mat = K.cast(conf_mat, dtype=K.floatx())

    hist_rater_a = K.cast(K.reshape(K.reduce_sum(conf_mat, axis=1),
                                    shape=[num_ratings, 1]),
                          dtype=K.floatx())  # Sum every row
    hist_rater_b = K.cast(K.reshape(K.reduce_sum(conf_mat, axis=0),
                                    shape=[1, num_ratings]),
                          dtype=K.floatx())  # Sum every column

    num_scored_items = K.reduce_sum(conf_mat)  # Sum all the elements

    expected_count = K.matmul(hist_rater_a, hist_rater_b) / K.cast(
        num_scored_items, dtype=K.floatx())

    numerator = K.reduce_sum(cost_matrix * conf_mat)
    denominator = K.reduce_sum(cost_matrix * expected_count)

    return 1.0 - numerator / denominator
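A minimal usage sketch (hypothetical values; it assumes K in the snippet above resolves to a backend providing cast/reshape/reduce_sum/matmul/floatx, as in the original source):

import numpy as np

num_ratings = 4
# quadratic cost matrix: cost[i, j] = (i - j)^2 / (num_ratings - 1)^2
i, j = np.meshgrid(np.arange(num_ratings), np.arange(num_ratings),
                   indexing='ij')
cost_matrix = ((i - j) ** 2 / (num_ratings - 1) ** 2).astype('float32')

# toy rater-agreement counts
conf_mat = np.array([[5, 1, 0, 0],
                     [1, 6, 2, 0],
                     [0, 1, 7, 1],
                     [0, 0, 1, 4]], dtype='float32')
qwk = quadratic_weighted_kappa_cm(conf_mat, num_ratings, cost_matrix)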
Example #2
def keras_loss(y_true, y_pred, alpha=0.2):
    # anchor: embedding of the original image
    anchor = y_pred[0]
    # positive: embedding of the same person
    positive = y_pred[1]
    # negative: embedding of a different person
    negative = y_pred[2]
    # squared Euclidean distances to the anchor
    pos_dist = K.reduce_sum(K.square(K.subtract(positive, anchor)))
    neg_dist = K.reduce_sum(K.square(K.subtract(negative, anchor)))
    loss = K.maximum(
        K.reduce_sum(K.add(K.subtract(pos_dist, neg_dist), alpha)), 0.0)
    return loss
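A quick sketch of how this might be called (assuming K aliases tensorflow; the embeddings are dummy values):

import numpy as np
import tensorflow as tf

anchor = np.random.rand(1, 128).astype('float32')
positive = anchor + 0.01 * np.random.rand(1, 128).astype('float32')
negative = np.random.rand(1, 128).astype('float32')
y_pred = tf.constant(np.stack([anchor, positive, negative]))
loss = keras_loss(None, y_pred)  # y_true is not used by this loss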
Example #3
    def logp(y_true, y_pred):
        # Y is our n_instances x n_classes tensor of permutations
        # F is n_instances x n_classes (n_classes many latent processes)
        def logsumexp(x):
            masked = K.reshape(
                K.boolean_mask(exped,
                               K.greater_equal(y_pred, K.cast(x, 'float32'))),
                [n, -1])
            return max_entry + K.log(K.reduce_sum(masked, axis=1))

        n, m = K.shape(y_true)[0], K.shape(y_true)[1]
        max_entry = K.reduce_max(y_true)
        exped = K.exp(y_true - max_entry)
        lse = K.map_fn(logsumexp, K.range(m), dtype='float32')
        return K.reduce_sum(y_true) - K.reduce_sum(lse)
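Note that exped, max_entry, and n are defined after the inner logsumexp helper. This works because Python closures resolve free variables at call time, and K.map_fn only invokes logsumexp once those names are bound.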
Example #4
def kernel_pooling(translation_matrix, l_mean, l_sigma):
    """
    function for Lambda layer
    kernel pooling layer
    :param translation_matrix: input translation matrix
    :param l_mean: kp's mean
    :param l_sigma: sigma for kernels
    :return: output_shape = 1 + (hyper_parameter.l_kernel_pool_mean) # added 1 exact match pool
    """
    #TODO
    assert len(l_mean) == len(l_sigma)

    mu = np.array(l_mean)  # add exact match kernel
    mu = mu.reshape((1, 1, len(l_mean)))  # reshape returns a new array
    sigma = np.array(l_sigma)
    sigma = sigma.reshape((1, 1, len(l_sigma)))

    m = TF.expand_dims(translation_matrix, -1)

    # note: TF.sub and TF.mul are pre-TF-1.0 names for subtract/multiply
    raw_k_pool = TF.exp(
        TF.div(TF.negative(TF.square(TF.sub(m, mu))),
               TF.mul(TF.square(sigma), 2)))
    k_pool = TF.reduce_sum(raw_k_pool, [0, 1])
    k_pool = TF.log(TF.maximum(k_pool, 1e-10)) * 0.01

    return k_pool
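A sketch of wiring this into a Keras Lambda layer (the means/sigmas below follow the common K-NRM-style setup, with a near-delta kernel at mu = 1 serving as the exact-match pool; all values are illustrative):

import functools
from keras.layers import Lambda

l_mean = [1.0, 0.9, 0.7, 0.5, 0.3, 0.1, -0.1, -0.3, -0.5, -0.7, -0.9]
l_sigma = [1e-3] + [0.1] * 10  # tiny sigma for the exact-match kernel
kp_layer = Lambda(functools.partial(kernel_pooling,
                                    l_mean=l_mean, l_sigma=l_sigma))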
Example #5
        def gaussian_loss(y_true, y_pred):
            # define loss function (eq 26 of http://arxiv.org/abs/1308.0850)
            x_data, y_data = K.split(y_true[:, 1:], 2, 1)
            eos_data = y_true[:, 0:1]
            pi, mux, muy, sigmax, sigmay, rho = K.split(y_pred[:, 1:], 6, 1)
            eos = y_pred[:, 0:1]
            gaussian = gaussian2d(x_data, y_data, mux, muy, sigmax, sigmay,
                                  rho)
            term1 = K.multiply(gaussian, pi)
            term1 = K.reduce_sum(term1, 1,
                                 keep_dims=True)  # do inner summation
            term1 = -K.log(K.maximum(
                term1, 1e-20))  # clip to avoid log(0) from zero densities

            term2 = K.multiply(eos, eos_data) + K.multiply(
                1 - eos, 1 - eos_data)  # modified Bernoulli -> eos probability
            term2 = -K.log(term2)  # negative log error gives loss

            return K.reduce_sum(term1 + term2)  # do outer summation
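Here gaussian2d is assumed to be a helper defined alongside this function that evaluates the bivariate normal density (eq. 24-25 of the same paper), while pi, mux, muy, sigmax, sigmay, and rho are the mixture parameters entering the inner sum of eq. 26.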
Example #6
def GPU_lid_eval_keras(logits, k=20):
    """
    Calculate LID for a minibatch of training samples based on the outputs
    of the network.

    :param logits: output activations for the minibatch.
    :param k: number of nearest neighbors used in the estimate.
    :return: per-sample LID estimates.
    """
    import time
    start_time = time.time()
    print(logits.shape)
    logits = K.constant(logits, dtype=tf.float32)

    epsilon = 1e-12
    batch_size = K.shape(logits)[0]
    # n_samples = logits.get_shape().as_list()
    # calculate pairwise distance
    r = K.reduce_sum(logits * logits, 1)
    # turn r into column vector
    r1 = K.reshape(r, [-1, 1])
    D = r1 - 2 * K.matmul(logits, K.transpose(logits)) + K.transpose(r1) + \
        K.ones([batch_size, batch_size])

    # find the k nearest neighbor
    D1 = -K.sqrt(D)
    D2, _ = tf.nn.top_k(D1, k=k, sorted=True)
    D3 = -D2[:, 1:]  # skip the x-to-x distance 0 by using [,1:]

    m = K.transpose(K.multiply(K.transpose(D3), 1.0 / D3[:, -1]))
    v_log = K.reduce_sum(tf.log(m + epsilon), axis=1)  # to avoid nan
    lids = -k / v_log
    ## l2 normalization
    # lids = tf.nn.l2_normalize(lids, dim=0, epsilon=epsilon)

    print('LID GPU Time:', time.time() - start_time)
    return lids.eval()  # requires an active TF1-style session
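For reference, the closing lines implement the maximum-likelihood LID estimator: with d_1 <= ... <= d_k the distances to the k nearest neighbors, LID is estimated as -k / sum_i log(d_i / d_k), which is exactly what v_log and lids compute (epsilon guards against log(0)).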
Example #7
def batch_all_triplet_loss(y_true, y_pred, margin, squared=False):
    pairwise_dist = _pairwise_distances(y_pred, squared=squared)

    anchor_positive_dist = K.expand_dims(pairwise_dist, 2)
    anchor_negative_dist = K.expand_dims(pairwise_dist, 1)

    triplet_loss = anchor_positive_dist - anchor_negative_dist + margin

    mask = _get_triplet_mask(y_true)
    mask = K.to_float(mask)
    triplet_loss = K.multiply(mask, triplet_loss)

    triplet_loss = K.maximum(triplet_loss, 0.0)

    valid_triplets = K.to_float(K.greater(triplet_loss, 1e-16))
    num_positive_triplets = K.reduce_sum(valid_triplets)
    num_valid_triplets = K.reduce_sum(mask)
    fraction_positive_triplets = num_positive_triplets / (num_valid_triplets +
                                                          1e-16)

    triplet_loss = K.reduce_sum(triplet_loss) / (num_positive_triplets + 1e-16)

    return triplet_loss, fraction_positive_triplets
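A sketch of calling it (assuming the helpers _pairwise_distances and _get_triplet_mask from the same codebase are in scope; labels and embeddings are dummy values):

import numpy as np
import tensorflow as tf

y_true = tf.constant([0, 0, 1, 1])  # integer class labels
y_pred = tf.constant(np.random.rand(4, 64).astype('float32'))  # embeddings
loss, frac = batch_all_triplet_loss(y_true, y_pred, margin=0.5)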
Example #8
 def func(y_true, y_pred):
     # x is a tensor captured from the enclosing scope
     return K.reduce_sum(y_true * x + (y_true - 1) * x)
Example #9
def K_parzen(x, mu, sigma):
    # log-density of x under a Gaussian Parzen-window estimate centered at mu
    d = (K.expand_dims(x, 1) - K.expand_dims(mu, 0)) / sigma
    e = K_log_mean_exp(-0.5 * K.reduce_sum(K.multiply(d, d), axis=2), axis=1)
    e = K.squeeze(e, axis=1)
    z = K.to_float(K.shape(mu)[1]) * K.log(sigma * np.sqrt(np.pi * 2.0))
    return e - z
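K_log_mean_exp is assumed to be a numerically stable log-mean-exp helper defined alongside this function; sigma is the scalar Parzen-window bandwidth, and the result is the per-sample log-density of x under a Gaussian kernel density estimate centered at the rows of mu.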
Example #11
File: loss.py  Project: beerys/multibox
def add_loss(locations, confidences, batched_bboxes, batched_num_bboxes,
             bbox_priors, location_loss_alpha):
    batch_size = tf.shape(locations)[0]  # locations.get_shape().as_list()[0]

    # ground truth bounding boxes:
    # [batch_size, # of ground truth bounding boxes, 4]
    # we also need to know the number of ground truth bounding boxes for each image in the batch
    # (it can be different for each image...)
    # We could assume 1 for now.

    # Pass the locations, confidences, and ground truth labels to the matching function
    locations = tf.reshape(locations, [-1, 4])
    confidences = tf.reshape(confidences, [-1])

    # add the priors to the predicted residuals
    locations += tf.tile(bbox_priors, [batch_size, 1])

    # add a small epsilon to the confidences
    confidences += SMALL_EPSILON

    # print "Shapes"
    # print locations.get_shape().as_list()
    # print confidences.get_shape().as_list()
    # print batched_bboxes.get_shape().as_list()
    # print batched_num_bboxes.get_shape().as_list()
    params = [
        locations, confidences, batched_bboxes, batched_num_bboxes, batch_size,
        location_loss_alpha
    ]
    matching, stacked_gt_bboxes = tf.py_func(compute_assignments,
                                             params, [tf.int32, tf.float32],
                                             name="bipartite_matching")

    # matching: [num_predictions * batch_size] 0s and 1s for partitioning
    # stacked_gt_bboxes : [total number of gt bboxes for this batch, 4]

    # dynamic partition the bounding boxes and confidences into "positives" and "negatives"
    unmatched_locations, matched_locations = tf.dynamic_partition(
        locations, matching, 2)
    unmatched_confidences, matched_confidences = tf.dynamic_partition(
        confidences, matching, 2)

    # Because we just did a dynamic partition, either the matched or the
    # unmatched matrix could be empty. It could also be the case that there
    # were no ground truth bboxes in this batch. Let's tack on some default
    # values so that the loss calculations are well behaved.
    # note: pre-TF-1.0 signature, tf.concat(axis, values)
    matched_locations = tf.concat(0, [matched_locations, tf.zeros([1, 4])])
    stacked_gt_bboxes = tf.concat(0, [stacked_gt_bboxes, tf.zeros([1, 4])])
    matched_confidences = tf.concat(0, [matched_confidences, tf.ones([1])])
    unmatched_confidences = tf.concat(
        0, [unmatched_confidences, tf.zeros([1])])

    location_loss = location_loss_alpha * tf.nn.l2_loss(matched_locations -
                                                        stacked_gt_bboxes)
    confidence_loss = -1. * tf.reduce_sum(
        tf.log(matched_confidences)) - tf.reduce_sum(
            tf.log((1. - unmatched_confidences) + SMALL_EPSILON))

    # It could be the case that there are no ground truth bounding boxes
    # num_gt_bboxes = tf.reduce_sum(batched_num_bboxes)

    # loc_loss = lambda: location_loss_alpha * tf.nn.l2_loss(matched_locations - stacked_gt_bboxes)
    # zero_loc_loss = lambda: tf.zeros(shape=[])
    # location_loss = tf.cond(num_gt_bboxes > 0, loc_loss, zero_loc_loss)

    # conf_loss = lambda: -1. * tf.reduce_sum(tf.log(matched_confidences)) - tf.reduce_sum(tf.log((1. - unmatched_confidences) + SMALL_EPSILON))
    # all_negative_conf_loss = lambda : -1. * tf.reduce_sum(tf.log((1. - unmatched_confidences) + SMALL_EPSILON))
    # confidence_loss = tf.cond(num_gt_bboxes > 0, conf_loss, all_negative_conf_loss)

    slim.losses.add_loss(location_loss)
    slim.losses.add_loss(confidence_loss)

    return location_loss, confidence_loss
Example #12
def dotLayer(x):
    # Row-wise dot product of x[0] and x[1], keeping the reduced axis
    return Lambda(lambda t: K.reduce_sum(
        K.multiply(t[0], t[1]), axis=-1, keep_dims=True),
                  name='DotLayer')(x)