Example #1
def body(bit_index, value):
    """Body for the while loop executing the binary search."""
    # Tentatively set the current bit of the candidate threshold.
    new_value = tf.bitwise.bitwise_or(value,
                                      tf.bitwise.left_shift(1, bit_index))
    # Count how many scores exceed the candidate, read back as a float.
    larger = larger_count(scores, tf.bitcast(new_value, tf.float32))
    # Keep the bit only while the candidate is still below the k-th score
    # (the test is inverted when the k-th score is negative).
    next_value = tf.where(tf.logical_xor(larger >= k, kth_negative),
                          new_value, value)
    return bit_index - 1, next_value
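A minimal harness for the loop body above, as a hedged sketch: `scores`, `k`, `kth_negative`, and `larger_count` live in the enclosing scope of the original, so the versions here are assumptions for illustration, and the sketch only covers strictly positive scores (the `kth_negative` flag flips the test when the k-th score is negative).

import tensorflow as tf

def larger_count(scores, threshold):
    # Assumed helper: how many scores lie strictly above `threshold`.
    return tf.reduce_sum(tf.cast(scores > threshold, tf.int32))

scores = tf.constant([0.1, 0.7, 0.3, 0.9])
k = 2
kth_negative = larger_count(scores, 0.0) < k  # False: scores are all positive

# Binary-search the int32 bit pattern of a threshold just below the k-th
# largest score, walking from bit 30 (below the sign bit) down to bit 0.
_, pattern = tf.while_loop(lambda bit_index, value: bit_index >= 0,
                           body,
                           (tf.constant(30), tf.constant(0)))
top_k_mask = scores > tf.bitcast(pattern, tf.float32)  # marks the k largest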
Example #2

def do_process_boundary(start_points, end_points, input_length, t1_id,
                        t2_id, all_tokenized_diag):
    """Function that contains the majority of the logic to process boundary."""
    # XOR of the start/end sequence masks flags positions in [start, end).
    masks_start = tf.sequence_mask(start_points, input_length)
    masks_end = tf.sequence_mask(end_points, input_length)
    xor_masks = tf.logical_xor(masks_start, masks_end)
    # Merge the per-segment masks; mask2 is the complement.
    mask1 = tf.reduce_any(xor_masks, axis=0)
    mask2 = tf.logical_not(mask1)
    # Mark the tokens that signal a speaker turn (turn-1 or turn-2 id).
    all_turn1 = tf.equal(all_tokenized_diag, t1_id)
    all_turn2 = tf.equal(all_tokenized_diag, t2_id)
    turn_point = tf.logical_or(all_turn1, all_turn2)
    turn_point = tf.cast(turn_point, dtype=tf.float32)
    return mask1, mask2, turn_point
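A small usage sketch with toy values (the token ids are hypothetical). Each (start, end) pair marks one segment, and the XOR of the two sequence masks flags exactly the positions in [start, end):

import tensorflow as tf

tokens = tf.constant([101, 7, 8, 102, 9, 10, 101, 11])
starts = tf.constant([1, 4])  # segment starts (inclusive)
ends = tf.constant([3, 6])    # segment ends (exclusive)
mask1, mask2, turn_point = do_process_boundary(
    starts, ends, tf.size(tokens), t1_id=101, t2_id=102,
    all_tokenized_diag=tokens)
# mask1      -> [False, True, True, False, True, True, False, False]
# turn_point -> [1., 0., 0., 1., 0., 0., 1., 0.]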
Example #3
import tensorflow as tf


def get_mask(inputs, reverse_mask, data_format='NHWC', dtype=tf.float32):
    """Builds a checkerboard (alternating 0/1) mask that broadcasts against `inputs`."""
    shape = inputs.get_shape().as_list()
    if len(shape) == 2:
        N = shape[-1]
        range_n = tf.range(N)
        odd_ind = tf.mod(range_n, 2)

        odd_ind = tf.reshape(odd_ind, [-1, N])
        checker = odd_ind

    elif len(shape) == 4:
        H = shape[2] if data_format == 'NCHW' else shape[1]
        W = shape[3] if data_format == 'NCHW' else shape[2]

        range_h = tf.range(H)
        range_w = tf.range(W)

        odd_ind_h = tf.cast(tf.mod(range_h, 2), dtype=tf.bool)
        odd_ind_w = tf.cast(tf.mod(range_w, 2), dtype=tf.bool)

        odd_h = tf.tile(tf.expand_dims(odd_ind_h, -1), [1, W])
        odd_w = tf.tile(tf.expand_dims(odd_ind_w, 0), [H, 1])

        # XOR of the row and column parities yields the checkerboard pattern.
        checker = tf.logical_xor(odd_h, odd_w)

        reshape = [-1, 1, H, W] if data_format == 'NCHW' else [-1, H, W, 1]
        checker = tf.reshape(checker, reshape)

    else:
        raise ValueError(
            'Invalid tensor shape. Dimension of the tensor shape must be '
            '2 (NxD) or 4 (NxCxHxW or NxHxWxC), got {}.'.format(
                inputs.get_shape().as_list()))

    if checker.dtype != dtype:
        checker = tf.cast(checker, dtype)

    if reverse_mask:
        # Flip which squares of the checkerboard are active.
        checker = 1. - checker

    return checker
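A usage sketch: checkerboard masks like this one are typically used to split the input of a coupling layer (as in RealNVP), with `reverse_mask` selecting the complementary half. Note the function uses TF1-style `tf.mod`, so this assumes a TF 1.x environment:

import tensorflow as tf

x = tf.zeros([8, 4, 4, 3])              # NHWC batch
mask = get_mask(x, reverse_mask=False)  # shape [1, 4, 4, 1], values {0., 1.}
x_even = x * mask                       # keep one color of the board
x_odd = x * get_mask(x, reverse_mask=True)  # keep the other color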
Example #4

def distance(self, ids_1, ids_2):
    # Look up the boolean feature rows for both id batches.
    ings_1 = tf.gather(self.lookup, ids_1)
    ings_2 = tf.gather(self.lookup, ids_2)
    # XOR marks the features on which the two rows disagree.
    are_different = tf.logical_xor(ings_1, ings_2)
    distance = tf.cast(are_different, dtype=tf.int32)
    return distance
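A minimal sketch of the assumed surrounding class: `self.lookup` is a boolean [num_items, num_features] table (say, items by ingredients), so summing the per-feature disagreements gives a Hamming distance. The class and table here are hypothetical:

import tensorflow as tf

class IngredientTable(object):
    def __init__(self, table):
        # Boolean item-by-feature table, e.g. recipe x ingredient.
        self.lookup = tf.constant(table, tf.bool)

    distance = distance  # attach the method defined above

table = [[True, False, True],
         [True, True, False]]
d = IngredientTable(table).distance(tf.constant([0]), tf.constant([1]))
hamming = tf.reduce_sum(d, axis=-1)  # -> [2]: rows differ in two features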
Example #5
import numbers

import tensorflow as tf


def batch_hard(dists, pids, margin, batch_precision_at_k=None):
    """Computes the batch-hard loss from arxiv.org/abs/1703.07737.

    Args:
        dists (2D tensor): A square all-to-all distance matrix as given by cdist.
        pids (1D tensor): The identities of the entries in `batch`, shape (B,).
            This can be of any type that can be compared, thus also a string.
        margin: The value of the margin if a number, alternatively the string
            'soft' for using the soft-margin formulation, or `None` for not
            using a margin at all.

    Returns:
        A 1D tensor of shape (B,) containing the loss value for each sample.
    """
    with tf.name_scope("batch_hard"):
        same_identity_mask = tf.equal(tf.expand_dims(pids, axis=1),
                                      tf.expand_dims(pids, axis=0))
        negative_mask = tf.logical_not(same_identity_mask)
        positive_mask = tf.logical_xor(
            same_identity_mask, tf.eye(tf.shape(pids)[0], dtype=tf.bool))

        furthest_positive = tf.reduce_max(dists *
                                          tf.cast(positive_mask, tf.float32),
                                          axis=1)
        closest_negative = tf.map_fn(
            lambda x: tf.reduce_min(tf.boolean_mask(x[0], x[1])),
            (dists, negative_mask), tf.float32)
        # Another way of achieving the same, though more hacky:
        # closest_negative = tf.reduce_min(dists + 1e5*tf.cast(same_identity_mask, tf.float32), axis=1)

        diff = furthest_positive - closest_negative
        if isinstance(margin, numbers.Real):
            diff = tf.maximum(diff + margin, 0.0)
        elif margin == 'soft':
            diff = tf.nn.softplus(diff)
        elif margin is None or margin.lower() == 'none':
            pass
        else:
            raise NotImplementedError(
                'The margin {} is not implemented in batch_hard'.format(
                    margin))

    if batch_precision_at_k is None:
        return diff

    # For monitoring, compute the within-batch top-1 accuracy and the
    # within-batch precision-at-k, which is somewhat more expressive.
    with tf.name_scope("monitoring"):
        # This is like argsort along the last axis. Add one to K as we'll
        # drop the diagonal.
        _, indices = tf.nn.top_k(-dists, k=batch_precision_at_k + 1)

        # Drop the diagonal (distance to self is always least).
        indices = indices[:, 1:]

        # Generate the index indexing into the batch dimension.
        # This is something like [[0,0,0],[1,1,1],...,[B-1,B-1,B-1]].
        batch_index = tf.tile(
            tf.expand_dims(tf.range(tf.shape(indices)[0]), 1),
            (1, tf.shape(indices)[1]))

        # Stitch the above together with the argsort indices to get the
        # indices of the top-k of each row.
        topk_indices = tf.stack((batch_index, indices), -1)

        # See if the topk belong to the same person as they should, or not.
        topk_is_same = tf.gather_nd(same_identity_mask, topk_indices)

        # All of the above could be reduced to the simpler following if k==1
        #top1_is_same = get_at_indices(same_identity_mask, top_idxs[:,1])

        topk_is_same_f32 = tf.cast(topk_is_same, tf.float32)
        top1 = tf.reduce_mean(topk_is_same_f32[:, 0])
        prec_at_k = tf.reduce_mean(topk_is_same_f32)

        # Finally, let's get some more info that can help in debugging while
        # we're at it!
        negative_dists = tf.boolean_mask(dists, negative_mask)
        positive_dists = tf.boolean_mask(dists, positive_mask)

        return diff, top1, prec_at_k, topk_is_same, negative_dists, positive_dists
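A usage sketch with toy embeddings. The original code pairs this with a `cdist` helper that produces the [B, B] distance matrix; it is inlined here (as plain Euclidean distance) to keep the sketch self-contained:

import tensorflow as tf

emb = tf.constant([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
pids = tf.constant(['a', 'a', 'b', 'b'])

# All-pairs Euclidean distances, standing in for cdist(emb, emb).
diffs = tf.expand_dims(emb, 1) - tf.expand_dims(emb, 0)
dists = tf.norm(diffs, axis=-1)

losses = batch_hard(dists, pids, margin='soft')  # shape (B,)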