Example #1
def _filter_vert_hori(image, mask):
    is_filled = tf.reduce_all(tf.equal(mask, 1.0))
    is_empty = tf.reduce_all(tf.equal(mask, 0.0))
    is_uniform = tf.logical_or(is_filled, is_empty)
    vert_mean = tf.reduce_mean(mask, axis=0)
    hori_mean = tf.reduce_mean(mask, axis=1)
    is_vertical = tf.reduce_all(
        tf.logical_xor(tf.equal(vert_mean, 0.0),
                       tf.equal(vert_mean, 1.0)))
    is_horizontal = tf.reduce_all(
        tf.logical_xor(tf.equal(hori_mean, 0.0),
                       tf.equal(hori_mean, 1.0)))
    is_vert_or_hori = tf.logical_or(is_vertical, is_horizontal)
    return tf.logical_or(is_uniform, tf.logical_not(is_vert_or_hori))
Example #2
def batch_hard(labels, embeddings, margin=1.0):
    dists = pairwise_distance(embeddings, squared=True)
    with tf.name_scope("batch_hard"):
        same_identity_mask = tf.equal(tf.expand_dims(labels, axis=1),
                                      tf.expand_dims(labels, axis=0))
        negative_mask = tf.logical_not(same_identity_mask)
        positive_mask = tf.logical_xor(
            same_identity_mask, tf.eye(tf.shape(labels)[0], dtype=tf.bool))

        furthest_positive = tf.reduce_max(dists *
                                          tf.cast(positive_mask, tf.float32),
                                          axis=1)
        closest_negative = tf.map_fn(
            lambda x: tf.reduce_min(tf.boolean_mask(x[0], x[1])),
            (dists, negative_mask), tf.float32)
        # Another way of achieving the same, though more hacky:
        # closest_negative = tf.reduce_min(dists + 1e5*tf.cast(same_identity_mask, tf.float32), axis=1)

        diff = furthest_positive - closest_negative
        if isinstance(margin, numbers.Real):
            diff = tf.maximum(diff + margin, 0.0)
            print('hard Margin Utilized')
        elif margin == 'soft':
            diff = tf.nn.softplus(diff)
            print('Soft-margin Utilized')
        elif margin.lower() == 'none':
            pass
        else:
            raise NotImplementedError(
                'The margin {} is not implemented in batch_hard'.format(
                    margin))

    return diff
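A quick aside on the mask construction above (my own sketch, not part of the scraped example; it assumes TF 2.x eager execution and a toy label vector): XOR-ing the same-identity mask with the identity matrix is what drops the trivial self-pairs from the positive mask.

import tensorflow as tf

labels = tf.constant([0, 0, 1, 1])
same = tf.equal(tf.expand_dims(labels, 1), tf.expand_dims(labels, 0))
# tf.cast(tf.eye(...)) mirrors the tf.eye(..., dtype=tf.bool) used above
positive = tf.logical_xor(same, tf.cast(tf.eye(4), tf.bool))
print(positive.numpy())
# [[False  True False False]
#  [ True False False False]
#  [False False False  True]
#  [False False  True False]]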
Example #3
    def evaluate(self, outputs, references, reference_seq_length):
        '''evaluate the output of the decoder

        args:
            outputs: the outputs of the decoder as a dictionary
            references: the references as a dictionary
            reference_seq_length: the sequence lengths of the references

        Returns:
            the error of the outputs
        '''

        # compute the fraction of labels that differ (label error rate)
        losses = []
        for o in outputs:
            numerrors = tf.reduce_sum(
                tf.cast(tf.logical_xor(outputs[o][0], references[o]),
                        tf.float32))
            numlabels = tf.cast(tf.reduce_sum(reference_seq_length[o]),
                                tf.float32)
            losses.append(numerrors / numlabels)

        loss = tf.reduce_mean(losses)

        return loss
Example #4
def compute_mistakes(box_x, box_y, box_w, box_c_sim, target_x, target_y,
                     target_w, target_is_plane, grid_nn):
    DETECTION_TRESHOLD = 0.5  # plane "detected" if predicted C>0.5 TODO: refactor this
    ERROR_TRESHOLD = 0.3  # plane correctly localized if predicted x,y,w within % of ground truth
    detect_correct = tf.logical_not(
        tf.logical_xor(tf.greater(box_c_sim, DETECTION_TRESHOLD),
                       target_is_plane))
    ones = tf.ones(tf.shape(target_w))
    nonzero_target_w = tf.where(target_is_plane, target_w, ones)
    # true if correct size where there is a plane, nonsense value where there is no plane
    size_correct = tf.less(
        tf.abs(box_w - target_w) / nonzero_target_w, ERROR_TRESHOLD)
    # true if correct position where there is a plane, nonsense value where there is no plane
    position_correct = tf.less(
        tf.sqrt(tf.square(box_x - target_x) + tf.square(box_y - target_y)) /
        nonzero_target_w / grid_nn, ERROR_TRESHOLD)
    truth_no_plane = tf.logical_not(target_is_plane)
    size_correct = tf.logical_or(size_correct, truth_no_plane)
    position_correct = tf.logical_or(position_correct, truth_no_plane)
    size_correct = tf.logical_and(detect_correct, size_correct)
    position_correct = tf.logical_and(detect_correct, position_correct)
    all_correct = tf.logical_and(size_correct, position_correct)
    mistakes = tf.reduce_sum(tf.cast(tf.logical_not(all_correct), tf.int32),
                             axis=[1, 2, 3])  # shape [batch]
    return mistakes, size_correct, position_correct, all_correct
Example #5
def micro_f1(self, pred, label):
    tp = tf.reduce_mean(tf.reduce_sum(pred * label, axis=1))
    fn = tf.reduce_mean(
        tf.reduce_sum(
            tf.cast(tf.logical_xor(tf.cast(pred, tf.bool),
                                   tf.cast(label, tf.bool)),
                    tf.float32) * label,
            axis=1))
    fp = tf.reduce_mean(
        tf.reduce_sum(
            tf.cast(tf.logical_xor(tf.cast(pred, tf.bool),
                                   tf.cast(label, tf.bool)),
                    tf.float32) * pred,
            axis=1))
    p = tp / (tp + fp + 1e-6)
    r = tp / (tp + fn + 1e-6)
    f1 = 2 * p * r / (p + r + 1e-6)
    return p, r, f1
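Hedged illustration of the false-positive/false-negative trick above (assumes TF 2.x eager and a made-up one-row batch): the XOR marks every disagreement, and multiplying by `label` or `pred` splits the disagreements into false negatives and false positives.

import tensorflow as tf

pred = tf.constant([[1., 1., 0.]])
label = tf.constant([[1., 0., 1.]])
diff = tf.cast(tf.logical_xor(tf.cast(pred, tf.bool), tf.cast(label, tf.bool)),
               tf.float32)
print((diff * label).numpy())  # [[0. 0. 1.]] -> false negative at index 2
print((diff * pred).numpy())   # [[0. 1. 0.]] -> false positive at index 1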
Example #6
    def _confusion_metrics(self, predict, real):
        predictions = tf.greater(predict, 0)
        actuals = tf.greater(real, 0)
        differ = tf.logical_xor(actuals, predictions)
        same = tf.logical_not(differ)

        tp = tf.reduce_sum(tf.cast(tf.logical_and(same, actuals), tf.float32))
        tn = tf.reduce_sum(
            tf.cast(tf.logical_and(same, tf.logical_not(actuals)), tf.float32))

        fp = tf.reduce_sum(
            tf.cast(tf.logical_and(differ, predictions), tf.float32))
        fn = tf.reduce_sum(
            tf.cast(tf.logical_and(differ, tf.logical_not(predictions)),
                    tf.float32))

        tpr = tp / (tp + fn)
        fpr = fp / (fp + tn)
        fnr = fn / (tp + fn)

        accuracy = (tp + tn) / (tp + fp + fn + tn)

        recall = tpr
        precision = tp / (tp + fp)

        f1_score = (2 * (precision * recall)) / (precision + recall + 1e-8)

        return accuracy, recall, precision, f1_score
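A small eager check of the XOR-based split above (my sketch, assuming TF 2.x and toy predictions): `differ` marks disagreements, so AND-ing it with the predictions or their negation recovers the false-positive and false-negative counts.

import tensorflow as tf

predictions = tf.constant([True, True, False, False])
actuals = tf.constant([True, False, True, False])
differ = tf.logical_xor(actuals, predictions)
fp = tf.reduce_sum(tf.cast(tf.logical_and(differ, predictions), tf.int32))
fn = tf.reduce_sum(tf.cast(tf.logical_and(differ, tf.logical_not(predictions)),
                           tf.int32))
print(fp.numpy(), fn.numpy())  # 1 1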
Example #7
        def loop_body(step_size, last_acceptance_rate, cond):
            # Calculate acceptance_rate
            new_q, new_p = leapfrog_integrator(
                q, p, tf.constant(0.0), step_size / 2,
                get_gradient, mass)
            new_q, new_p = leapfrog_integrator(
                new_q, new_p, step_size, step_size / 2,
                get_gradient, mass)
            __, _, _, _, acceptance_rate = get_acceptance_rate(
                q, p, new_q, new_p,
                get_log_posterior, mass, self.data_axes)

            acceptance_rate = tf.reduce_mean(acceptance_rate)

            # Change step size and stopping criteria
            new_step_size = tf.cond(
                tf.less(acceptance_rate,
                        self.target_acceptance_rate),
                lambda: step_size * (1.0 / factor),
                lambda: step_size * factor)

            cond = tf.logical_not(tf.logical_xor(
                tf.less(last_acceptance_rate, self.target_acceptance_rate),
                tf.less(acceptance_rate, self.target_acceptance_rate)))
            return [new_step_size, acceptance_rate, cond]
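Hedged paraphrase of the stopping test above (sketch only, TF 2.x eager, invented rates): the loop keeps adapting while the previous and current acceptance rates sit on the same side of the target, i.e. NOT(below_last XOR below_now).

import tensorflow as tf

def keep_adapting(last_rate, rate, target=0.8):
    below_last = tf.less(last_rate, target)
    below_now = tf.less(rate, target)
    # True while both rates are on the same side of the target
    return tf.logical_not(tf.logical_xor(below_last, below_now))

print(keep_adapting(0.6, 0.7).numpy())  # True  (still below the target)
print(keep_adapting(0.6, 0.9).numpy())  # False (crossed the target, stop)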
Example #8
def bh_quadruplet_loss(dists, labels):
    # Defines the "batch hard" quadruplet loss function.

    same_identity_mask = tf.equal(tf.expand_dims(labels, axis=1),
                                  tf.expand_dims(labels, axis=0))

    negative_mask = tf.logical_not(same_identity_mask)

    positive_mask = tf.logical_xor(same_identity_mask,
                                   tf.eye(tf.shape(labels)[0], dtype=tf.bool))

    different_mask = tf.logical_and(
        negative_mask, positive_mask)  #create the different probe of data

    furthest_positive = tf.reduce_max(dists *
                                      tf.cast(positive_mask, tf.float32),
                                      axis=1)
    closest_negative = tf.map_fn(
        lambda x: tf.reduce_min(tf.boolean_mask(x[0], x[1])),
        (dists, negative_mask), tf.float32)

    different_negative = tf.map_fn(
        lambda x: tf.reduce_min(tf.boolean_mask(x[0], x[1])),
        (dists, different_mask), tf.float32)

    diff = 2 * furthest_positive - closest_negative - different_negative

    return tf.maximum(diff + TL_MARGIN, 0.0)
Example #9
def build_sigmoid_cross_entropy_loss(logits, gold, indices, probs):
  """Builds sigmoid cross entropy loss."""

  # Filter out entries where gold <= -1, which are batch padding entries.
  valid = tf.greater(gold, -1)
  valid_ix = tf.reshape(tf.where(valid), [-1])
  valid_gold = tf.gather(gold, valid_ix)
  valid_indices = tf.gather(indices, valid_ix)
  valid_probs = tf.gather(probs, valid_ix)

  # NB: tf.gather_nd() raises an error on CPU for out-of-bounds indices.  That's
  # why we need to filter out the gold=-1 batch padding above.
  valid_pairs = tf.stack([valid_indices, valid_gold], axis=1)
  valid_logits = tf.gather_nd(logits, valid_pairs)
  cost = tf.reduce_sum(
      tf.nn.sigmoid_cross_entropy_with_logits(
          labels=valid_probs,
          logits=valid_logits,
          name='sigmoid_cross_entropy_with_logits'))

  gold_bool = valid_probs > 0.5
  predicted_bool = valid_logits > 0.0
  total = tf.size(gold_bool)
  with tf.control_dependencies([
      tf.assert_equal(
          total, tf.size(predicted_bool), name='num_predicted_gold_mismatch')
  ]):
    agreement_bool = tf.logical_not(tf.logical_xor(gold_bool, predicted_bool))
  correct = tf.reduce_sum(tf.to_int32(agreement_bool))

  cost.set_shape([])
  correct.set_shape([])
  total.set_shape([])
  return cost, correct, total, gold
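Side note on the agreement test above (my observation, checked with a toy TF 2.x snippet): `tf.logical_not(tf.logical_xor(a, b))` is Boolean equality, so `tf.equal` would give the same result.

import tensorflow as tf

gold = tf.constant([True, False, True])
pred = tf.constant([True, True, True])
agree_xor = tf.logical_not(tf.logical_xor(gold, pred))
agree_eq = tf.equal(gold, pred)
print(tf.reduce_all(tf.equal(agree_xor, agree_eq)).numpy())  # True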
Example #10
def test_logical_xor():
    with tf.Graph().as_default():
        in1 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name='in1')
        in2 = tf.placeholder(tf.bool, shape=[1, 4, 4, 3], name='in2')
        out = tf.logical_xor(in1, in2, name='out')
        in_data1 = np.random.choice(a=[False, True],size=(1, 4, 4, 3)).astype('bool')
        in_data2 = np.random.choice(a=[False, True],size=(1, 4, 4, 3)).astype('bool')
        compare_tf_with_tvm([in_data1, in_data2], ['in1:0', 'in2:0'], 'out:0')
Example #11
def augment(self, input_ids, input_dicts):
    if self.dict_augment_rate > 0:
        flip_mask = tf.random.uniform(
            tf.shape(input_dicts)) < self.dict_augment_rate
        # flip if flip mask is true
        input_dicts = tf.cast(input_dicts, dtype=tf.bool)
        input_dicts = tf.logical_xor(input_dicts, flip_mask)
        input_dicts = tf.cast(input_dicts, dtype=tf.int64)
    return input_ids, input_dicts
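Sketch of the same bit-flip idea in isolation (assumes TF 2.x eager and a made-up flip rate of 0.5): XOR with a random Boolean mask flips exactly the positions where the mask is True.

import tensorflow as tf

bits = tf.constant([0, 1, 1, 0], dtype=tf.int64)
flip_mask = tf.random.uniform(tf.shape(bits)) < 0.5  # hypothetical flip rate
flipped = tf.cast(tf.logical_xor(tf.cast(bits, tf.bool), flip_mask), tf.int64)
print(bits.numpy(), flip_mask.numpy(), flipped.numpy())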
Example #12
def selective_strategy(t1_prediction, t2_prediction, t1_entropy, t2_entropy):
    '''
    Selection: returns a mask tensor encoding the per-pixel selection strategy.
    :param t1_prediction: [B, H, W, 2]
    :param t2_prediction: [B, H, W, 2]
    :param t1_entropy: [B, H, W]
    :param t2_entropy: [B, H, W]
    :return: [B, H, W]
    '''
    t1_prediction = tf.keras.backend.argmax(t1_prediction, axis=-1)
    t2_prediction = tf.keras.backend.argmax(t2_prediction, axis=-1)
    zeros_mask = tf.keras.backend.zeros_like(t1_prediction)
    ones_mask = tf.keras.backend.ones_like(t1_prediction)
    twos_mask = tf.keras.backend.ones_like(t1_prediction) * 2
    selective_mask = tf.keras.backend.zeros_like(t1_prediction)
    both_pos_mask = tf.where(
        tf.logical_and(tf.equal(t1_prediction, 1), tf.equal(t2_prediction, 1)),
        ones_mask, zeros_mask)
    both_neg_mask = tf.where(
        tf.logical_and(tf.equal(t1_prediction, 0), tf.equal(t2_prediction, 0)),
        ones_mask, zeros_mask)
    single_pos_mask = tf.where(
        tf.logical_xor(tf.equal(t1_prediction, 1), tf.equal(t2_prediction, 1)),
        ones_mask, zeros_mask)
    t1_less_t2_mask = tf.where(tf.less_equal(t1_entropy, t2_entropy),
                               ones_mask, twos_mask)

    # Set positions where t1 predicts 1 and t2 predicts 0 to 1
    selective_mask = tf.where(
        tf.logical_and(tf.equal(single_pos_mask, 1),
                       tf.equal(t1_prediction, 1)), ones_mask, selective_mask)
    # Set positions where t1 predicts 0 and t2 predicts 1 to 2
    selective_mask = tf.where(
        tf.logical_and(tf.equal(single_pos_mask, 1),
                       tf.equal(t2_prediction, 1)), twos_mask, selective_mask)
    # Where both predict 0, set positions where t1's entropy <= t2's to 1
    selective_mask = tf.where(
        tf.logical_and(tf.equal(both_neg_mask, 1),
                       tf.equal(t1_less_t2_mask, 1)), ones_mask,
        selective_mask)
    # Where both predict 0, set positions where t1's entropy > t2's to 2
    selective_mask = tf.where(
        tf.logical_and(tf.equal(both_neg_mask, 1),
                       tf.equal(t1_less_t2_mask, 2)), twos_mask,
        selective_mask)
    # Where both predict 1, set positions where t1's entropy <= t2's to 1
    selective_mask = tf.where(
        tf.logical_and(tf.equal(both_pos_mask, 1),
                       tf.equal(t1_less_t2_mask, 1)), ones_mask,
        selective_mask)
    # Where both predict 1, set positions where t1's entropy > t2's to 2
    selective_mask = tf.where(
        tf.logical_and(tf.equal(both_pos_mask, 1),
                       tf.equal(t1_less_t2_mask, 2)), twos_mask,
        selective_mask)
    return selective_mask
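Quick eager check of the "exactly one positive" test used above (my toy example, TF 2.x assumed): the XOR of the two equality masks is True only where a single branch predicts class 1.

import tensorflow as tf

t1 = tf.constant([1, 0, 1, 0])
t2 = tf.constant([1, 1, 0, 0])
single_pos = tf.logical_xor(tf.equal(t1, 1), tf.equal(t2, 1))
print(single_pos.numpy())  # [False  True  True False]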
Example #13
def contrastive_loss(y, y_, y_p, y_p_):

    # same(1) or not same(0) label
    ybin_ = tf.cast(y_, tf.bool)
    ybin_p_ = tf.cast(y_p_, tf.bool)
    label = tf.cast(tf.logical_not(tf.logical_xor(ybin_, ybin_p_)), tf.float32)

    # distance
    d = tf.sqrt(tf.reduce_sum(tf.square(y - y_p),1))

    return tf.reduce_mean(label*tf.square(d) + (1-label)*tf.square(tf.maximum(0., 1-d)))
Example #14
def compare_embeddings(nested):
    elem_embedding = nested[0]
    elem_class = nested[1]
    same_class = tf.equal(elem_class, labels)  # True if belongs to class
    difference = tf.norm(tf.subtract(elem_embedding, embeddings), axis=1)
    less_than_margin = tf.less(difference, loss_margin)
    different_classes = tf.logical_not(same_class)
    return tf.logical_and(
        less_than_margin, same_class), same_class, tf.logical_and(
            less_than_margin,
            different_classes), different_classes, tf.logical_not(
                tf.logical_xor(less_than_margin, same_class))
Example #15
def batch_hard(embeddings, pids, margin, metric):
    """Computes the batch-hard loss from arxiv.org/abs/1703.07737.

    Args:
        embeddings (2D tensor): The embeddings of the batch, shape (B, F);
            the square all-to-all distance matrix is computed from them
            with `cdist`.
        pids (1D tensor): The identities of the entries in `batch`, shape (B,).
            This can be of any type that can be compared, thus also a string.
        margin: The value of the margin if a number, alternatively the string
            'soft' for using the soft-margin formulation, or `None` for not
            using a margin at all.
        metric: The distance metric passed to `cdist`.

    Returns:
        A 1D tensor of shape (B,) containing the loss value for each sample.
    """
    with tf.name_scope("batch_hard"):
        dists = cdist(embeddings, embeddings, metric=metric)

        same_identity_mask = tf.equal(tf.expand_dims(pids, axis=1),
                                      tf.expand_dims(pids, axis=0))
        # print(pids)
        # dists = tf.Print(dists, [dists], "Pair Dist", summarize=1000000)
        # same_identity_mask = tf.Print(same_identity_mask,[same_identity_mask, pids],"Hello World" ,summarize=1000000)
        negative_mask = tf.logical_not(same_identity_mask)
        positive_mask = tf.logical_xor(same_identity_mask,
                                       tf.eye(tf.shape(pids)[0], dtype=tf.bool))

        furthest_dist = dists*tf.cast(positive_mask, tf.float32)
        furthest_positive = tf.reduce_max(furthest_dist, axis=1)
        closest_negative = tf.map_fn(lambda x: tf.reduce_min(tf.boolean_mask(x[0], x[1])),
                                     (dists, negative_mask), tf.float32)



        diff = (furthest_positive - closest_negative)
        diff = tf.squeeze(diff)
        #print(prefix,diff)
        # negative_idx = pids[negative_idx]
        if isinstance(margin, numbers.Real):
            diff_result = tf.maximum(diff + margin, 0.0)
            assert_op = tf.Assert(tf.equal(tf.rank(diff), 1), ['Rank of diff must be equal to 1.'])
            with tf.control_dependencies([assert_op]):
                diff  = diff_result
        elif margin == 'soft':
            diff_result = tf.nn.softplus(diff)
            assert_op = tf.Assert(tf.equal(tf.rank(diff), 1), ['Rank of diff must be equal to 1.'])
            with tf.control_dependencies([assert_op]):
                diff = diff_result
        elif margin.lower() == 'none':
            pass
        else:
            raise NotImplementedError(
                'The margin {} is not implemented in batch_hard'.format(margin))
        return diff
Example #16
def batch_hard_loss(features, pids, metric='euclidean', margin=0.1):
    """Computes the batch-hard loss from arxiv.org/abs/1703.07737.
    Args:
        features (2D tensor): The features/embeddings of the batch, shape (B, F);
            the square all-to-all distance matrix is computed from them
            with `_cdist`.
        pids (1D tensor): The identities of the entries in `batch`, shape (B,).
            This can be of any type that can be compared, thus also a string.
        metric: The distance metric passed to `_cdist`.
        margin: The value of the margin if a number, alternatively the string
            'soft' for using the soft-margin formulation, or `None` for not
            using a margin at all.
    Returns:
        A 1D tensor of shape (B,) containing the loss value for each sample.
    """
    with tf.name_scope("batch_hard_loss"):

        dists = _cdist(features, features, metric=metric)

        pids = tf.argmax(pids, axis=1)

        exp_dims0 = tf.expand_dims(pids, axis=0)
        exp_dims1 = tf.expand_dims(pids, axis=1)

        same_identity_mask = tf.equal(exp_dims1, exp_dims0)

        negative_mask = tf.logical_not(same_identity_mask)
        positive_mask = tf.logical_xor(
            same_identity_mask, tf.eye(tf.shape(pids)[0], dtype=tf.bool))

        furthest_positive = tf.reduce_max(dists *
                                          tf.cast(positive_mask, tf.float32),
                                          axis=1)
        # closest_negative = tf.map_fn(lambda x: tf.reduce_min(tf.boolean_mask(x[0], x[1])),
        #                              (dists, negative_mask), tf.float32)
        # Another way of achieving the same, though more hacky:
        closest_negative = tf.reduce_min(
            dists + 1e5 * tf.cast(same_identity_mask, tf.float32), axis=1)

        diff = furthest_positive - closest_negative
        if isinstance(margin, numbers.Real):
            diff = tf.maximum(diff + margin, 0.0)
        elif margin == 'soft':
            diff = tf.nn.softplus(diff)
        elif margin is None:
            pass
        else:
            raise NotImplementedError(
                'The margin {} is not implemented in batch_hard_loss'.format(
                    margin))

    return diff
Example #17
def _hamming_loss(self):
    with tf.name_scope("train_hamming_loss"):
        predicted_labels = tf.cast(tf.round(self.prediction),
                                   dtype=tf.int32)
        self.hamming_loss = tf.reduce_mean(
            tf.cast(tf.logical_xor(
                tf.cast(self.targets, dtype=tf.bool),
                tf.cast(predicted_labels, dtype=tf.bool)),
                    dtype=tf.float32))
        self.hamming_summ = tf.summary.scalar("hamming_loss",
                                              self.hamming_loss)
        return self.hamming_loss
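Minimal eager sketch of the Hamming loss computed above (my toy multi-label batch, TF 2.x assumed): the mean of the element-wise XOR is the fraction of label positions where prediction and target disagree.

import tensorflow as tf

targets = tf.constant([[1, 0, 1], [0, 1, 0]])
preds = tf.constant([[1, 1, 1], [0, 0, 0]])
xor = tf.logical_xor(tf.cast(targets, tf.bool), tf.cast(preds, tf.bool))
print(tf.reduce_mean(tf.cast(xor, tf.float32)).numpy())  # 0.33333334 (2 of 6 differ)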
Example #18
    def log_pdf(self, t):
        """Log of PDF of the distribution.

    Args:
      t: time instance. Tensor of shape [batch_size]. TODO--> [batch_size, 1]

    Returns:
      Value of log of PDF. Tensor of shape [batch_size, 1].
    """
        t = tf.squeeze(t)
        t, last_slot_hr = self._bucketize_t(t)

        ones = tf.fill(tf.shape(t), 1)
        # shape [batch_size, TIME_LEN]
        seq_mask_t_1 = tf.sequence_mask(tf.cast(t - ones, tf.int32),
                                        maxlen=self._time_len)
        # shape [TIME_LEN, batch_size]
        lambda_tensor = self._hazard_tensor

        # shape [batch_size, TIME_LEN], multiply supports broadcast.
        lambda_tensor_t_1 = tf.multiply(tf.transpose(lambda_tensor),
                                        tf.cast(seq_mask_t_1, tf.float32))

        # shape [batch_size, TIME_LEN]
        seq_mask_t = tf.sequence_mask(tf.cast(t, tf.int32),
                                      maxlen=self._time_len)
        # shape [batch_size, TIME_LEN]
        mask_at_t = tf.logical_xor(seq_mask_t, seq_mask_t_1)

        # shape [batch_size]
        selected_lambda_tensor_at_t = tf.boolean_mask(
            tf.transpose(lambda_tensor), mask_at_t)
        # selected_lambda_tensor_at_t = tf.Print(
        #    selected_lambda_tensor_at_t, [selected_lambda_tensor_at_t],
        #    'selected_lambda_tensor_at_t',
        #    summarize=self._time_len)

        # shape [batch_size, 1]
        result = tf.reduce_sum(
            tf.log(1 - lambda_tensor_t_1), axis=-1, keepdims=True) + tf.log(
                tf.reshape(selected_lambda_tensor_at_t, [-1, 1]))

        if self._model_hparams.last_slot_loss:
            # last_slot_hr is the position of t in terms of #hrs in the last slot.
            # tf.multiply performs element-wise multiplication along batch dimension.
            result = result + tf.reduce_sum(tf.multiply(
                tf.log(1 - lambda_tensor[self._time_len - 1]),
                tf.cast(last_slot_hr, tf.float32)),
                                            axis=-1,
                                            keepdims=True)

        return result
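The XOR of the two sequence masks above acts as a one-hot selector: mask(t) XOR mask(t-1) is True only at position t-1. A tiny eager illustration (my sketch, TF 2.x, toy lengths):

import tensorflow as tf

t = tf.constant([2, 4])
m_t = tf.sequence_mask(t, maxlen=5)
m_t_1 = tf.sequence_mask(t - 1, maxlen=5)
print(tf.logical_xor(m_t, m_t_1).numpy())
# [[False  True False False False]
#  [False False False  True False]]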
Example #19
def _generic_batchloss(dists,
                       pids,
                       margin,
                       batch_precision_at_k=None,
                       variant='hard'):
    """Computes the batch-hard loss from arxiv.org/abs/1703.07737.
    Args:
        dists (2D tensor): A square all-to-all distance matrix as given by cdist.
        pids (1D tensor): The identities of the entries in `batch`, shape (B,).
            This can be of any type that can be compared, thus also a string.
        margin: The value of the margin if a number, alternatively the string
            'soft' for using the soft-margin formulation, or `None` for not
            using a margin at all.
    Returns:
        A 1D tensor of shape (B,) containing the loss value for each sample.
    """
    with tf.name_scope("batch_hard"):
        same_identity_mask = tf.equal(tf.expand_dims(pids, axis=1),
                                      tf.expand_dims(pids, axis=0))
        negative_mask = tf.logical_not(same_identity_mask)
        positive_mask = tf.logical_xor(
            same_identity_mask, tf.eye(tf.shape(pids)[0], dtype=tf.bool))

        if variant == 'sample':
            # -inf gives that index a probability of zero.
            neg_infs = -tf.constant(float('inf')) * tf.ones_like(dists)
            # higher logits are more likely to be sampled.
            pos_logits = tf.where(positive_mask, dists, neg_infs)
            pos_indices = tf.multinomial(pos_logits, num_samples=1)[:, 0]
            positive = get_at_indices(dists, pos_indices)

            # Same for the negatives, but we need to turn the logits around,
            # since we want to sample the smaller distances more likely.
            neg_logits = tf.where(negative_mask, -dists, neg_infs)
            neg_indices = tf.multinomial(neg_logits, num_samples=1)[:, 0]
            negative = get_at_indices(dists, neg_indices)
        elif variant == 'hard':
            # Furthest one is worst positive.
            positive = tf.reduce_max(dists *
                                     tf.cast(positive_mask, tf.float32),
                                     axis=1)
            # Closest one is worst negative.
            negative = tf.map_fn(
                lambda x: tf.reduce_min(tf.boolean_mask(x[0], x[1])),
                (dists, negative_mask), tf.float32)
            # negative = tf.reduce_min(dists + 1e5*tf.cast(same_identity_mask, tf.float32), axis=1)

        losses = apply_margin(positive - negative, margin)

    return return_with_extra_stats(losses, dists, batch_precision_at_k,
                                   same_identity_mask, positive_mask,
                                   negative_mask)
Example #20
def do_process_boundary(start_points, end_points, input_length, t1_id,
                        t2_id, all_tokenized_diag):
    """Function that contains the majority of the logic to process the boundary."""
    masks_start = tf.sequence_mask(start_points, input_length)
    masks_end = tf.sequence_mask(end_points, input_length)
    xor_masks = tf.logical_xor(masks_start, masks_end)
    mask1 = tf.reduce_any(xor_masks, axis=0)
    mask2 = tf.logical_not(mask1)
    all_turn1 = tf.equal(all_tokenized_diag, t1_id)
    all_turn2 = tf.equal(all_tokenized_diag, t2_id)
    turn_point = tf.logical_or(all_turn1, all_turn2)
    turn_point = tf.cast(turn_point, dtype=tf.float32)
    return mask1, mask2, turn_point
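The XOR of a start mask and an end mask, as used above, selects the half-open span [start, end). A tiny eager illustration with made-up boundaries (TF 2.x assumed):

import tensorflow as tf

start = tf.constant([2])
end = tf.constant([5])
span = tf.logical_xor(tf.sequence_mask(start, 8), tf.sequence_mask(end, 8))
print(span.numpy())  # [[False False  True  True  True False False False]]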
Example #21
def _true_fn():
    prev_max_step = tf.argmax(prev_alignments, axis=-1)
    max_steps = tf.shape(energies)[-1]
    win_mask = tf.logical_xor(
        tf.sequence_mask(
            prev_max_step -
            np.ceil(self.synthesis_win_size / 2).astype(np.int64),
            max_steps),
        tf.sequence_mask(
            prev_max_step +
            np.floor(self.synthesis_win_size / 2).astype(np.int64),
            max_steps))
    mask_paddings = -np.inf * tf.ones_like(energies)
    return tf.where(win_mask, energies, mask_paddings)
Example #22
def discount_trajectory_op(rewards, terms, traj_ends, gamma, ev):
    # Predict EV for bootstrap states
    # EV len should be equal to the trajectory len or bootstrap states len
    bootstrap_idx = tf.logical_xor(traj_ends, terms)
    num_bootstrap = tf.reduce_sum(tf.cast(bootstrap_idx, 'int32'))
    ev = tf.cond(tf.equal(num_bootstrap, 0),
                 lambda: tf.zeros_like(traj_ends, 'float32'), lambda: ev)
    with tf.device("/cpu:0"):
        discount = tf.py_func(utils.discount_trajectory,
                              [rewards, terms, traj_ends, gamma, ev],
                              tf.float32)
    # If the batch consists of randomly shuffled samples (usually used in Replay trainers):
    # discount = rewards + gamma * ev
    return discount
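Hedged illustration of `bootstrap_idx` above (toy flags, TF 2.x eager): a trajectory end that is not a terminal state is exactly what needs value bootstrapping, assuming every terminal is also marked as a trajectory end.

import tensorflow as tf

traj_ends = tf.constant([False, True, False, True])
terms = tf.constant([False, True, False, False])
print(tf.logical_xor(traj_ends, terms).numpy())  # [False False False  True]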
Example #23
    def __call__(self, query, state, z=None):
        """Score the query based on the keys and values.

        This replaces the superclass implementation in order to add in the location
        term.

        Args:
          query: Tensor of shape `[N, num_units]`.
          state: Tensor of shape `[N, T_in]`

        Returns:
          alignments: Tensor of shape `[N, T_in]`
        """
        with tf.variable_scope(None, 'location_sensitive_attention', [query]):
            expanded_alignments = tf.expand_dims(state, axis=2)  # [N, T_in, 1]
            f = self.location_conv(expanded_alignments)  # [N, T_in, 10]
            processed_location = self.location_layer(f)  # [N, T_in, num_units]

            processed_query = self.query_layer(
                query) if self.query_layer else query  # [N, num_units]
            processed_query = tf.expand_dims(processed_query,
                                             axis=1)  # [N, 1, num_units]
            score = _location_sensitive_score(processed_query,
                                              processed_location, self.keys,
                                              self._normalize)
            if self._sharpen:
                score *= self._sharpen_factor
            if self._windowing:
                cum_alignment = tf.cumsum(state, 1)
                half_step = cum_alignment > 0.5
                shifted_left = tf.pad(
                    half_step[:, self._left_window_width + 1:],
                    [[0, 0], [0, self._left_window_width + 1]],
                    constant_values=True)
                shifted_right = tf.pad(
                    half_step[:, :-self._right_window_width],
                    [[0, 0], [self._right_window_width, 0]],
                    constant_values=False)
                window = tf.logical_xor(shifted_left, shifted_right)
                # mask the score using the window
                score = tf.where(
                    window, score,
                    tf.ones_like(score) * tf.float32.as_numpy_dtype(-np.inf))

            alignments = self._probability_fn(score, state)
            if self._cumulate_weights:
                next_state = alignments + state
            else:
                next_state = alignments
            return alignments, next_state
Example #24
            def body(previous_finished, time_step, previous_state,
                     running_output, running_state, ponder_steps, remainders,
                     running_p_sum):

                current_inputs = tf.where(tf.equal(time_step, 1),
                                          inputs_and_one, inputs_and_zero)
                current_output, current_state = self._cell(
                    current_inputs, previous_state)

                if state_is_tuple:
                    joint_current_state = tf.concat(current_state, 1)
                else:
                    joint_current_state = current_state

                current_h = tf.nn.sigmoid(
                    tf.squeeze(
                        _linear([joint_current_state], 1, True,
                                self._init_halting_bias), 1))

                current_h_sum = running_p_sum + current_h

                limit_condition = time_step >= self._ponder_limit
                halting_condition = current_h_sum >= 1.0 - self._epsilon
                current_finished = tf.logical_or(halting_condition,
                                                 limit_condition)
                just_finished = tf.logical_xor(current_finished,
                                               previous_finished)

                current_p = tf.where(current_finished, 1.0 - running_p_sum,
                                     current_h)
                expanded_current_p = tf.expand_dims(current_p, 1)

                running_output += expanded_current_p * current_output

                if state_is_tuple:
                    running_state += tf.expand_dims(expanded_current_p,
                                                    0) * current_state
                else:
                    running_state += expanded_current_p * current_state

                ponder_steps = tf.where(just_finished,
                                        tf.fill([batch_size], time_step),
                                        ponder_steps)
                remainders = tf.where(just_finished, current_p, remainders)
                running_p_sum += current_p

                return (current_finished, time_step + 1, current_state,
                        running_output, running_state, ponder_steps,
                        remainders, running_p_sum)
Example #25
def test_error(YT, YP):
    #YP = tf.Print(YP, [YP.shape])
    #YT = tf.Print(YT, [YT.shape])

    YPB = tf.greater(YP, 0.0)
    #num = tf.reduce_sum(tf.cast( YPB, tf.float32))
    YTB = tf.greater(YT, 0.0)
    #num = tf.reduce_sum(tf.cast( YTB, tf.float32))

    YERR = tf.logical_xor(YTB, YPB)
    num = tf.reduce_sum(tf.cast(YERR, tf.float32))

    numSamples = tf.cast(tf.shape(YT)[0], tf.float32)

    return tf.divide(num, numSamples)
Example #26
def _make_target_attributes(attributes: tf.Tensor) -> Tuple[tf.Tensor, int]:
    n_samples = tf.shape(attributes)[0]
    n_classes = attributes.shape[1].value

    attributes_repeated = _repeat_elements(attributes, n_classes)
    active_attribute_repeated = tf.tile(tf.eye(n_classes), [n_samples, 1])

    target_attributes = tf.logical_xor(
        tf.cast(active_attribute_repeated, tf.bool),
        tf.cast(attributes_repeated, tf.bool))
    target_attributes = tf.cast(target_attributes, tf.float32)

    # TODO: Include option to get combinations of attributes.
    n_targets_per_sample = n_classes

    return target_attributes, n_targets_per_sample
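A tiny eager sketch of the idea above (invented attribute vector, TF 2.x assumed): XOR with a one-hot row flips exactly one attribute and leaves the rest unchanged.

import tensorflow as tf

attrs = tf.constant([[1., 0., 1.]])
one_hot = tf.constant([[0., 1., 0.]])
target = tf.logical_xor(tf.cast(one_hot, tf.bool), tf.cast(attrs, tf.bool))
print(tf.cast(target, tf.float32).numpy())  # [[1. 1. 1.]] -- only attribute 1 flipped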
Example #27
    def __compute_reward_on_prediction(self, this_comp, last_comp):
        changed = tf.logical_xor(this_comp, last_comp)
        unchanged = tf.logical_not(changed)

        changed_reward = tf.to_float(changed) * (
            tf.to_float(this_comp) * 1.0 -
            tf.to_float(tf.logical_not(this_comp)) * 1.0)
        unchanged_reward = tf.to_float(unchanged) * (tf.to_float(this_comp) *
                                                     0.5)

        reward = tf.layers.average_pooling2d(inputs=changed_reward +
                                             unchanged_reward,
                                             pool_size=5,
                                             strides=1,
                                             padding='same')
        return tf.stop_gradient(tf.squeeze(reward, axis=-1))
Example #28
def calculate_simple_loss(predict, label):
    """
    Calculate the simple loss which only according to the pixel-wise accuracy
    :param predict: the predicted likelihood produced by network
    :param label: the ground truth
    :return: a scalar loss
    """

    # predict is the per-pixel probability, which should be cast to a binary result (0 or 1)
    predict_bool = tf.greater_equal(predict, 0)
    label_bool = tf.greater_equal(label, 0.5)
    error_map = tf.logical_xor(label_bool, predict_bool)
    pixel_num = tf.reduce_sum(label + 1) - tf.reduce_sum(label)
    loss = tf.reduce_sum(tf.cast(error_map, tf.float32)) / pixel_num

    return loss
Example #29
    def call(self, x, mask=None):
        """Layer functionality."""
        # Skip integration of input spikes in membrane potential. Directly
        # transmit new spikes. The output psp is nonzero wherever there has
        # been an input spike at any time during simulation.

        input_psp = MaxPooling2D.call(self, x)

        if self.spiketrain is not None:
            new_spikes = tf.logical_xor(k.greater(input_psp, 0),
                                        k.greater(self.last_spiketimes, 0))
            self.add_update([(self.spiketrain,
                              self.time * k.cast(new_spikes, k.floatx()))])

        psp = self.get_psp(input_psp)

        return k.cast(psp, k.floatx())
Example #30
        def _one_frame_forward(i, lstm_state, Q_vals, chose_to_release):
            """Runs one time-step forward from the input frame to the output Qs
               including epsilon-greedy action choice."""
            with tf.variable_scope('one_frame_forward', reuse=tf.AUTO_REUSE):
                # Get current frame
                curr_input_frame = input_frames[
                    i:i + 1]  # The :+1 keeps the dimension

                # Pass through inception_v3
                # Note: use_fused_batchnorm = False is to work around a bug that breaks
                # backprop through fused_batchnorm.
                with arg_scope(
                        inception.inception_v3_arg_scope(
                            use_fused_batchnorm=False)):
                    inception_features, _ = inception.inception_v3_base(
                        curr_input_frame)

                # Flatten and fully-connect to lstm inputs
                inception_features = slim.flatten(inception_features,
                                                  scope='inception_flattened')
                if not model_config['tune_vision_model']:
                    inception_features = tf.stop_gradient(inception_features)
                lstm_inputs = slim.fully_connected(
                    inception_features,
                    model_config['LSTM_hidden_size'],
                    activation_fn=tf.nn.relu,
                    scope='lstm_inputs')

                # LSTM!
                (lstm_outputs, lstm_state) = lstm_cell(lstm_inputs, lstm_state)

                # Fully connect (linear) to Q estimates
                Q_vals = slim.fully_connected(lstm_outputs,
                                              num_actions,
                                              activation_fn=None,
                                              scope='Q_vals')

                # is the greedy action to release?
                greedy_release = tf.less(Q_vals[0, 0], Q_vals[0, 1])
                # is this an epsilon-exploring trial?
                explore = tf.less(explore_vals[i], epsilon_ph)

                # make the choice thereby
                chose_to_release = tf.logical_xor(greedy_release, explore)

            return (i, lstm_state, Q_vals, tf.squeeze(chose_to_release))
Example #31
def __xor__(self, other):
    return tf.logical_xor(self, other)
Example #32
tf.less_equal(1, 1)
# True

tf.greater(1, 2)
# False

tf.greater_equal(1, 2)
# False

tf.logical_and(True, False)
# False

tf.logical_or(True, False)
# True

tf.logical_xor(True, False)
# True


# A 1D array is a rank-1 tensor
# A 2D tensor is an n x n matrix

tensor_1 = tf.constant([[1., 2.], [3., 4.]])

tensor_2 = tf.constant([[5.5, 6.], [7., 8.]])

# create a matrix multiplication operation
output_tensor = tf.matmul(tensor_1, tensor_2)

# have to run the graph using a session
sess = tf.Session()
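Hedged continuation (not part of the original snippet): in TF 1.x, evaluating the graph with the session created above would look roughly like this.

result = sess.run(output_tensor)
print(result)
# [[19.5 22. ]
#  [44.5 50. ]]
sess.close()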
Example #33
 
  
  with tf.Session() as sess:
    # create the computation graph
    model, saver, feature_ph, target_ph, keep_prob_ph = train.get_nn()

    # tf_train.py wrote this out; use it to get graph populated
    localfilename = '/tmp/trained_model.tf'
    subprocess.check_call(['gsutil', 'cp', train.GS_MODEL_OUTPUT, localfilename])
    saver.restore(sess, localfilename)

    # evaluation graph
    features, labels = train.get_training_data(prefix='testflights')
    pred_bool = tf.greater(model, PROB_THRESH*tf.identity(model)) # threshold output of model
    truth_bool = tf.greater(target_ph, 0.5*tf.identity(target_ph))
    cost = tf.reduce_sum(tf.to_int32(tf.logical_xor(pred_bool, truth_bool))) # number wrong
 
    tf.initialize_all_variables().run()
    tf.get_default_graph().finalize()  #prevent changing graph

    # start the evaluation
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    totalcost = 0.0
    try: 
      nbatch = 0
      while nbatch < numbatches:
        features_feed, labels_feed = sess.run([features, labels])
        batchcost = cost.eval(feed_dict = {feature_ph: features_feed, target_ph: labels_feed, keep_prob_ph: 1.0})
        totalcost = totalcost + batchcost
Example #34
def calculate_metrics(predicted_batch, real_batch, threshold=0.5, is_training=False, ema_decay=0.9):
    with tf.variable_scope('metric'):
        threshold_graph = tf.constant(threshold, name='threshold')
        zero_point_five = tf.constant(0.5)
        predicted_bool = tf.greater_equal(predicted_batch, threshold_graph)
        real_bool = tf.greater_equal(real_batch, zero_point_five)
        predicted_bool_neg = tf.logical_not(predicted_bool)
        real_bool_neg = tf.logical_not(real_bool)
        differences_bool = tf.logical_xor(predicted_bool, real_bool)
        tp = tf.logical_and(predicted_bool, real_bool)
        tn = tf.logical_and(predicted_bool_neg, real_bool_neg)
        fn = tf.logical_and(differences_bool, real_bool)
        fp = tf.logical_and(differences_bool, predicted_bool)
        tp = tf.reduce_sum(tf.cast(tp, tf.float32))
        tn = tf.reduce_sum(tf.cast(tn, tf.float32))
        fn = tf.reduce_sum(tf.cast(fn, tf.float32))
        fp = tf.reduce_sum(tf.cast(fp, tf.float32))

        average_ops = None
        init_op = None
        if is_training:
            ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
            average_ops = ema.apply([tp, tn, fp, fn])
            tp = ema.average(tp)
            tn = ema.average(tn)
            fp = ema.average(fp)
            fn = ema.average(fn)
        else:
            tp_v = tf.Variable(0, dtype=tf.float32, name='true_positive', trainable=False)
            tn_v = tf.Variable(0, dtype=tf.float32, name='true_negative', trainable=False)
            fp_v = tf.Variable(0, dtype=tf.float32, name='false_positive', trainable=False)
            fn_v = tf.Variable(0, dtype=tf.float32, name='false_negative', trainable=False)
            init_op = [tf.assign(tp_v, 0), tf.assign(tn_v, 0), tf.assign(fp_v, 0), tf.assign(fn_v, 0)]
            tp = tf.assign_add(tp_v, tp)
            tn = tf.assign_add(tn_v, tn)
            fp = tf.assign_add(fp_v, fp)
            fn = tf.assign_add(fn_v, fn)

        # calculate metrics
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        accuracy = (tp + tn) / (tp + tn + fp + fn)
        fall_out = fp / (tn + fp)
        f1_score = tp * 2 / (tp * 2 + fp + fn)

        # remove NaNs and set them to 0
        zero = tf.constant(0, dtype=tf.float32)
        precision = tf.cond(tf.equal(tp, 0.0), lambda: zero, lambda: precision)
        recall = tf.cond(tf.equal(tp, 0.0), lambda: zero, lambda: recall)
        accuracy = tf.cond(tf.equal(tp + tn, 0.0), lambda: zero, lambda: accuracy)
        fall_out = tf.cond(tf.equal(fp, 0.0), lambda: zero, lambda: fall_out)
        f1_score = tf.cond(tf.equal(tp, 0.0), lambda: zero, lambda: f1_score)

        # add to tensorboard
        # tf.summary.scalar('accuracy', accuracy)
        tf.summary.scalar('precision', precision)
        tf.summary.scalar('recall', recall)
        tf.summary.scalar('fall-out', fall_out)
        tf.summary.scalar('f1-score', f1_score)
        tf.summary.scalar('true_positive', tp)
        tf.summary.scalar('true_negative', tn)
        tf.summary.scalar('false_positive', fp)
        tf.summary.scalar('false_negative', fn)

    metrics_ops = {
        # 'accuracy' : accuracy,
        'precision': precision,
        'recall': recall,
        'fall-out': fall_out,
        'f1-score': f1_score,
        'true positive': tp,
        'true negative': tn,
        'false positive': fp,
        'false negative': fn,
    }
    return init_op, average_ops, metrics_ops
Example #35
def __rxor__(self, other):
    return tf.logical_xor(other, self)
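Context note on Examples #31 and #35 (my reading, hedged): defining `__xor__`/`__rxor__` this way is what lets Python's `^` operator on Boolean tensors fall through to `tf.logical_xor`, e.g. in TF 2.x eager:

import tensorflow as tf

a = tf.constant([True, False, True])
b = tf.constant([True, True, False])
print((a ^ b).numpy())  # [False  True  True] -- same as tf.logical_xor(a, b)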