Example #1
def transform_targets_for_output(y_true, grid_size, anchor_idxs, classes):
    """
    y_true here is y_train append a column with anchor_idx
    which indicates, every object(box) anchored to which anchor
    then you can using anchor to trace location and regression the precise offset to
    the anchor
    """
    # y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor))
    N = tf.shape(y_true)[0]

    # y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])
    y_true_out = tf.zeros(
        (N, grid_size, grid_size, tf.shape(anchor_idxs)[0], 6))

    anchor_idxs = tf.cast(anchor_idxs, tf.int32)

    indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
    updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
    idx = 0
    for i in tf.range(N):
        for j in tf.range(tf.shape(y_true)[1]):
            if tf.equal(y_true[i][j][2], 0):
                # x2 == 0 marks a zero-width padding row, so skip it
                continue
            anchor_eq = tf.equal(anchor_idxs, tf.cast(y_true[i][j][5],
                                                      tf.int32))

            if tf.reduce_any(anchor_eq):
                box = y_true[i][j][0:4]
                box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2

                anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
                grid_xy = tf.cast(box_xy // (1 / grid_size), tf.int32)

                # grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)
                indexes = indexes.write(
                    idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])
                updates = updates.write(
                    idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]])
                idx += 1

    # tf.print(indexes.stack())
    # tf.print(updates.stack())
    return tf.tensor_scatter_nd_update(y_true_out, indexes.stack(),
                                       updates.stack())
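A minimal usage sketch (shapes and values assumed, not from the original source): one image whose padded label tensor holds one real box assigned to anchor 1, plus a zero-width padding row.

y_train = tf.constant([[
    [0.1, 0.2, 0.4, 0.6, 0.0, 1.0],   # real box, class 0, best anchor 1
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],   # zero-width padding row, skipped
]], tf.float32)
targets = transform_targets_for_output(
    y_train, grid_size=13, anchor_idxs=tf.constant([0, 1, 2]), classes=80)
print(targets.shape)  # (1, 13, 13, 3, 6)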
Example #2
def segment_top_k(x, i, ratio):
    """
    Returns indices to get the top K values in x segment-wise, according to
    the segments defined in i. K is not fixed, but is defined as a ratio of
    the number of elements in each segment.
    :param x: a rank 1 Tensor;
    :param i: a rank 1 Tensor with segment IDs for x;
    :param ratio: float, ratio of elements to keep for each segment;
    :return: a rank 1 Tensor containing the indices to get the top K values of
    each segment in x.
    """
    i = tf.cast(i, tf.int32)
    n = tf.shape(i)[0]
    n_nodes = tf.math.segment_sum(tf.ones_like(i), i)
    batch_size = tf.shape(n_nodes)[0]
    n_nodes_max = tf.reduce_max(n_nodes)
    cumulative_n_nodes = tf.concat(
        (tf.zeros(1, dtype=n_nodes.dtype), tf.cumsum(n_nodes)[:-1]), 0
    )
    index = tf.range(n)
    index = (index - tf.gather(cumulative_n_nodes, i)) + (i * n_nodes_max)

    dense_x = tf.zeros(batch_size * n_nodes_max, dtype=x.dtype) - 1e20
    dense_x = tf.tensor_scatter_nd_update(dense_x, index[:, None], x)
    dense_x = tf.reshape(dense_x, (batch_size, n_nodes_max))

    perm = tf.argsort(dense_x, direction="DESCENDING")
    perm = perm + cumulative_n_nodes[:, None]
    perm = tf.reshape(perm, (-1,))

    k = tf.cast(tf.math.ceil(ratio * tf.cast(n_nodes, tf.float32)), i.dtype)

    # This costs more memory
    # to_rep = tf.tile(tf.constant([1., 0.]), (batch_size,))
    # rep_times = tf.reshape(tf.concat((k[:, None], (n_nodes_max - k)[:, None]), -1), (-1,))
    # mask = ops.repeat(to_rep, rep_times)
    # perm = tf.boolean_mask(perm, mask)

    # This is slower
    r_range = tf.ragged.range(k).flat_values
    r_delta = ops.repeat(tf.range(batch_size) * n_nodes_max, k)
    mask = r_range + r_delta
    perm = tf.gather(perm, mask)

    return perm
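A hypothetical call, assuming `ops.repeat` from the surrounding module behaves like `tf.repeat`: two segments of three elements each, keeping the top half of every segment.

x = tf.constant([0.3, 0.9, 0.1, 0.5, 0.7, 0.2])
i = tf.constant([0, 0, 0, 1, 1, 1])    # two segments of three elements
idx = segment_top_k(x, i, ratio=0.5)   # ceil(0.5 * 3) = 2 kept per segment
print(tf.gather(x, idx))               # [0.9 0.3 0.7 0.5]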
Example #3
def match_boxes(gt_labels, def_boxes, threshold=0.5):
    """
    Return the new ground truth, corresponding to labeled default boxes

    Parameters
    ----------
    gt_labels: Tensor of real bounding boxes of the image(s), with labels, in the format:
               [NUM_GT_BOXES * [x_min, y_min, x_max, y_max | label]]
               
    def_boxes: Tensor of default boxes of an image, in the format:
               [NUM_DEF_BOXES * [x_min, y_min, x_max, y_max]]
    threshold: minimum IoU for a default box to count as a positive match

    Return
    ------
    ground_truth: labeled default boxes
    """
    # Handle missing labels
    if len(gt_labels) == 0:
        def_labels = tf.zeros(shape=(len(def_boxes), ), dtype=tf.float32)
        return def_boxes, def_labels

    # Process ground truth
    gt_boxes = gt_labels[..., :-1]
    labels = gt_labels[..., -1]

    # Compute IoU values
    jaccard = jaccard_overlap(def_boxes, gt_boxes)
    max_gt_jaccard = tf.reduce_max(jaccard, axis=1)
    max_def_jaccard = tf.reduce_max(jaccard, axis=0)
    max_gt_indices = tf.argmax(jaccard, axis=1)
    max_def_indices = tf.argmax(jaccard, axis=0)

    # Ensure best IoU
    max_gt_jaccard = tf.tensor_scatter_nd_update(
        max_gt_jaccard, tf.expand_dims(max_def_indices, 1),
        tf.ones_like(max_def_indices, dtype=tf.float32))

    # Match boxes with IoU > threshold
    positive_mask = tf.where(max_gt_jaccard >= threshold, 1., 0.)
    matched_labels = tf.gather(labels, max_gt_indices) * positive_mask
    matched_boxes = tf.gather(gt_boxes, max_gt_indices)
    encoded_boxes = encode_boxes(def_boxes, matched_boxes)

    return encoded_boxes, matched_labels
Example #4
    def mask(self):
        """mask helper function for initializing grid mask of required size."""
        mask_w = mask_h = int(
            (self.gridmask_size_ratio + 1) * max(self.h, self.w))
        mask = tf.zeros(shape=[mask_h, mask_w], dtype=tf.int32)
        gridblock = tf.random.uniform(
            shape=[],
            minval=int(min(self.h * 0.5, self.w * 0.3)),
            maxval=int(max(self.h * 0.5, self.w * 0.3)),
            dtype=tf.int32,
        )

        if self.ratio == 1:
            length = tf.random.uniform(shape=[],
                                       minval=1,
                                       maxval=gridblock,
                                       dtype=tf.int32)
        else:
            length = tf.cast(
                tf.math.minimum(
                    tf.math.maximum(
                        int(tf.cast(gridblock, tf.float32) * self.ratio + 0.5),
                        1,
                    ),
                    gridblock - 1,
                ),
                tf.int32,
            )

        for _ in range(2):
            start_w = tf.random.uniform(shape=[],
                                        minval=0,
                                        maxval=gridblock,
                                        dtype=tf.int32)
            for i in range(mask_w // gridblock):
                start = gridblock * i + start_w
                end = tf.math.minimum(start + length, mask_w)
                indices = tf.reshape(tf.range(start, end), [end - start, 1])
                updates = (
                    tf.ones(shape=[end - start, mask_w], dtype=tf.int32) *
                    self.fill)
                mask = tf.tensor_scatter_nd_update(mask, indices, updates)
            mask = tf.transpose(mask)

        return mask
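The scatter above uses rank-2 indices of shape [k, 1], so each update supplies an entire row of the mask; the same pattern in isolation (sizes illustrative):

mask = tf.zeros([6, 6], dtype=tf.int32)
rows = tf.reshape(tf.range(2, 4), [2, 1])     # row indices 2 and 3
updates = tf.ones([2, 6], dtype=tf.int32)     # one full row per index
mask = tf.tensor_scatter_nd_update(mask, rows, updates)  # one grid stripe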
Example #5
  def _sparse_scatter(
    operand,
    scatter_indices,
    updates,
    update_jaxpr,
    update_consts,
    dimension_numbers,
    indices_are_sorted: bool,
    unique_indices: bool,
    mode,
    _in_avals: Sequence[core.ShapedArray],
    _out_aval: core.ShapedArray):
    """
    Implementation of scatter specialised to indexing from the
    front axes. This covers unique indices and non-unique indices
    of single depth.

    Note on unique indices: `tf.tensor_scatter_nd_update` interprets
    indices thusly: every axis except the final one encodes a batch
    dimension, the final axis encoding the actual indices to scatter in to.
    It enforces at least one batch dimension, so we add an empty
    dimension to indices and updates if one is lacking.

    Note on non-unique indices: There is no tf op for non single depth
    indexing. But if indexing is single depth, this can be viewed as a
    segment op.
    """
    # Infer unique indices from lack of batch dimension
    unique_indices = unique_indices or (len(scatter_indices.shape) == 1)
    if unique_indices:
      suboperand = tf.gather_nd(operand, scatter_indices)
      updated_suboperand = update_op(suboperand, updates)
      # add a batch dim if none exist
      if len(scatter_indices.shape) == 1:
        scatter_indices = scatter_indices[None]
        updated_suboperand = updated_suboperand[None]
      y = tf.tensor_scatter_nd_update(operand, scatter_indices, updated_suboperand)
    else:
      if (scatter_indices.shape[-1] == 1) and (unsorted_segment_op is not None):
        # If only indexing into the first dimension, it's a segment op
        operand_update = unsorted_segment_op(updates, tf.squeeze(scatter_indices, -1), operand.shape[0])
        y = update_op(operand, operand_update)
      else:
        raise error("Scatter supports unique indices. Scatter also supports non-unique indices with indexing into only one dimension for (add, mul, min, max)")
    return y
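A tiny standalone illustration of the indexing convention the docstring describes (not part of the converter itself):

operand = tf.constant([10., 20., 30., 40.])
idx = tf.constant([2])        # rank 1: no batch dimension yet
upd = tf.constant([99.])
# idx[None] has shape (1, 1): one batch entry, index depth 1
print(tf.tensor_scatter_nd_update(operand, idx[None], upd))
# tf.Tensor([10. 20. 99. 40.], shape=(4,), dtype=float32)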
Example #6
    def shiftGaugeField(self, gaugeField, cpt, sign):
        gaugeFieldShifted = tf.roll(gaugeField, -sign, cpt)

        pauliMatNum = self.boundaryConditions[cpt]

        if pauliMatNum == 0:
            return gaugeFieldShifted

        latShape = tf.shape(gaugeField)[0:3]
        indices = FieldTools.boundaryIndices(latShape, cpt, sign)

        updates = tf.gather_nd(gaugeFieldShifted, indices)
        updates = FieldTools.pauliMatrix(pauliMatNum) @\
            updates @ FieldTools.pauliMatrix(pauliMatNum)

        gaugeFieldShifted = tf.tensor_scatter_nd_update(
            gaugeFieldShifted, indices, updates)
        return gaugeFieldShifted
Example #7
def sparse_fill_empty_rows(sp_input, default_value, name=None):
    # Op implemented in tflite mode, so don't go through complicated logic below.
    # if not cuda_ops_only():
    #     return tf.sparse_fill_empty_rows(sp_input, default_value, name=name)
    d = sparse_tensor_to_dense(sp_input, default_value=default_value)
    ds = sp_input.dense_shape
    seq_len = sequence_length_from_sparse_tensor(sp_input)
    empty_row_indicator = tf.equal(seq_len, 0)
    mask = sparse_tensor_to_dense(tf.SparseTensor(sp_input.indices,
        tf.fill(tf.shape(sp_input.values), True), ds))
    row_idx = tf.where(tf.logical_and(empty_row_indicator,
        tf.greater(tf.size(d), 0)))   # empty input edge case.
    fill_indices = tf.concat([row_idx, tf.zeros([tf.size(row_idx),
        tf.size(ds) - 1], dtype=tf.int64)], axis=1)
    mask = tf.tensor_scatter_nd_update(
        mask, fill_indices, tf.fill(tf.shape(row_idx)[:1], True))
    values, indices = tf.boolean_mask(d, mask), tf.where(mask)
    return tf.SparseTensor(indices, values, ds), empty_row_indicator
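This helper leans on `sparse_tensor_to_dense` and `sequence_length_from_sparse_tensor` from its own module; as a sanity check, the built-in op it mirrors can be called directly to see the expected behaviour:

sp = tf.SparseTensor(indices=[[0, 0], [2, 1]], values=[1, 2],
                     dense_shape=[4, 3])
filled, empty = tf.sparse.fill_empty_rows(sp, default_value=-1)
print(empty)  # [False  True False  True]: rows 1 and 3 were empty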
Example #8
 def call(self, inputs):
     Q_t, ard, Q_tar = inputs
     Q_t1_max = tf.math.reduce_max(Q_tar, axis=1)
     reward = tf.cast(ard[:, 1], tf.float32)
     action = tf.cast(ard[:, 0], tf.int32)
     done = tf.cast(ard[:, 2], tf.int32)
     continue_candidates = tf.math.add(
         tf.math.multiply(self.gamma, Q_t1_max), reward)
     both_candidates = tf.stack([continue_candidates, reward],
                                axis=1)
     new_Qta = tf.gather_nd(both_candidates,
                            tf.expand_dims(done, axis=1),
                            batch_dims=1)
     size = tf.shape(ard)[0]
     index = tf.range(0, size)
     indices = tf.stack([index, action], axis=1)
     new_Q = tf.tensor_scatter_nd_update(Q_t, indices, new_Qta)
     return tf.stop_gradient(new_Q)
Example #9
    def shiftCovDeriv(self, covDeriv, cpt, sign):
        covDerivShifted = tf.roll(covDeriv, -sign, cpt)

        if cpt != 0:
            return covDerivShifted

        indices = FieldTools.boundaryIndices(self.latShape, cpt, sign)

        if sign == -1:
            updates = tf.zeros(tf.concat([tf.shape(indices)[0:-1], [2, 2]], 0),
                               dtype=tf.complex128)
        else:
            updates = tf.gather_nd(covDeriv, indices)

        covDerivShifted = tf.tensor_scatter_nd_update(covDerivShifted, indices,
                                                      updates)

        return covDerivShifted
Example #10
def find_neighbouring_indices_along_axis(grid_sizes,
                                         quadrature_coords_along_axis, dim):
    dim_size = grid_sizes[dim]
    dx = tf.cast(2 / (dim_size - 1), tf.keras.backend.floatx())
    lower_idx = tf.cast(tf.math.floor((quadrature_coords_along_axis + 1) / dx),
                        tf.int32)
    upper_idx = tf.cast(tf.math.ceil((quadrature_coords_along_axis + 1) / dx),
                        tf.int32)
    equal_indices = (upper_idx == lower_idx)
    # Sometimes a quadrature point coincides with the position of a grid
    # point; adjust the upper index so the two neighbours stay distinct.
    if tf.reduce_any(equal_indices):
        new_upper_indices = tf.gather_nd(upper_idx,
                                         tf.where(equal_indices)) + 1
        upper_idx = tf.tensor_scatter_nd_update(upper_idx,
                                                tf.where(equal_indices),
                                                new_upper_indices)
    return tf.stack([lower_idx, upper_idx], 1)
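For example, on a five-point grid over [-1, 1] (so dx = 0.5), a quadrature point at exactly 0.0 lands on a grid point and triggers the adjustment:

pts = tf.constant([-0.9, 0.0, 0.7])
print(find_neighbouring_indices_along_axis([5], pts, 0))
# [[0 1]
#  [2 3]
#  [3 4]]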
Example #11
        def pairwise_update_in_mol(h_v, mol_idx):
            # (n_atoms, )
            this_mol_has_atom = atom_in_mol[:, mol_idx]

            # (n_atoms_in_this_mol, )
            this_mol_atom_idxs = tf.where(this_mol_has_atom)

            # (n_atoms_in_this_mol, d_v)
            h_v_this_mol = tf.boolean_mask(h_v, this_mol_has_atom)

            # (n_atoms_in_this_mol, d_v)
            h_v_this_mol = self.pairwise_update(h_v_this_mol)

            # (n_atoms, d_v)
            h_v = tf.tensor_scatter_nd_update(h_v, this_mol_atom_idxs,
                                              h_v_this_mol)

            return h_v, mol_idx + 1
Example #12
 def false_fn():
     scatter_index = count_non_blank(y_hat_prediction, blank=self.text_featurizer.blank)
     updated_prediction = tf.tensor_scatter_nd_update(
         y_hat_prediction,
         indices=tf.reshape(scatter_index, [1, 1]),
         updates=tf.expand_dims(pred, axis=-1)
     )
     return (
         B.score,
         B.indices,
         B.prediction,
         B.states,
         A.score.write(A_i, new_score),
         A.indices.write(A_i, pred),
         A.prediction.write(A_i, updated_prediction),
         A.states.write(A_i, new_states),
         A_i + 1
     )
Example #13
    def kdtree_triplet_loss(self, y_true, y_pred, margin=1.0):
        """Computes the triplet loss with semi-hard negative mining.
        MODIFIED FROM tensorflow_addons.losses.triplet_semihard_loss

        Args:
        y_true: 1-D integer `Tensor` with shape [batch_size] of
            multiclass integer labels.
        y_pred: 2-D float `Tensor` of embedding vectors. Embeddings should
            be l2 normalized.
        margin: Float, margin term in the loss definition.
        """
        labels, embeddings = y_true, y_pred
        # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
        lshape = tf.shape(labels)
        print('step1')
        assert lshape.shape == 1 or lshape.shape == (2, )
        print('step2')
        labels = tf.reshape(labels, [-1])
        print('step3')

        pos_dists, neg_dists = tf.py_function(self.get_dists,
                                              inp=[labels, embeddings],
                                              Tout=[tf.float64, tf.float64])
        print('step4')
        print(pos_dists, neg_dists)
        pos_diffs = tf.minimum(pos_dists - neg_dists, [0])
        print('step5')
        print(pos_diffs)
        zero_idx = tf.cast(
            tf.where(pos_diffs == 0, -tf.ones_like(pos_diffs), pos_diffs),
            tf.int64)
        print(zero_idx)
        print(pos_dists[zero_idx])
        norm_diffs = tf.tensor_scatter_nd_update(pos_diffs, zero_idx,
                                                 pos_dists[zero_idx])
        print(norm_diffs)
        # pos_diffs[[1] == 0] = pos_dists[[1] == 0] / 2
        print('step6')

        triplet_loss = tf.cast(tf.reduce_mean(norm_diffs), tf.float32)
        print('step7')
        print(triplet_loss)

        return triplet_loss
Example #14
def patch_application(images: tf.Tensor,
                      patches: tf.Tensor,
                      randomness: Optional[Union[np.ndarray, tf.Tensor]] = None):
    """Applies patches to images at random locations.

    Args:
        images: A batch of images of shape [B, H, W, 3].
        patches: A batch of patches of shape [B, H_patch, W_patch, 3].
        randomness: Optional seeds for randomness. Shape is [B, ]

    Returns:
        Images with patches applied at random locations. Shape is [B, H, W, 3].
    """
    image_height, image_width = images.shape[1:3]
    patch_height, patch_width = patches.shape[1:3]
    height_to_pad = int(image_height - patch_height)
    width_to_pad = int(image_width - patch_width)

    # Generate offsets for random translation
    if randomness is None:
        height_offsets = tf.random.uniform([images.shape[0]],
                                           maxval=height_to_pad,
                                           dtype=tf.int32)
        width_offsets = tf.random.uniform([images.shape[0]],
                                          maxval=width_to_pad,
                                          dtype=tf.int32)
    else:
        assert randomness.shape[0] == images.shape[0]
        randomness = randomness.numpy() if tf.is_tensor(
            randomness) else randomness
        height_offsets = randomness % height_to_pad
        width_offsets = (randomness // height_to_pad) % width_to_pad

    mesh = tf.meshgrid(
        tf.range(images.shape[0]),
        tf.range(patch_height),
        tf.range(patch_width),
        indexing="ij",
    )
    mesh[1] += height_offsets[:, None, None]
    mesh[2] += width_offsets[:, None, None]
    indices = tf.stack(mesh, axis=-1)
    # Paste patches into the images at random locations
    return tf.tensor_scatter_nd_update(images, indices, patches)
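A toy call with the shapes from the docstring (values assumed):

images = tf.zeros([2, 32, 32, 3])
patches = tf.ones([2, 8, 8, 3])
out = patch_application(images, patches)
print(out.shape)  # (2, 32, 32, 3); each image gets one 8x8 block of ones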
Example #15
    def _remove_input_items_from_prediction(batch_item_indices, result):
        """ Remove input items from predictions, as we don't want to predict them. It is done setting their
            probabilities to -1

            Args:
                batch_item_indices: Batch input item indices 
                result: Batch predicted probabilities
            Returns: 
                Batch predicted probabilities with input items probs. set to -1
        """
        # batch_item_indices is a ragged with input indices for each batch row. Ex [ [0] , [1, 2] ]
        batch_item_indices = batch_item_indices.to_tensor(
            -1)  # -> [ [0,-1] , [1, 2] ]
        #print(batch_item_indices, batch_item_indices.shape[0])

        # Convert batch_item_indices row indices to (row,column) indices
        row_indices = tf.range(0,
                               tf.shape(batch_item_indices)[0],
                               dtype=tf.int64)  # -> [ 0, 1 ]
        row_indices = tf.repeat(
            row_indices,
            [tf.shape(batch_item_indices)[1]])  # -> [ 0, 0, 1, 1 ]
        #print(">>>", batch_item_indices)
        batch_item_indices = tf.reshape(batch_item_indices,
                                        shape=[-1])  # -> [ 0, -1, 1, 2 ]
        batch_item_indices = tf.stack(
            [row_indices, batch_item_indices],
            axis=1)  # -> [ [0,0] , [0,-1], [1,1], [1,2] ]

        # batch_item_indices.to_tensor(-1) added -1's to pad the matrix. Remove these indices
        # Needed according to tf.tensor_scatter_nd_update doc. (it will fail in CPU execution, if there are out of bound indices)
        # Get indices without -1's:
        gather_idxs = tf.where(
            batch_item_indices[:, 1] != -1)  # -> [[0], [2], [3]]
        batch_item_indices = tf.gather_nd(
            batch_item_indices, gather_idxs)  # -> [ [0,0] , [1,1], [1,2] ]

        # To remove input indices, we will set a probability -1 in their indices
        updates = tf.repeat(
            -1.0,
            tf.shape(batch_item_indices)[0])  # -> [ -1, -1, -1 ]

        # Assign -1's to the input indices:
        return tf.tensor_scatter_nd_update(result, batch_item_indices, updates)
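A sketch reproducing the ragged example walked through in the comments (names and values assumed, and assuming the helper is callable at this scope):

probs = tf.fill([2, 4], 0.5)
item_idx = tf.ragged.constant([[0], [1, 2]], dtype=tf.int64)
print(_remove_input_items_from_prediction(item_idx, probs))
# [[-1.   0.5  0.5  0.5]
#  [ 0.5 -1.  -1.   0.5]]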
Example #16
    def _update_tp_fp_per_image(self, ground_truth_boxes, predicted_boxes):
        with tf.name_scope("update_tp_fp_per_image"):
            ground_truth_boxes = tf.boolean_mask(ground_truth_boxes, 
                                                 tf.logical_not(tf.reduce_all(ground_truth_boxes == 0, 1)))
            predicted_boxes = tf.boolean_mask(predicted_boxes,
                                              tf.logical_not(tf.reduce_all(predicted_boxes == 0, 1)))

            num_gt_boxes = tf.shape(ground_truth_boxes)[0]
            num_pred_boxes = tf.shape(predicted_boxes)[0]
            matched_gt_boxes = tf.zeros([tf.shape(ground_truth_boxes)[0]], dtype=tf.int32)
            self.num_gt_boxes.assign_add(tf.cast(num_gt_boxes, self.num_gt_boxes.dtype))
            
            gt_areas = ((ground_truth_boxes[:, 2] - ground_truth_boxes[:, 0]) *
                        (ground_truth_boxes[:, 3] - ground_truth_boxes[:, 1]))
            pred_areas = ((predicted_boxes[:, 2] - predicted_boxes[:, 0]) * 
                          (predicted_boxes[:, 3] - predicted_boxes[:, 1]))

            if tf.greater(tf.shape(predicted_boxes)[0], 0):
                for i in tf.range(num_pred_boxes):
                    box = predicted_boxes[i]
                    inter_y1 = tf.math.maximum(box[0], ground_truth_boxes[:, 0])
                    inter_x1 = tf.math.maximum(box[1], ground_truth_boxes[:, 1])
                    inter_y2 = tf.math.minimum(box[2], ground_truth_boxes[:, 2])
                    inter_x2 = tf.math.minimum(box[3], ground_truth_boxes[:, 3])

                    inter_h = tf.math.maximum(inter_y2 - inter_y1, 0.0)
                    inter_w = tf.math.maximum(inter_x2 - inter_x1, 0.0)
                    inter_areas = inter_h * inter_w
                    
                    iou = inter_areas / (gt_areas + pred_areas[i] - inter_areas)
                
                    max_iou = tf.reduce_max(iou)
                    
                    if tf.greater_equal(max_iou, self.iou_threshold):
                        arg_max_iou = tf.argmax(iou)
                        if tf.not_equal(matched_gt_boxes[arg_max_iou], 1):
                            self.true_positives.assign_add(1)
                            matched_gt_boxes = tf.tensor_scatter_nd_update(matched_gt_boxes, 
                                                                           tf.reshape(arg_max_iou, [1, 1]), 
                                                                           tf.constant([1], dtype=matched_gt_boxes.dtype))
                        else:
                            # duplicate match to an already-claimed gt box
                            self.false_positives.assign_add(1)
                    else:
                        self.false_positives.assign_add(1)
Example #17
    def __call__(self, confs, locs, gt_confs, gt_locs):
        """ Compute losses for SSD
            regression loss: smooth L1
            classification loss: cross entropy
        Args:
            confs: outputs of classification heads (B, num_default, num_classes)
            locs: outputs of regression heads (B, num_default, 4)
            gt_confs: classification targets (B, num_default)
            gt_locs: regression targets (B, num_default, 4)
        Returns:
            conf_loss: classification loss
            loc_loss: regression loss
        """
        cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction='none')

        # compute classification losses
        # without reduction
        temp_loss = cross_entropy(gt_confs, confs)
        pos = gt_confs > 0
        temp_loss = tf.tensor_scatter_nd_update(
            temp_loss, tf.where(pos), tf.zeros(tf.shape(tf.where(pos))[0]))
        pos_idx, neg_idx = hard_negative_mining(temp_loss, gt_confs,
                                                self.neg_ratio)

        # classification loss will consist of positive and negative examples

        cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction='sum')
        smooth_l1_loss = tf.keras.losses.Huber(reduction='sum')

        conf_loss = cross_entropy(
            gt_confs[tf.math.logical_or(pos_idx, neg_idx)],
            confs[tf.math.logical_or(pos_idx, neg_idx)])

        # regression loss only consist of positive examples
        loc_loss = smooth_l1_loss(gt_locs[pos_idx], locs[pos_idx])

        num_pos = tf.reduce_sum(tf.dtypes.cast(pos_idx, tf.float32))

        conf_loss = conf_loss / num_pos
        loc_loss = loc_loss / num_pos

        return conf_loss, loc_loss
Example #18
    def _drop_nodes_helper(
        features: Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]
    ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
      """Performs batch-wise operation to drop nodes and corresponding edges."""
      atoms, atom_mask, pairs, pair_mask = features

      # Select nodes to drop.
      idx_drop = self._sample_nodes(atom_mask)
      if tf.math.equal(tf.shape(idx_drop)[0], 0):
        return features
      idx_dropped_nodes.append(tf.squeeze(idx_drop))

      # Drop selected nodes.
      aug_atom_mask = tf.tensor_scatter_nd_update(
          atom_mask, idx_drop, tf.zeros(tf.shape(idx_drop)[0]))
      if self.perturb_node_features:
        aug_atoms = tf.tensor_scatter_nd_update(
            atoms, idx_drop, tf.zeros(
                (tf.shape(idx_drop)[0], tf.shape(atoms)[1])))
      else:
        aug_atoms = atoms

      # Remove edges attached to dropped nodes.
      # First, remove edges where source node has been dropped (i.e. set rows
      # of dropped node indices to 0).
      if self.perturb_edge_features:
        aug_pairs = tf.tensor_scatter_nd_update(
            pairs, idx_drop,
            tf.zeros((tf.shape(idx_drop)[0], tf.shape(pairs)[1],
                      tf.shape(pairs)[-1])))
      else:
        aug_pairs = pairs
      aug_pair_mask = tf.tensor_scatter_nd_update(
          pair_mask, idx_drop,
          tf.zeros((tf.shape(idx_drop)[0], tf.shape(pair_mask)[-1])))
      # Second, remove edges where target node has been dropped (i.e. set
      # columns of dropped node indices to 0).
      columns = idx_drop
      rows = tf.range(tf.shape(pair_mask)[0], dtype=tf.int64)
      ii, jj = tf.meshgrid(rows, columns, indexing='ij')
      idx_to_update = tf.stack([ii, jj], axis=-1)
      updated_values_pair_mask = tf.broadcast_to(0., tf.shape(ii))
      aug_pair_mask = tf.tensor_scatter_nd_update(aug_pair_mask, idx_to_update,
                                                  updated_values_pair_mask)
      if self.perturb_edge_features:
        updated_values_pairs = tf.zeros(
            (tf.shape(ii)[0], tf.shape(ii)[1], pairs.shape[-1]))
        aug_pairs = tf.tensor_scatter_nd_update(aug_pairs, idx_to_update,
                                                updated_values_pairs)

      return aug_atoms, aug_atom_mask, aug_pairs, aug_pair_mask
Example #19
def transform_targets_for_output(y_true, grid_size, anchor_idxs):
    # y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor))

    N = tf.shape(y_true)[0]

    y_true_out = tf.zeros(
        (N, grid_size, grid_size, tf.shape(anchor_idxs)[0], 6)
    )

    anchor_idxs = tf.cast(anchor_idxs, tf.int32)

    indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
    updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
    idx = 0

    for i in tf.range(N):
        for j in tf.range(tf.shape(y_true)[1]):
            if tf.equal(y_true[i][j][2], 0):
                continue
            anchor_eq = tf.equal(
                anchor_idxs, tf.cast(y_true[i][j][5], tf.int32)
            )

            if tf.reduce_any(anchor_eq):
                box = y_true[i][j][0:4]
                box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2

                anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
                grid_xy = tf.cast(box_xy // (1 / grid_size), tf.int32)

                indexes = indexes.write(
                    idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]]
                )

                updates = updates.write(
                    idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]]
                )

                idx += 1

    return tf.tensor_scatter_nd_update(
        y_true_out, indexes.stack(), updates.stack()
    )
Example #20
def _compare_tensor_scatter_nd_update_dynamic_indices_with_tf(
    test_case,
    params_shape,
    indices_shape,
    updates_shape,
    indices_static_shape,
    updates_static_shape,
):
    params, updates, indices = _random_inputs(params_shape, indices_shape,
                                              updates_shape, False)

    i = tf.constant(indices)
    x = tf.Variable(params)
    y = tf.Variable(updates)
    z = tf.tensor_scatter_nd_update(x, i, y)

    of_z = _of_tensor_scatter_nd_update_dynamic_indices(
        params, indices, updates, indices_static_shape, updates_static_shape)
    test_case.assertTrue(np.allclose(z.numpy(), of_z))
Example #21
    def kmeans(self, max_iter, box_num, clusters, k):
        dists = tf.zeros((box_num, k))
        last = tf.zeros((box_num, ), dtype=tf.int64)

        tf.print(tf.shape(clusters))
        num_iters = 0

        while tf.math.less(num_iters, max_iter):
            dists = 1 - self.iou(self._boxes, clusters)
            curr = tf.math.argmin(dists, axis=-1)
            if tf.math.reduce_all(curr == last):
                break
            for i in range(k):
                hold = tf.math.reduce_mean(self._boxes[curr == i], axis=0)
                clusters = tf.tensor_scatter_nd_update(clusters, [[i]], [hold])
            last = curr
            num_iters += 1
            tf.print('k-Means box generation iteration: ', num_iters, end='\r')
        return clusters
Example #22
def group_dist_mat_by_label(i, labels, dist_mat, grouped_dist):
    # half of batch
    num_triplet_pair_per_sample = tf.math.floordiv(tf.size(labels), 2)
    idx = tf.squeeze(tf.where(tf.equal(labels, i)), axis=1)

    if tf.shape(idx)[0] == 0:
        if i == 0.:
            grouped_dist = tf.zeros((1, num_triplet_pair_per_sample))
        else:
            grouped_dist = tf.concat(
                [grouped_dist,
                 tf.zeros((1, num_triplet_pair_per_sample))],
                axis=0)

        i = i + 1
        return i, labels, dist_mat, grouped_dist

    probability = tf.zeros([tf.size(labels)], dtype=tf.float32)  # batch size
    updates = tf.fill(tf.shape(idx),
                      tf.truediv(1.0, tf.cast(tf.size(idx), tf.float32)))
    probability = tf.tensor_scatter_nd_update(probability,
                                              tf.expand_dims(idx, 1), updates)

    triplet_pair_idx = tf.squeeze(tf.random.categorical(
        tf.math.log(tf.expand_dims(probability, 0)),
        num_triplet_pair_per_sample),
                                  axis=0)

    triplet_pair = tf.squeeze(tf.gather(dist_mat,
                                        triplet_pair_idx,
                                        name="triplet_pair_gather"),
                              axis=1)

    if i == 0.:
        grouped_dist = tf.expand_dims(triplet_pair, axis=0)
    else:
        grouped_dist = tf.concat(
            [grouped_dist, tf.expand_dims(triplet_pair, axis=0)], axis=0)

    i = i + 1

    return i, labels, dist_mat, grouped_dist
Example #23
    def _update_context(self, context_in, V, scores, index, L_Q):
        B, H, L_V, D = V.shape

        if self.mask_flag:
            attn_mask = ProbMask(B, H, L_Q, index, scores)

            # scores.masked_fill_(attn_mask.mask, -np.inf)
            num = 3.4 * math.pow(10, 38)
            scores = (scores * attn_mask.mask) + (-((attn_mask.mask * num + num) - num))

        attn = tf.keras.activations.softmax(scores, axis=-1)  # nn.Softmax(dim=-1)(scores)
        batch_indexes = tf.tile(tf.range(V.shape[0])[:, tf.newaxis, tf.newaxis], (1, V.shape[1], index.shape[-1]))
        head_indexes = tf.tile(tf.range(V.shape[1])[tf.newaxis, :, tf.newaxis], (V.shape[0], 1, index.shape[-1]))

        idx = tf.stack(values=[batch_indexes, head_indexes, index], axis=-1)

        context_in = tf.tensor_scatter_nd_update(context_in, idx, tf.matmul(attn, V))

        return tf.convert_to_tensor(context_in)
Example #24
    def magneticField(self, gaugeField, scalarField, cpt):
        cpt1 = (cpt + 1) % 3
        cpt2 = (cpt + 2) % 3

        magneticField = tf.math.angle(
            tf.linalg.trace(
                self.u1Plaquette(gaugeField, scalarField, cpt1, cpt2)))

        if (cpt == 0 and self.tHooftLine):
            # Correct values along the 't Hooft line
            latShape = tf.shape(magneticField)
            indices = self.tHooftLineIndices(latShape)

            updates = tf.gather_nd(magneticField, indices)
            updates = updates - np.pi * tf.sign(updates)

            magneticField = tf.tensor_scatter_nd_update(
                magneticField, indices, updates)
        return 2.0 / self.gaugeCoupling * magneticField
Example #25
    def _dequeue_and_enqueue(self, keys):
        # keys: [GN, C]
        end_queue_ptr = self.queue_ptr + self.global_batch_size
        indices = tf.range(self.queue_ptr, end_queue_ptr,
                           dtype=tf.int64)  # [GN,  ]
        indices = tf.expand_dims(indices, axis=1)  # [GN, 1]

        # update to new values
        updated_queue = tf.tensor_scatter_nd_update(tensor=self.queue,
                                                    indices=indices,
                                                    updates=keys)
        updated_queue_ptr = end_queue_ptr % self.K

        # update queue
        self.queue.assign(updated_queue)

        # update pointer
        self.queue_ptr.assign(updated_queue_ptr)
        return
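The same pointer-and-scatter pattern in isolation (sizes and names illustrative, not from the original class):

K, C, N = 8, 4, 2                       # queue length, feature dim, batch
queue = tf.Variable(tf.zeros([K, C]))
queue_ptr = tf.Variable(0, dtype=tf.int64)
keys = tf.ones([N, C])
idx = tf.range(queue_ptr, queue_ptr + N, dtype=tf.int64)[:, None]
queue.assign(tf.tensor_scatter_nd_update(queue, idx, keys))
queue_ptr.assign((queue_ptr + N) % K)   # wrap the write pointer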
Example #26
def _merge_sorted(a, b, *args):
    """
    Merge sorted arrays efficiently, inspired by https://stackoverflow.com/a/54131815

    Parameters
    ----------
    a: tf.Tensor
        Sorted tensor for ordering
    b: tf.Tensor
        Sorted tensor for ordering
    args: list of tuple of tf.Tensor
            Some data ordered according to a and b that need to be merged whilst keeping the order.


    Returns
    -------
    cs: list of tf.Tensor
        Merging of a and b (and of each tensor pair in args) in sorted order.

    """
    with tf.name_scope("merge_sorted"):
        assert len(a.shape) == len(b.shape) == 1
        a_shape, b_shape = tf.shape(a)[0], tf.shape(b)[0]
        c_len = tf.shape(a)[0] + tf.shape(b)[0]
        if a_shape < b_shape:
            a, b = b, a
            a_shape, b_shape = tf.shape(a)[0], tf.shape(b)[0]
            args = tuple((j, i) for i, j in args)
        b_indices = tf.range(b_shape, dtype=tf.int32) + tf.searchsorted(a, b)
        a_indices = tf.ones((c_len, ), dtype=tf.bool)
        a_indices = tf.tensor_scatter_nd_update(
            a_indices, b_indices[:, None], tf.zeros_like(b_indices, tf.bool))
        c_range = tf.range(c_len, dtype=tf.int32)
        a_mask = tf.boolean_mask(c_range, a_indices)[:, None]

        def _inner_merge(u, v):
            c = tf.concat([u, v], 0)
            c = tf.tensor_scatter_nd_update(c, b_indices[:, None], v)
            c = tf.tensor_scatter_nd_update(c, a_mask, u)
            return c

        return (_inner_merge(a, b), ) + tuple(
            _inner_merge(i, j) for i, j in args)
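For instance, merging two sorted 1-D tensors in eager mode, with no extra data tensors:

a = tf.constant([1., 4., 9.])
b = tf.constant([2., 3.])
merged, = _merge_sorted(a, b)
print(merged)  # tf.Tensor([1. 2. 3. 4. 9.], shape=(5,), dtype=float32)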
Example #27
def read_one_mol(idx):
    atoms = oe_mol_dicts[int(idx.numpy())]['atomic_symbols']
    atoms = tf.expand_dims(tf.convert_to_tensor(
            atoms,
            tf.string),
        1)
    atoms = tf.cast(
        tf.map_fn(
            lambda x: TRANSLATION[x.numpy()[0]],
            atoms,
            tf.int32),
        tf.int64)

    atoms = tf.reshape(
        atoms,
        [-1])

    n_atoms = tf.shape(atoms, tf.int64)[0]

    bonds = tf.convert_to_tensor(
        oe_mol_dicts[int(idx.numpy())]['connectivity'],
        dtype=tf.float32)

    adjacency_map = tf.zeros(
        (n_atoms, n_atoms),
        tf.float32)

    adjacency_map = tf.tensor_scatter_nd_update(
        adjacency_map,

        tf.cast(
            bonds[:, :2],
            tf.int64),

        bonds[:, 2])

    adjacency_map = tf.i_o.utils.conjugate_average(atoms, adjacency_map)

    charges = tf.convert_to_tensor(
        oe_mol_dicts[int(idx.numpy())]['partial_charges'],
        tf.float32)

    return atoms, adjacency_map, charges
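The adjacency-matrix fill at the heart of this reader, in isolation: each bond row is (atom_i, atom_j, bond_order), with illustrative values:

bonds = tf.constant([[0., 1., 1.], [1., 2., 2.]])
adj = tf.zeros((3, 3), tf.float32)
adj = tf.tensor_scatter_nd_update(
    adj, tf.cast(bonds[:, :2], tf.int64), bonds[:, 2])
# adj[0, 1] = 1.0 (single bond), adj[1, 2] = 2.0 (double bond)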
Example #28
def test2():
    '''Test a fixed convolution kernel.'''
    kernel_value = tf.zeros((3, 3, 2, 4), dtype=tf.float32)
    for i in range(4):
        kernel_value = tf.tensor_scatter_nd_update(kernel_value,
                                                   [[1, 1, i % 2, i]], [1])
    # print('kernel_value:', kernel_value)
    x = tf.random.uniform((1, 4, 4, 4), dtype=tf.float32)
    conv1 = tf.keras.layers.Conv2D(4,
                                   3,
                                   strides=1,
                                   padding='same',
                                   groups=2,
                                   use_bias=False)
    out = conv1(x, training=False)  # first call builds the layer's kernel
    conv1.kernel.assign(kernel_value)
    out = conv1(x, training=False)
    tf.print('x:', x)
    tf.print('out:', out)
Example #29
def random_proba(shape, clipindex=None, clipproba=None, seed=None):

    logits = tf.random.normal(shape=shape,
                              mean=0.0,
                              stddev=xsigma(shape),
                              dtype=tf.float32,
                              seed=seed)

    if clipindex is not None:

        clipproba = clip_for_logits(clipproba)

        cliplogits = tf.math.log(clipproba) - tf.math.log(1.0 - clipproba)

        logits = tf.tensor_scatter_nd_update(logits, [clipindex], [cliplogits])

    proba = tf.math.softmax(logits)

    return proba
Example #30
        def single_batch_nms(candidate_boxes):
            # filter out predictions with score less than score_threshold
            candidate_boxes = tf.boolean_mask(
                candidate_boxes,
                candidate_boxes[..., 4] >= self.score_threshold)
            outputs = tf.zeros(
                (self.max_detection + 1, tf.shape(candidate_boxes)[-1]))
            indices = []
            updates = []

            count = 0
            # keep running this until there's no more candidate box or max_detection is met
            while tf.shape(
                    candidate_boxes)[0] > 0 and count < self.max_detection:
                # pick the box with the highest score
                best_idx = tf.math.argmax(candidate_boxes[..., 4], axis=0)
                best_box = candidate_boxes[best_idx]
                # add this best box to the output
                indices.append([count])
                updates.append(best_box)
                count += 1
                # remove this box from candidate boxes
                candidate_boxes = tf.concat([
                    candidate_boxes[0:best_idx],
                    candidate_boxes[best_idx + 1:tf.shape(candidate_boxes)[0]]
                ],
                                            axis=0)
                # calculate IOU between this box and all remaining candidate boxes
                iou = broadcast_iou(best_box[0:4], candidate_boxes[..., 0:4])
                # remove all candidate boxes with IOU bigger than iou_threshold
                candidate_boxes = tf.boolean_mask(candidate_boxes,
                                                  iou[0] <= self.iou_threshold)
            if count > 0:
                # also append num_detection to the result
                count_index = [[self.max_detection]]
                count_updates = [
                    tf.fill([tf.shape(candidate_boxes)[-1]], count)
                ]
                indices = tf.concat([indices, count_index], axis=0)
                updates = tf.concat([updates, count_updates], axis=0)
                outputs = tf.tensor_scatter_nd_update(outputs, indices,
                                                      updates)
            return outputs