def __call__(self, labels, encodings):
        batch_size = labels.shape[0]
        labels = tf.reshape(labels, (-1, ))  # to 1-D

        # Get the centers of each sample in this batch
        centers_batch = tf.gather(self.centers, labels)

        # Compute loss
        delta = tf.subtract(
            centers_batch,
            encodings)  # difference between encodings and centers
        loss = tf.nn.l2_loss(delta) / batch_size

        # Update centers
        unique_labels, unique_idc, unique_counts = tf.unique_with_counts(
            labels)
        appear_times = tf.gather(unique_counts, unique_idc)
        appear_times = tf.reshape(appear_times, (-1, 1))
        delta = delta / tf.cast((1 + appear_times), tf.float32)
        delta = tf.scalar_mul(self.alpha, delta)
        labels = tf.expand_dims(labels, -1)  # to match dim of self.centers
        self.centers.assign(
            tf.tensor_scatter_nd_sub(self.centers, labels, delta))

        return loss, tf.identity(self.centers)
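
A minimal self-contained sketch of the same update pattern, outside any class (the names and sizes below are illustrative, not from the snippet above):

import tensorflow as tf

def center_loss_step(centers, encodings, labels, alpha=0.5):
    # distance between each encoding and the center of its class
    centers_batch = tf.gather(centers, labels)
    delta = centers_batch - encodings
    loss = tf.nn.l2_loss(delta) / tf.cast(tf.shape(encodings)[0], tf.float32)

    # scale each sample's delta by how often its class appears in the batch
    _, unique_idx, unique_counts = tf.unique_with_counts(labels)
    appear_times = tf.reshape(tf.gather(unique_counts, unique_idx), (-1, 1))
    delta = alpha * delta / tf.cast(1 + appear_times, tf.float32)

    # tensor_scatter_nd_sub expects indices of shape [batch, 1] for row updates
    centers.assign(tf.tensor_scatter_nd_sub(centers, tf.expand_dims(labels, -1), delta))
    return loss

centers = tf.Variable(tf.zeros((10, 128)))  # 10 classes, 128-d encodings
loss = center_loss_step(centers, tf.random.normal((4, 128)), tf.constant([1, 1, 3, 7]))
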
    def call(self, embedding, labels):
        centers_batch = tf.gather(self.centers, labels)
        # move each batch center a fraction (1 - alpha) toward its samples
        diff = (1 - self.alpha) * (centers_batch - embedding)
        # scatter indices must have shape [batch, 1] to address whole center rows
        centers = tf.tensor_scatter_nd_sub(self.centers,
                                           tf.expand_dims(labels, 1), diff)
        center_loss = self.mse()
        logits = self.fc(embedding)
        return logits, center_loss
    def calculate_pooling_center_loss(features,
                                      label,
                                      alfa,
                                      nrof_classes,
                                      weights,
                                      name,
                                      centers=None):
        features = tf.reshape(features, [features.shape[0], -1])
        label = tf.argmax(label, 1)

        nrof_features = features.get_shape()[1]
        if centers is None:
            centers = tf.compat.v1.get_variable(
                name, [nrof_classes, nrof_features],
                dtype=tf.float32,
                initializer=tf.constant_initializer(0),
                trainable=False)
        label = tf.reshape(label, [-1])
        centers_batch = tf.gather(centers, label)
        centers_batch = tf.nn.l2_normalize(centers_batch, axis=-1)

        diff = (1 - alfa) * (centers_batch - features)
        centers = tf.tensor_scatter_nd_sub(centers,
                                           tf.expand_dims(label, axis=-1),
                                           diff)

        with tf.control_dependencies([centers]):
            distance = tf.square(features - centers_batch)
            distance = tf.reduce_sum(distance, axis=-1)
            center_loss = tf.reduce_mean(distance)

        center_loss = tf.identity(center_loss * weights, name=name + '_loss')

        return center_loss, centers
Example #4
    def call(self, y_true, y_pred):
        embedding = y_pred[:, :self.feature_dim]
        labels = tf.argmax(y_true, axis=1)
        centers_batch = tf.gather(self.centers, labels)
        # loss = tf.reduce_mean(tf.square(embedding - centers_batch))
        loss = tf.reduce_mean(tf.square(embedding - centers_batch),
                              axis=-1) * self.factor

        # Update centers
        # diff = (1 - self.alpha) * (centers_batch - embedding)
        diff = centers_batch - embedding
        unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
        appear_times = tf.gather(unique_count, unique_idx)
        appear_times = tf.reshape(appear_times, [-1, 1])

        diff = diff / tf.cast((1 + appear_times), tf.float32)
        diff = self.alpha * diff
        # print(centers_batch.shape, self.centers.shape, labels.shape, diff.shape)
        self.centers.assign(
            tf.tensor_scatter_nd_sub(self.centers, tf.expand_dims(labels, 1),
                                     diff))
        # centers_batch = tf.gather(self.centers, labels)
        if self.logits_loss:
            self.centerloss = tf.reduce_mean(loss)
            tf.print("\033[k - centerloss:", self.centerloss, end="")
            return self.logits_loss(y_true, y_pred[:, self.feature_dim:]) + loss
        else:
            return loss
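
This variant assumes the model's output is the embedding concatenated with the classifier logits; a hypothetical sketch of how such a y_pred could be produced (layer sizes are made up):

import tensorflow as tf
from tensorflow import keras

feature_dim, num_classes = 128, 10
inputs = keras.Input(shape=(784,))
embedding = keras.layers.Dense(feature_dim)(inputs)
logits = keras.layers.Dense(num_classes)(embedding)
# the loss above slices y_pred[:, :feature_dim] and y_pred[:, feature_dim:] apart again
model = keras.Model(inputs, keras.layers.Concatenate()([embedding, logits]))
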
Example #5
    def calculate_attention_loss(self, labels, embeddings, beta=0.05):
        """
        :param labels: Tensor shape (B, )
        :param embeddings: Tensor shape (B, 1, 1, attention_num * feature_num)
        :param beta: float
        :return: loss Tensor, float32
        """
        embeddings = tf.squeeze(embeddings, axis=[1, 2])
        embeddings = tf.cast(embeddings, dtype=tf.float32)
        batch, dims = embeddings.shape

        labels = labels - tf.constant(1)

        if self.global_feature_centers is None:
            self.global_feature_centers = tf.zeros(shape=(self.num_class,
                                                          dims))
        self.global_feature_centers = tf.cast(self.global_feature_centers,
                                              dtype=tf.float32)

        batch_centers = tf.gather(self.global_feature_centers, labels)
        batch_centers = tf.math.l2_normalize(batch_centers, axis=-1)

        diff = beta * (batch_centers - embeddings)
        labels = tf.expand_dims(labels, axis=-1)
        self.global_feature_centers = tf.tensor_scatter_nd_sub(
            self.global_feature_centers, labels, diff)
        distance = tf.math.square(embeddings - batch_centers)
        distance = tf.math.reduce_sum(distance, axis=-1)
        loss = tf.reduce_mean(distance)

        return loss
Example #6
    def __call__(self, features, label):
        embedding_size = tf.shape(features)[1]
        # indices of shape [batch, 1] select whole center rows for gather_nd/scatter
        label = tf.expand_dims(label, axis=1)
        centers_batch = tf.gather_nd(self.centers, label)
        diff = (1 - self.alpha) * (centers_batch - features)
        # updated center table (not assigned back to self.centers in this snippet)
        centers = tf.tensor_scatter_nd_sub(self.centers, label, diff)
        loss = tf.math.reduce_mean(tf.math.square(features - centers_batch))
        return loss
Example #7
def _get_transition_matrix(lamda: tf.Tensor, d: int, dtype: tf.DType) -> tf.Tensor:
    with tf.name_scope("get_transition_matrix"):
        F = tf.linalg.diag(tf.ones((d - 1,), dtype=dtype), k=1, num_cols=d, num_rows=d)
        binomial_coeffs = binom(d, np.arange(0, d, dtype=int)).astype(dtype)
        binomial_coeffs = tf.convert_to_tensor(binomial_coeffs, dtype=dtype)
        lambda_powers = lamda ** np.arange(d, 0, -1, dtype=dtype)
        update_indices = [[d - 1, k] for k in range(d)]
        F = tf.tensor_scatter_nd_sub(F, update_indices, lambda_powers * binomial_coeffs)
        return F
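
For intuition, a standalone sketch (not taken from the library) that builds the same companion-style matrix for d = 3 and a concrete lambda, where tensor_scatter_nd_sub writes -C(d, k) * lambda^(d-k) into the last row:

import numpy as np
import tensorflow as tf
from scipy.special import binom

d, lam = 3, 2.0
F = tf.linalg.diag(tf.ones((d - 1,), dtype=tf.float64), k=1, num_rows=d, num_cols=d)
coeffs = tf.constant(binom(d, np.arange(d)), dtype=tf.float64)       # C(d, 0), ..., C(d, d-1)
powers = tf.constant(lam ** np.arange(d, 0, -1), dtype=tf.float64)   # lam^d, ..., lam^1
F = tf.tensor_scatter_nd_sub(F, [[d - 1, k] for k in range(d)], powers * coeffs)
print(F)  # ones on the superdiagonal; last row = -[8., 12., 6.]
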
Example #8
    def _step_reward(self, observation: tf.Tensor, action: tf.Tensor,
                     next_observation: tf.Tensor) -> tf.Tensor:
        done = self._termination.terminates(next_observation)
        theta = next_observation[:, 2]

        reward = tf.cos(theta)
        # The episode is ended if the cart leaves the observation space.
        if any(done.numpy()):
            termination_indexes = tf.where(done)
            number_terminations = termination_indexes.shape[0]
            reward = tf.tensor_scatter_nd_sub(
                reward,
                termination_indexes,
                tf.constant(100,
                            shape=(number_terminations, ),
                            dtype=tf.float32),
            )

        return reward
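
The same penalty pattern in isolation, with made-up rewards and termination flags:

import tensorflow as tf

reward = tf.constant([1.0, 0.5, -0.2, 0.9])
done = tf.constant([False, True, False, True])

termination_indexes = tf.where(done)                  # shape [num_done, 1]
penalty = tf.fill([tf.shape(termination_indexes)[0]], 100.0)
reward = tf.tensor_scatter_nd_sub(reward, termination_indexes, penalty)
print(reward)  # [1.0, -99.5, -0.2, -99.1]
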
Example #9
    def call(self, y_true, embedding):
        # embedding = y_pred[:, : self.emb_shape]
        labels = tf.argmax(y_true, axis=1)
        centers_batch = tf.gather(self.centers, labels)
        # loss = tf.reduce_mean(tf.square(embedding - centers_batch))
        loss = self.__calculate_center_loss__(centers_batch, embedding)

        # Update centers
        diff = centers_batch - embedding
        unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
        appear_times = tf.cast(tf.gather(unique_count, unique_idx), tf.float32)

        # diff = diff / tf.expand_dims(appear_times, 1)
        diff = diff / tf.expand_dims(appear_times + 1, 1)  # Δcj
        diff = self.num_replicas * self.alpha * diff
        # print(centers_batch.shape, self.centers.shape, labels.shape, diff.shape)
        self.centers.assign(tf.tensor_scatter_nd_sub(self.centers, tf.expand_dims(labels, 1), diff))
        # centers_batch = tf.gather(self.centers, labels)
        return loss
Example #10
def _move_events(event_tensor, event_id, m, from_t, to_t, n_move):
    """Subtracts n_move from event_tensor[m, from_t, event_id]
    and adds n_move to event_tensor[m, to_t, event_id].

    :param event_tensor: shape [M, T, X]
    :param event_id: the event id to move
    :param m: the metapopulation to move
    :param from_t: the move-from time
    :param to_t: the move-to time
    :param n_move: the number of events to move
    :return: the modified event_tensor
    """
    # Todo rationalise this -- compute a delta, and add once.
    indices = tf.stack(
        [m, from_t, tf.broadcast_to(event_id, m.shape)],
        axis=-1,  # All meta-populations
    )  # Event
    # Subtract x_star from the [from_t, :, event_id] row of the state tensor
    n_move = tf.cast(n_move, event_tensor.dtype)
    new_state = tf.tensor_scatter_nd_sub(event_tensor, indices, n_move)
    indices = tf.stack([m, to_t, tf.broadcast_to(event_id, m.shape)], axis=-1)
    # Add x_star to the [to_t, :, event_id] row of the state tensor
    new_state = tf.tensor_scatter_nd_add(new_state, indices, n_move)
    return new_state
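
A toy call of the helper above, with made-up shapes and counts, just to show the bookkeeping (2 metapopulations, 3 timesteps, 2 event types):

import tensorflow as tf

events = tf.fill([2, 3, 2], 5.0)                # [M, T, X]
m = tf.constant([0, 1])                         # move in both metapopulations
moved = _move_events(events, event_id=1, m=m,
                     from_t=tf.constant([1, 1]),
                     to_t=tf.constant([2, 0]),
                     n_move=tf.constant([2.0, 3.0]))
print(moved[0, :, 1])  # [5., 3., 7.]
print(moved[1, :, 1])  # [8., 2., 5.]
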
Example #11
import numpy as np
import tensorflow as tf

if __name__ == '__main__':
    t2 = tf.constant([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14],
                      [15, 16, 17, 18, 19]])
    t6 = tf.constant([10])
    indices = tf.constant([[1], [3], [5], [7], [9]])
    data = tf.constant([2, 4, 6, 8, 10])
    """the tensor into which you insert values is zero-initialized"""
    print(tf.scatter_nd(indices=indices, updates=data, shape=t6))

    # Gather values from one tensor by specifying indices
    new_indices = tf.constant([[0, 2], [2, 1], [3, 3]])
    t7 = tf.gather_nd(t2, indices=new_indices)

    # Add these values into a new tensor
    t8 = tf.scatter_nd(indices=new_indices,
                       updates=t7,
                       shape=tf.constant([4, 5]))
    print(t8)
    """insert data into a tensor with pre-existing values"""
    t11 = tf.constant([[2, 7, 1], [9, 1, 1], [1, 3, 8]])
    t12 = tf.tensor_scatter_nd_add(t11,
                                   indices=[[0, 2], [1, 1], [2, 0]],
                                   updates=[6, 5, 4])
    print(t12)
    t13 = tf.tensor_scatter_nd_sub(t12,
                                   indices=[[0, 2], [1, 1], [2, 0]],
                                   updates=[6, 5, 4])
    print(t13)
Example #12
def compute_targets(anchors,
                    bboxes,
                    num_classes,
                    labels=None,
                    negative_iou_thresh=0.3,
                    positive_iou_thresh=0.5):
    """Compute Classification and Regression Targets for Anchor Box dependent loss
    
    Args:
        anchors (tensor): anchor boxes of shape [N x 4]
        bboxes (tensor): unnormalized bounding boxes in format (x1, x2, y1, y2) of shape [K x 4]
        num_classes (int): Number of classes
        labels (tensor): integer label (tf.int) for each bbox when there is more than one class; else assumed binary [K x 1]
        negative_iou_thresh (float): All anchors < negative_iou_thresh are considered background or 'negative' examples (anchor state 0)
        positive_iou_thresh (float): All anchors >= positive_iou_thresh are used as 'positive' examples for the loss (anchor state 1)
            * anything in between is set to 'ignore' (anchor state -1)
    
    Returns:
        classification_targets (tensor): Tensor of one-hot encoded labels for all bboxes with highest IoU per anchor, plus anchor state column (N, num_classes + 1)
            * anchor state column -> (1) for positive anchor boxes, (0) for negative, (-1) for those to be ignored
        regression_targets (tensor): Tensor of ground truth transformations applied to positive anchor boxes to get ground truth bounding boxes (N, 4 + 1)
            * 4 + 1 = 4 transformations on each coordinate + same anchor state column as classification targets
    """
    positive_indices, ignore_indices, negative_indices, max_iou_indices = tf_compute_gt_indices(
        anchors, bboxes, negative_iou_thresh=negative_iou_thresh,
        positive_iou_thresh=positive_iou_thresh)

    # create the sine column for whether an anchor is background (0), an object (1), or should be ignored (-1)
    iou_sine_col = tf.zeros(anchors.get_shape()[0])
    pos_iou_sine_col = tf.zeros(anchors.get_shape()[0])
    if positive_indices.get_shape()[0] != 0:
        # we call this something else b/c we can use it to get the positive classes matrix
        pos_iou_sine_col = tf.tensor_scatter_nd_add(
            iou_sine_col, positive_indices,
            tf.ones([tf.shape(positive_indices)[0]]))
    if ignore_indices.get_shape()[0] != 0:
        iou_sine_col = tf.tensor_scatter_nd_sub(
            pos_iou_sine_col, ignore_indices,
            tf.ones([tf.shape(ignore_indices)[0]]))

    #create the class targets (N, num_classes + 1)
    def _map_class(max_iou_indices, labels):
        """Map each anchor's best-overlapping bbox index to the corresponding label."""
        # pair each anchor index with the index of its best-matching bbox
        num_anchors = tf.shape(anchors)[0]
        pair_indices = tf.stack(
            [tf.range(num_anchors),
             tf.cast(max_iou_indices, dtype=tf.int32)],
            axis=1)
        broadcasted_labels = tf.broadcast_to(
            labels, [num_anchors, tf.shape(labels)[0]])
        anchor_classes = tf.gather_nd(broadcasted_labels, pair_indices)
        return anchor_classes

    if num_classes <= 2:
        classification_targets = tf.transpose(
            tf.stack([pos_iou_sine_col, iou_sine_col], axis=0))
    else:
        assert labels is not None, "Labels as tensor of ints for each bbox need to be passed if multiple classes"
        # map the bbox index that each anchor overlaps with the most to the corresponding label
        # this is very slow so need to come back to find a better way
        anchor_classes = _map_class(max_iou_indices, labels)

        # keep only the positive anchors (set the rest to -1, since tf.one_hot maps a -1 index to an all-zero row)
        anchor_classes = tf.tensor_scatter_nd_update(
            anchor_classes, ignore_indices,
            tf.fill([tf.shape(ignore_indices)[0]], -1))
        anchor_classes = tf.tensor_scatter_nd_update(
            anchor_classes, negative_indices,
            tf.fill([tf.shape(negative_indices)[0]], -1))

        class_matrix = tf.one_hot(tf.cast(anchor_classes, tf.int32),
                                  num_classes)

        #add on the sine col
        classification_targets = tf.concat(
            [class_matrix, tf.expand_dims(iou_sine_col, -1)], axis=1)

    #create regression targets (N, 4 + 1)
    #closest bounding box to each anchor
    gt_bboxes = tf.gather(bboxes, max_iou_indices)  # (N, 4)

    regression_matrix = compute_gt_transforms(anchors,
                                              gt_bboxes,
                                              mean=0.0,
                                              std=0.2)
    #add on the sine col
    regression_targets = tf.concat(
        [regression_matrix,
         tf.expand_dims(iou_sine_col, -1)], axis=1)
    return (classification_targets, regression_targets)
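
The anchor-state column trick used above (add 1 at positive anchors, subtract 1 at ignored ones) in isolation, with made-up indices:

import tensorflow as tf

num_anchors = 6
positive_indices = tf.constant([[1], [4]])
ignore_indices = tf.constant([[2]])

state = tf.zeros(num_anchors)
state = tf.tensor_scatter_nd_add(state, positive_indices, tf.ones(positive_indices.shape[0]))
state = tf.tensor_scatter_nd_sub(state, ignore_indices, tf.ones(ignore_indices.shape[0]))
print(state)  # [ 0.  1. -1.  0.  1.  0.]
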
Example #13
    def step(self, J, dt, voltage, refractory_time, adaptation, inhibition):
        """Implement the AdaptiveLIF nonlinearity."""
        def inhibit(voltage, output, inhibition, spiked_mask):
            # inhibit all other neurons than one with highest input
            J_mask = tf.equal(J, tf.reduce_max(J))

            voltage = tf.multiply(voltage, tf.cast(J_mask, voltage.dtype))
            output = tf.multiply(output, tf.cast(J_mask, output.dtype))
            spiked_mask = tf.logical_and(spiked_mask,
                                         tf.cast(J_mask, spiked_mask.dtype))
            inhibition = tf.where(
                tf.logical_and(tf.logical_not(J_mask),
                               tf.equal(inhibition, 0)), self.tau_inhibition,
                inhibition)

            return voltage, output, inhibition, spiked_mask

        J = J - adaptation

        # compute effective dt for each neuron, based on remaining time.
        # note that refractory times that have completed midway into this
        # timestep will be given a partial timestep, and moreover these will
        # be subtracted to zero at the next timestep (or reset by a spike)
        delta_t = tf.clip_by_value(dt - refractory_time, self.zero, dt)

        # update voltage using discretized lowpass filter
        # since v(t) = v(0) + (J - v(0))*(1 - exp(-t/tau)) assuming
        # J is constant over the interval [t, t + dt)
        dV = (voltage - J) * tf.math.expm1(-delta_t / self.tau_rc  # pylint: disable=invalid-unary-operand-type
                                           )
        voltage += dV

        # determine which neurons spiked (set them to 1/dt, else 0)
        spiked_mask = voltage > self.one
        output = tf.cast(spiked_mask, J.dtype) * self.alpha

        inhibition_mask = tf.equal(inhibition, 0)
        # if neuron that spiked had highest input but was still inhibited from a previous timestep
        voltage = tf.multiply(voltage, tf.cast(inhibition_mask, voltage.dtype))
        output = tf.multiply(output, tf.cast(inhibition_mask, output.dtype))
        spiked_mask = tf.logical_and(
            spiked_mask, tf.cast(inhibition_mask, spiked_mask.dtype))

        # inhibit all other neurons than one with highest input
        voltage, output, inhibition, spiked_mask = tf.cond(
            tf.math.count_nonzero(output) > 0,
            lambda: inhibit(voltage, output, inhibition, spiked_mask),
            lambda: (tf.identity(voltage), tf.identity(output),
                     tf.identity(inhibition), tf.identity(spiked_mask)))

        # set v(0) = 1 and solve for t to compute the spike time
        t_spike = dt + self.tau_rc * tf.math.log1p(-(voltage - 1) / (J - 1))

        # set spiked voltages to zero, refractory times to tau_ref, and
        # rectify negative voltages to a floor of min_voltage
        voltage = tf.where(spiked_mask, self.zeros,
                           tf.maximum(voltage, self.min_voltage))
        refractory_time = tf.where(spiked_mask, self.tau_ref + t_spike,
                                   refractory_time - dt)

        adaptation += (dt / self.tau_n) * (self.inc_n * output - adaptation)

        inhibition_mask = tf.not_equal(inhibition, 0)
        inhibition = tf.tensor_scatter_nd_sub(
            inhibition, tf.where(inhibition_mask),
            tf.ones(tf.math.count_nonzero(inhibition_mask)))

        return output, voltage, refractory_time, adaptation, inhibition
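
The countdown at the end (decrement every non-zero inhibition timer by one step) can be reproduced in isolation:

import tensorflow as tf

inhibition = tf.constant([0.0, 3.0, 0.0, 1.0])
still_inhibited = tf.where(tf.not_equal(inhibition, 0))   # shape [num_nonzero, 1]
inhibition = tf.tensor_scatter_nd_sub(
    inhibition, still_inhibited, tf.ones([tf.shape(still_inhibited)[0]]))
print(inhibition)  # [0., 2., 0., 0.]
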