Example #1
    def _build_loss(self):
        train_paf, train_paf_mask = super().get_train_paf()
        train_heatmap, train_heatmap_mask = super().get_train_heatmap()
        train_mask = super().get_train_mask()

        paf_losses = []
        heatmap_losses = []
        for paf in super().get_paf_tensors():
            paf_loss = Loss.abs_loss(train_paf, paf, raw_tensor=True)

            # --- LOSS MASKING
            paf_loss = paf_loss * tf.expand_dims(train_mask, axis=-1)
            paf_loss = paf_loss * train_paf_mask

            if self._paf_weight is not None:
                abs_training_paf = tf.abs(train_paf)

                mask = tf.cast(
                    tf.math.greater(abs_training_paf, EPS),
                    dtype=tf.float32
                )

                weights_mask = mask * self._paf_weight + self.__IDENTITY

                paf_loss = paf_loss * weights_mask

            paf_losses.append(
                tf.reduce_mean(paf_loss)
            )

        for heatmap in super().get_heatmap_tensors():
            heatmap_loss = Loss.abs_loss(train_heatmap, heatmap, raw_tensor=True)

            # --- LOSS MASKING
            heatmap_loss = heatmap_loss * tf.expand_dims(train_mask, axis=-1)
            heatmap_loss = heatmap_loss * train_heatmap_mask

            if self._heatmap_weight is not None:
                # Create the mask used to scale the loss
                # Add 1.0 so that positions where the target is (approximately) zero keep their original loss
                weight_mask = train_heatmap * self._heatmap_weight + self.__IDENTITY

                heatmap_loss = heatmap_loss * weight_mask

            heatmap_losses.append(
                tf.reduce_mean(heatmap_loss)
            )

        self._paf_loss = tf.reduce_sum(paf_losses)
        self._heatmap_loss = tf.reduce_sum(heatmap_losses)

        loss = self._heatmap_loss * self._heatmap_scale + \
               self._paf_loss * self._paf_scale

        # For Tensorboard
        super().track_loss(self._paf_loss, PETrainer.PAF_LOSS)
        super().track_loss(self._heatmap_loss, PETrainer.HEATMAP_LOSS)

        return loss
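
The `mask * weight + 1.0` pattern above scales the loss up wherever the target PAF is non-zero, while background pixels keep a weight of 1 (so their loss is not zeroed out). A minimal standalone sketch of that weighting, assuming `EPS` is a small positive threshold and `self.__IDENTITY` equals 1.0 (`PAF_WEIGHT` is a hypothetical stand-in for `self._paf_weight`):

import tensorflow as tf

EPS = 1e-6         # assumed small threshold, as used above
PAF_WEIGHT = 5.0   # hypothetical value for self._paf_weight

train_paf = tf.constant([[0.0, 0.3], [-0.7, 0.0]])
raw_loss = tf.constant([[0.1, 0.2], [0.4, 0.05]])

# 1.0 where the target PAF is (approximately) non-zero, 0.0 elsewhere
mask = tf.cast(tf.abs(train_paf) > EPS, tf.float32)

# Foreground entries get weight PAF_WEIGHT + 1.0, background stays at 1.0
weights_mask = mask * PAF_WEIGHT + 1.0
print((raw_loss * weights_mask).numpy())  # only foreground losses are scaled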
Example #2
    def _build_loss(self):
        logits = super().get_logits()
        labels = super().get_labels()

        num_positives = None
        if self._normalize_by_positives:
            positives = tf.cast(tf.not_equal(labels, 0),
                                tf.float32)  # [BATCH_SIZE, ...]
            positives_dim_n = len(positives.get_shape())
            axis = list(range(1, positives_dim_n))
            num_positives = tf.reduce_sum(
                positives, axis=axis)  # [BATCH_SIZE]

        focal_loss = Loss.focal_binary_loss(
            logits=logits,
            labels=labels,
            num_positives=num_positives,
            focal_gamma=self._focal_gamma,
            label_smoothing=self._smoothing_labels)

        if not self._normalize_by_positives:
            focal_loss = focal_loss / float(super().get_batch_size())

        super().track_loss(focal_loss, FocalBinaryTrainer.FOCAL_LOSS)
        return focal_loss
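
For context, a sketch of the normalization this trainer toggles: when `normalize_by_positives` is set, each image's summed loss is divided by its number of positive labels, so densely annotated images do not dominate the batch. The focal term below is the standard binary focal loss; the actual `Loss.focal_binary_loss` may differ in details:

import tensorflow as tf

def focal_binary_loss_sketch(logits, labels, focal_gamma=2.0):
    # Hypothetical sketch, not the library implementation.
    labels = tf.cast(labels, tf.float32)
    ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
    p = tf.nn.sigmoid(logits)
    # Down-weight well-classified examples via the focal modulating factor
    p_t = labels * p + (1.0 - labels) * (1.0 - p)
    focal = tf.pow(1.0 - p_t, focal_gamma) * ce

    axes = list(range(1, len(labels.get_shape())))
    per_image = tf.reduce_sum(focal, axis=axes)
    num_positives = tf.reduce_sum(labels, axis=axes)
    # Guard against images without any positive labels
    return tf.reduce_sum(per_image / tf.maximum(num_positives, 1.0))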
Example #3
    def _build_loss(self):
        logits = super().get_logits()
        labels = super().get_labels()
        ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=logits)
        num_classes = super().get_num_classes()
        logits_shape = tf.shape(logits)
        batch_size = logits_shape[0]

        positives = tf.cast(tf.not_equal(labels, 0),
                            tf.float32)  # [BATCH_SIZE, ...]
        positives_dim_n = len(positives.get_shape())
        axis = list(range(1, positives_dim_n))
        num_positives = tf.reduce_sum(positives,
                                      axis=axis)  # [BATCH_SIZE]

        maki_loss = Loss.maki_loss(
            flattened_logits=tf.reshape(logits,
                                        shape=[batch_size, -1, num_classes]),
            flattened_labels=tf.reshape(labels, shape=[batch_size, -1]),
            num_classes=num_classes,
            num_positives=num_positives,
            maki_gamma=self._maki_gamma,
            ce_loss=ce_loss)
        super().track_loss(maki_loss, MakiLossTrainer.MAKI_LOSS)
        return maki_loss
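
The reshapes above flatten all spatial dimensions so `Loss.maki_loss` can treat the prediction as a flat batch of per-pixel class scores paired with per-pixel label indices. A quick shape check of that flattening (shapes are illustrative):

import tensorflow as tf

batch_size, h, w, num_classes = 2, 4, 4, 3
logits = tf.zeros([batch_size, h, w, num_classes])
labels = tf.zeros([batch_size, h, w], dtype=tf.int32)

flattened_logits = tf.reshape(logits, [batch_size, -1, num_classes])
flattened_labels = tf.reshape(labels, [batch_size, -1])
print(flattened_logits.shape)  # (2, 16, 3): class scores per pixel
print(flattened_labels.shape)  # (2, 16): class index per pixel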
Example #4
    def _build_loss(self):
        labels = super().get_labels()
        logits = super().get_logits()

        # p - predicted probability
        # g - ground truth label
        p = tf.nn.sigmoid(logits)
        g = labels
        dice_loss = Loss.dice_loss(
            p=p,
            g=g,
            eps=self._eps,
            axes=self._axes
        )

        super().track_loss(dice_loss, DiceTrainer.DICE_LOSS)
        return dice_loss
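
For reference, a common soft-Dice formulation that `Loss.dice_loss` plausibly implements (the library's exact variant may differ, e.g. squared terms in the denominator); `eps` keeps the ratio defined when both masks are empty:

import tensorflow as tf

def soft_dice_loss(p, g, eps=1e-5, axes=(1, 2, 3)):
    # Hypothetical sketch: p = predicted probabilities, g = binary ground truth.
    intersection = tf.reduce_sum(p * g, axis=axes)
    denom = tf.reduce_sum(p, axis=axes) + tf.reduce_sum(g, axis=axes)
    dice = (2.0 * intersection + eps) / (denom + eps)
    return tf.reduce_mean(1.0 - dice)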
Example #5
    def _build_loss(self):
        logits = super().get_logits()
        labels = super().get_labels()
        num_classes = super().get_num_classes()

        positives = tf.cast(tf.not_equal(labels, 0),
                            tf.float32)  # [BATCH_SIZE, ...]
        positives_dim_n = len(positives.get_shape())
        axis = list(range(1, positives_dim_n))
        num_positives = tf.reduce_sum(positives,
                                      axis=axis)  # [BATCH_SIZE]

        focal_loss = Loss.focal_loss(logits=logits,
                                     labels=labels,
                                     num_classes=num_classes,
                                     num_positives=num_positives,
                                     focal_gamma=self._focal_gamma,
                                     raw_tensor=True)

        weights = super().get_weight_map()
        focal_loss = tf.reduce_sum(focal_loss * weights)
        super().track_loss(focal_loss, WeightedFocalTrainer.FOCAL_LOSS)
        return focal_loss
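
The weight map multiplied in at the end is simply a per-pixel tensor broadcast against the raw focal loss; a typical use is class balancing. A hypothetical sketch of building such a map from sparse labels (the helper name and weights are illustrative):

import tensorflow as tf

def class_balance_weight_map(labels, class_weights):
    # Hypothetical helper: look up a weight for each pixel's class index.
    # labels: [BS, H, W] int32, class_weights: [NUM_CLASSES] float32
    return tf.gather(class_weights, labels)

labels = tf.constant([[[0, 1], [2, 0]]])  # [1, 2, 2]
weights = class_balance_weight_map(labels, tf.constant([0.5, 2.0, 4.0]))
print(weights.numpy())  # rarer classes (indices 1 and 2) get larger weights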
Example #6
    def _build_local_loss(self, prediction, label):
        abs_loss = Loss.abs_loss(label, prediction, raw_tensor=True)
        final_loss = tf.reduce_mean(abs_loss)
        return final_loss
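
Judging by its use here and in Example #1, `Loss.abs_loss(label, prediction, raw_tensor=True)` presumably returns the element-wise absolute error; a minimal sketch under that assumption:

import tensorflow as tf

def abs_loss(label, prediction, raw_tensor=False):
    # Hypothetical sketch: element-wise |label - prediction|,
    # reduced to a scalar mean unless the raw tensor is requested.
    loss = tf.abs(label - prediction)
    return loss if raw_tensor else tf.reduce_mean(loss)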
Example #7
    def _build_loss(self):
        ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=super().get_labels(), logits=super().get_logits())
        qce_loss = Loss.quadratic_ce_loss(ce_loss=ce_loss)
        super().track_loss(qce_loss, QCETrainer.QCE_LOSS)
        return qce_loss
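
The name suggests `Loss.quadratic_ce_loss` applies a quadratic transform to the per-sample cross-entropy. One plausible reading (an assumption, not the library's confirmed formula) squares each sample's CE before averaging, which penalizes confidently wrong predictions more heavily:

import tensorflow as tf

def quadratic_ce_loss(ce_loss):
    # Hypothetical sketch: square the per-sample cross-entropy, then average.
    return tf.reduce_mean(tf.square(ce_loss))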
Example #8
    def _build_loss(self):
        train_paf, train_paf_mask = super().get_train_paf()
        train_heatmap, train_heatmap_mask = super().get_train_heatmap()
        train_mask = super().get_train_mask()

        paf_losses = []
        heatmap_losses = []
        # --- PAF LOSS
        for paf in super().get_paf_tensors():
            # Division by 2.0 makes it similar to tf.nn.l2_loss
            paf_loss = Loss.mse_loss(train_paf, paf, raw_tensor=True) / 2.0

            # --- LOSS MASKING
            paf_loss = paf_loss * train_mask
            paf_loss = paf_loss * train_paf_mask

            if self._paf_weight is not None:
                abs_training_paf = tf.abs(train_paf)

                mask = tf.cast(tf.math.greater(abs_training_paf, EPS),
                               dtype=tf.float32)

                weights_mask = mask * self._paf_weight + self.__IDENTITY

                paf_loss = paf_loss * weights_mask

            paf_losses.append(tf.reduce_sum(paf_loss))
        # --- HEATMAP LOSS
        for heatmap in super().get_heatmap_tensors():
            # We need to expand dims first, because tf.keras.losses.binary_crossentropy averages
            # the loss along the last dimension: [BS, H, W, C] -> [BS, H, W]
            # [BS, H, W, C, 1]
            train_heatmap_expanded = tf.expand_dims(train_heatmap, axis=-1)
            heatmap_expanded = tf.expand_dims(heatmap, axis=-1)
            heatmap_loss = tf.keras.losses.binary_crossentropy(
                y_true=train_heatmap_expanded,
                y_pred=heatmap_expanded,
                label_smoothing=self._label_smoothing) / 2.0

            # --- LOSS MASKING
            heatmap_loss = heatmap_loss * train_mask
            heatmap_loss = heatmap_loss * train_heatmap_mask

            # heatmap_loss - [BS, H, W, C]
            if self._is_nullify_absent_labels:
                # [bs, 1, 1, C]
                label_sum = tf.reduce_sum(train_heatmap * train_mask,
                                          axis=[1, 2],
                                          keepdims=True)
                scale_factor = label_sum / (label_sum + 1e-5)
                heatmap_loss = heatmap_loss * scale_factor

            if self._heatmap_weight is not None:
                # Create the mask used to scale the loss
                # Add 1.0 so that positions where the target is (approximately) zero keep their original loss
                weight_mask = train_heatmap * self._heatmap_weight + self.__IDENTITY
                heatmap_loss = heatmap_loss * weight_mask

            heatmap_losses.append(tf.reduce_sum(heatmap_loss))

        # The original repo takes the mean over the sums of the losses
        self._paf_loss = tf.reduce_mean(paf_losses)
        self._heatmap_loss = tf.reduce_mean(heatmap_losses)

        loss = self._heatmap_loss * self._heatmap_scale + \
               self._paf_loss * self._paf_scale

        # For Tensorboard
        super().track_loss(self._paf_loss, PETrainer.PAF_LOSS)
        super().track_loss(self._heatmap_loss, PETrainer.HEATMAP_LOSS)

        return loss
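
The `label_sum / (label_sum + 1e-5)` factor above acts as a soft indicator: for keypoint channels with no annotations in an image it is near 0 (silencing their loss), and for annotated channels it is near 1. A quick numeric check:

import tensorflow as tf

label_sum = tf.constant([0.0, 0.003, 12.7])  # per-channel heatmap mass
scale = label_sum / (label_sum + 1e-5)
print(scale.numpy())  # ~[0.0, 0.997, 1.0]: empty channels are nullified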