def evaluation(logits, labels, images, nlabels, loss_type):
    '''
    A function for evaluating the performance of the network on a minibatch. This function returns the loss and the
    current foreground Dice score, and also writes example segmentations and images to TensorBoard.
    :param logits: Output of network before softmax
    :param labels: Ground-truth label mask
    :param images: Input image mini batch
    :param nlabels: Number of labels in the dataset
    :param loss_type: Which loss should be evaluated
    :return: The loss without weight decay, the foreground dice of a minibatch
    '''

    mask = tf.argmax(tf.nn.softmax(logits, axis=-1), axis=-1)  # hard prediction over the class dimension (was axis 3)
    mask_gt = labels

    tf.summary.image('example_gt', prepare_tensor_for_summary(mask_gt, mode='mask', nlabels=nlabels))
    tf.summary.image('example_pred', prepare_tensor_for_summary(mask, mode='mask', nlabels=nlabels))
    tf.summary.image('example_zimg', prepare_tensor_for_summary(images, mode='image'))

    total_loss, nowd_loss, weights_norm = loss(logits, labels, nlabels=nlabels, loss_type=loss_type)

    cdice_structures = losses.per_structure_dice(logits, tf.one_hot(labels, depth=nlabels))
    cdice_foreground = cdice_structures[:, 1:]  # drop the background column (label 0)

    cdice = tf.reduce_mean(cdice_foreground)

    return nowd_loss, cdice
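
These snippets rely on helpers that are not shown here (loss, prepare_tensor_for_summary, and losses.per_structure_dice). As a rough orientation only, a per-structure soft Dice for 2-D inputs of shape [batch, H, W, nlabels] could look like the following sketch; the function name, signature, and epsilon smoothing are assumptions, not the actual implementation in losses:

import tensorflow as tf

def per_structure_dice_sketch(logits, labels_one_hot, epsilon=1e-10):
    # Soft Dice per class, 2 * |P * G| / (|P| + |G|), summed over the spatial axes,
    # giving one score per (example, label) pair with shape [batch, nlabels]
    probs = tf.nn.softmax(logits, axis=-1)                    # [batch, H, W, nlabels]
    intersection = tf.reduce_sum(probs * labels_one_hot, axis=[1, 2])
    size_pred = tf.reduce_sum(probs, axis=[1, 2])
    size_gt = tf.reduce_sum(labels_one_hot, axis=[1, 2])
    return (2.0 * intersection) / (size_pred + size_gt + epsilon)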
Example 2
def evaluate_losses(logits, labels, nlabels, loss_type):
    '''
    A function to compute various loss measures comparing the predicted and ground-truth annotations.
    '''
    
    nowd_loss = loss(logits, labels, nlabels=nlabels, loss_type=loss_type)

    # Per-structure Dice; tf.slice drops the background column (label 0), equivalent to
    # cdice_structures[:, 1:] above (see the toy check after this function)
    cdice_structures = losses.per_structure_dice(logits, tf.one_hot(labels, depth=nlabels))
    cdice_foreground = tf.slice(cdice_structures, (0, 1), (-1, -1))
    cdice = tf.reduce_mean(cdice_foreground)
    
    return nowd_loss, cdice, cdice_structures, cdice_foreground
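
As a sanity check of the slicing above, a toy example with made-up values: tf.slice with begin (0, 1) and size (-1, -1) keeps every row and every column from index 1 onward, i.e. it matches the cdice_structures[:, 1:] indexing used in the first snippet:

import tensorflow as tf

cdice_structures = tf.constant([[0.95, 0.70, 0.80],
                                [0.90, 0.60, 0.85]])             # [batch, nlabels]
cdice_foreground = tf.slice(cdice_structures, (0, 1), (-1, -1))  # drop column 0

with tf.Session() as sess:
    print(sess.run(cdice_foreground))  # [[0.7, 0.8], [0.6, 0.85]]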
Example 3
def evaluation(logits, labels, images, nlabels, loss_type, weak_supervision=False, cnn_threshold=None, include_bg=True):
    '''
    A function for evaluating the performance of the network on a minibatch. This function returns the loss and the
    current foreground Dice score, and also writes example segmentations and images to TensorBoard.
    :param logits: Output of network before softmax
    :param labels: Ground-truth label mask
    :param images: Input image mini batch
    :param nlabels: Number of labels in the dataset
    :param loss_type: Which loss should be evaluated
    :param weak_supervision: Whether the model is being trained with weak supervision
    :param cnn_threshold: Optional probability threshold for the background softmax channel; if set, a foreground
                          class is predicted only where its probability exceeds this value
    :param include_bg: Whether or not to include the definitely_background label
    :return: The loss without weight decay, the foreground dice of a minibatch
    '''
    
    softmax = tf.nn.softmax(logits, axis=-1)
    mask = tf.argmax(softmax, axis=-1)  # hard prediction over the class dimension (was axis 3)
    mask_gt = labels
    if weak_supervision:
        if cnn_threshold is not None:
            threshold = tf.constant(cnn_threshold, dtype=tf.float32)
            # Clamp the background channel to the threshold so that a foreground class
            # is predicted only where its softmax probability exceeds cnn_threshold
            s = tf.ones_like(softmax[..., :1]) * threshold
            softmax = tf.concat([s, softmax[..., 1:]], axis=-1)
            mask_thresholded = tf.argmax(softmax, axis=-1)
        if include_bg:
            # include_bg is typically True for training and False for validation.
            # Adding the summaries here creates two TensorFlow variables, which results
            # in two copies of the images appearing on TensorBoard, which is very annoying.
            tf.summary.image('example_gt', prepare_tensor_for_summary(mask_gt, mode='mask', nlabels=nlabels))
            tf.summary.image('example_pred', prepare_tensor_for_summary(mask, mode='mask', nlabels=nlabels))
            if cnn_threshold is not None:
                tf.summary.image('example_thresholded_pred', prepare_tensor_for_summary(mask_thresholded, mode='mask', nlabels=nlabels))
            tf.summary.image('example_zimg', prepare_tensor_for_summary(images, mode='image'))
    else:
        tf.summary.image('example_gt', prepare_tensor_for_summary(mask_gt, mode='mask', nlabels=nlabels))
        tf.summary.image('example_pred', prepare_tensor_for_summary(mask, mode='mask', nlabels=nlabels))
        tf.summary.image('example_zimg', prepare_tensor_for_summary(images, mode='image'))

    total_loss, nowd_loss, weights_norm = loss(logits, labels, nlabels=nlabels, loss_type=loss_type)

    # This variant of losses.per_structure_dice also returns intermediate tensors (unused here)
    cdice_structures, hard_pred, labels, l, r = losses.per_structure_dice(logits, tf.one_hot(labels, depth=nlabels))
    if weak_supervision and not include_bg:
        # tf.slice only accepts -1 ("slice to the end") as a negative size, so nlabels - 2 is
        # used explicitly to drop both the background column (0) and the last
        # (definitely_background) column
        cdice_foreground = tf.slice(cdice_structures, [0, 1], [-1, nlabels - 2])
    else:
        cdice_foreground = tf.slice(cdice_structures, [0, 1], [-1, -1])

    # Unlike the variants above, return the per-structure foreground Dice rather than its mean
    return nowd_loss, cdice_foreground
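
To make the cnn_threshold behaviour concrete, here is a toy example with made-up probabilities (TF 1.x session style): the background channel is replaced by the constant threshold, so a pixel receives a foreground label only when that class's softmax probability exceeds the threshold:

import tensorflow as tf

softmax = tf.constant([[[[0.50, 0.30, 0.20],     # best foreground prob 0.30 < 0.45 -> background
                         [0.10, 0.60, 0.30]]]])  # best foreground prob 0.60 > 0.45 -> class 1
threshold = tf.constant(0.45, dtype=tf.float32)
s = tf.ones_like(softmax[..., :1]) * threshold
thresholded = tf.concat([s, softmax[..., 1:]], axis=-1)
mask_thresholded = tf.argmax(thresholded, axis=-1)

with tf.Session() as sess:
    print(sess.run(mask_thresholded))  # [[[0 1]]]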
Example 4
    def evaluation(self, logits, labels, images):
        '''
        A function for evaluating the performance of the network on a minibatch. This function returns the loss and the
        current foreground Dice score, and also writes example segmentations and images to TensorBoard.
        :param logits: Output of network before softmax
        :param labels: Ground-truth label mask
        :param images: Input image mini batch
        :return: The loss without weight decay, the foreground dice of a minibatch
        '''
        nlabels = self.config.nlabels

        if self.params > 2:
            output = logits[0]
        else:
            output = logits

        mask = tf.argmax(tf.nn.softmax(output, axis=-1),
                         axis=-1)  # hard prediction: argmax over the class dimension
        mask_gt = labels

        tf.summary.image(
            'example_gt',
            self.prepare_tensor_for_summary(mask_gt,
                                            mode='mask',
                                            nlabels=nlabels))
        tf.summary.image(
            'example_pred',
            self.prepare_tensor_for_summary(mask, mode='mask',
                                            nlabels=nlabels))
        tf.summary.image('example_zimg',
                         self.prepare_tensor_for_summary(images, mode='image'))
        if self.params > 2:
            # Binarise the label map (any foreground class -> 1) and use it to mask the input image
            mask = tf.clip_by_value(tf.cast(labels, tf.float32), 0, 1)
            mask = tf.expand_dims(mask, -1)
            tf.summary.image(
                'example_masked_input',
                self.prepare_tensor_for_summary(images * mask, mode='image'))

        _, nowd_loss, _ = self.loss(logits, labels, images)

        cdice_structures = losses.per_structure_dice(
            output, tf.one_hot(labels, depth=nlabels))
        cdice_foreground = cdice_structures[:, 1:]

        cdice = tf.reduce_mean(cdice_foreground)

        return nowd_loss, cdice
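
A toy example with made-up values of the masking used for the 'example_masked_input' summary above: clipping the integer label map to [0, 1] turns every foreground class into 1, so the multiplication zeroes out the background pixels of the input image:

import tensorflow as tf

labels = tf.constant([[[0, 2],
                       [3, 0]]])                              # [batch, H, W] label map
binary = tf.expand_dims(tf.clip_by_value(tf.cast(labels, tf.float32), 0, 1), -1)
images = tf.constant([[[[10.0], [20.0]],
                       [[30.0], [40.0]]]])                    # [batch, H, W, 1]
masked = images * binary                                      # background pixels become 0

with tf.Session() as sess:
    print(sess.run(masked))  # [[[[ 0.] [20.]] [[30.] [ 0.]]]]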