Example #1
 def __init__(self,
              soft_dice_kwargs,
              ce_kwargs,
              aggregate="sum",
              square_dice=False,
              weight_ce=1,
              weight_dice=1):
     """
     CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want.
     :param soft_dice_kwargs: kwargs forwarded to the Dice loss
     :param ce_kwargs: kwargs forwarded to CrossentropyND
     :param aggregate: how the two terms are combined ("sum")
     :param square_dice: if True, use SoftDiceLossSquared instead of SoftDiceLoss
     :param weight_ce: multiplier for the cross-entropy term
     :param weight_dice: multiplier for the Dice term
     """
     super(DC_and_CE_loss, self).__init__()
     self.weight_dice = weight_dice
     self.weight_ce = weight_ce
     self.aggregate = aggregate
     self.ce = CrossentropyND(**ce_kwargs)
     if not square_dice:
         self.dc = SoftDiceLoss(apply_nonlin=softmax_helper,
                                **soft_dice_kwargs)
     else:
         self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper,
                                       **soft_dice_kwargs)
Example #2
    def __init__(self,
                 plans_file,
                 fold,
                 local_rank,
                 output_folder=None,
                 dataset_directory=None,
                 batch_dice=True,
                 stage=None,
                 unpack_data=True,
                 deterministic=True,
                 distribute_batch_size=False,
                 fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        self.init_args = (plans_file, fold, local_rank, output_folder,
                          dataset_directory, batch_dice, stage, unpack_data,
                          deterministic, distribute_batch_size, fp16)
        self.distribute_batch_size = distribute_batch_size
        np.random.seed(local_rank)
        torch.manual_seed(local_rank)
        torch.cuda.manual_seed_all(local_rank)
        self.local_rank = local_rank

        torch.cuda.set_device(local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')

        self.val_loss_ma_alpha = 0.95
        self.val_loss_MA = None

        self.loss = None
        self.ce_loss = CrossentropyND()

        self.global_batch_size = None  # we need to know this to properly steer oversample
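Example #2 pins each process to its GPU, joins the NCCL process group via init_method='env://', and seeds every RNG with the local rank. A minimal sketch of that setup outside the trainer, assuming a launcher such as torchrun (or torch.distributed.launch) provides MASTER_ADDR, MASTER_PORT, RANK and WORLD_SIZE; init_distributed is a hypothetical helper, not part of nnUNet:

import numpy as np
import torch
import torch.distributed as dist

def init_distributed(local_rank: int):
    # 'env://' reads the rendezvous info (MASTER_ADDR, MASTER_PORT, RANK,
    # WORLD_SIZE) from the environment, which torchrun sets per process.
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl", init_method="env://")

    # Seeding with the rank, as the trainer does, gives each process a
    # different but reproducible random stream.
    np.random.seed(local_rank)
    torch.manual_seed(local_rank)
    torch.cuda.manual_seed_all(local_rank)

Launched with something like torchrun --nproc_per_node=4 train.py, each process would read its rank from the LOCAL_RANK environment variable and pass it in.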
Example #3
 def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", ex=True):
     super(DC_CE_Marginal_Exclusion_loss, self).__init__()
     self.aggregate = aggregate
     self.ce = CrossentropyND(**ce_kwargs)
     self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
     self.ex = Exclusion_loss(self.dc)
     self.ex_CE = Exclusion_loss(self.ce)
     self.ex_choice = ex
     print(f"mode:{aggregate}/ weight:[1:1] with exclusion:{ex}")
Example #4
 def __init__(self,
              soft_dice_kwargs,
              ce_kwargs,
              aggregate="sum",
              use_label=None):
     super(DC_and_CE_loss, self).__init__()
     self.aggregate = aggregate
     self.ce = CrossentropyND(**ce_kwargs)
     self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
     self.use_label = use_label
Example #5
 def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1):
     super(DC_and_CE_list_loss, self).__init__()
     self.weight_dice = weight_dice
     self.weight_ce = weight_ce
     self.aggregate = aggregate
     self.ce = CrossentropyND(**ce_kwargs)
     if not square_dice:
         self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
     else:
         self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)
Example #6
    def forward(self, net_output, target):
        # print('target shape is:',target.shape)
        #target shape is: torch.Size([8, 1, 192, 192, 48])
        ce_weights = torch.tensor([0.28, 0.28,
                                   0.44]).to(torch.cuda.current_device())
        ce_1 = CrossentropyND(weight=ce_weights)
        # dc_loss = self.dc(net_output, target)
        # # ce_loss = self.ce(net_output, target)
        # ce1_loss = ce_1(net_output, target)
        # target_layers=list()
        dc_loss_layers = list()
        ce_loss_layers = list()

        if isinstance(target, list):
            # print('The target is list!')

            for i in range(len(target)):
                # print('net_output[%d] is cuda?'%(2*i),net_output[2*i].is_cuda)
                # print('target[%d] is cuda?' % (i), target[i].is_cuda)
                # print('target %d shape is:'%i,target[i].shape)
                # print('net_output %d shape is:'%(2*i),net_output[2*i].shape)
                # print('net_output[%d] shape is:'%i,net_output[i].shape)
                # print('target[%d] shape is:' % i, target[i].shape)
                dc_loss_layers.append(self.dc(net_output[i], target[i]))
                ce_loss_layers.append(ce_1(net_output[i], target[i]))

            dc_loss = (dc_loss_layers[0] * 0.6 + dc_loss_layers[1] * 0.1 +
                       dc_loss_layers[2] * 0.1 + dc_loss_layers[3] * 0.1 +
                       dc_loss_layers[4] * 0.1)
            ce_loss = (ce_loss_layers[0] * 0.6 + ce_loss_layers[1] * 0.1 +
                       ce_loss_layers[2] * 0.1 + ce_loss_layers[3] * 0.1 +
                       ce_loss_layers[4] * 0.1)
            # print('Final dc_loss is:',dc_loss)
            # print('Final ce_loss is:',ce_loss)
            if self.aggregate == "sum":
                result = ce_loss + dc_loss

            else:
                raise NotImplementedError(
                    "nah son")  # reserved for other stuff (later)
            return result
        else:
            # print('Target is not list!')
            dc_loss = self.dc(net_output, target)
            ce_loss = ce_1(net_output, target)
            if self.aggregate == "sum":
                result = ce_loss + dc_loss

            else:
                raise NotImplementedError(
                    "nah son")  # reserved for other stuff (later)
            return result
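Example #6 hard-codes a 0.6/0.1/0.1/0.1/0.1 weighting over five deep-supervision outputs, with the highest-resolution output dominating, and only supports aggregate == "sum". The same aggregation can be written once for any number of levels; a small illustrative helper (the name and default weights are assumptions, not nnUNet API):

def deep_supervision_loss(loss_fn, outputs, targets,
                          weights=(0.6, 0.1, 0.1, 0.1, 0.1)):
    # Weighted sum of a base loss over matching output/target resolutions.
    assert len(outputs) == len(targets) == len(weights)
    total = 0.0
    for w, out, tgt in zip(weights, outputs, targets):
        total = total + w * loss_fn(out, tgt)
    return total

Example #6's Dice branch would then read roughly deep_supervision_loss(self.dc, net_output, target), and the CE branch the same with ce_1.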
Example #7
 def __init__(self,
              plans_file,
              fold,
              output_folder=None,
              dataset_directory=None,
              batch_dice=True,
              stage=None,
              unpack_data=True,
              deterministic=True,
              fp16=False):
     super().__init__(plans_file, fold, output_folder, dataset_directory,
                      batch_dice, stage, unpack_data, deterministic, fp16)
     self.loss = CrossentropyND()
Example #8
    def __init__(self,
                 input_channels,
                 base_num_features,
                 num_classes,
                 num_pool,
                 num_conv_per_stage=2,
                 feat_map_mul_on_downscale=2,
                 conv_op=nn.Conv2d,
                 norm_op=nn.BatchNorm2d,
                 norm_op_kwargs=None,
                 dropout_op=nn.Dropout2d,
                 dropout_op_kwargs=None,
                 nonlin=nn.LeakyReLU,
                 nonlin_kwargs=None,
                 deep_supervision=True,
                 dropout_in_localization=False,
                 weightInitializer=InitWeights_He(1e-2),
                 pool_op_kernel_sizes=None,
                 conv_kernel_sizes=None,
                 upscale_logits=False,
                 convolutional_pooling=False,
                 convolutional_upsampling=False,
                 max_num_features=None):
        """
		As opposed to the Generic_UNet, this class will compute parts of the loss function in the forward pass. This is
		useful for GPU parallelization. The batch DICE loss, if used, must be computed over the whole batch. Therefore, in a
		naive implementation, all softmax outputs must be copied to a single GPU which will then
		do the loss computation all by itself. In the context of 3D Segmentation, this results in a lot of overhead AND
		is inefficient because the DICE computation is also kinda expensive (Think 8 GPUs with a result of shape
		2x4x128x128x128 each.). The DICE is a global metric, but its parts can be computed locally (TP, FP, FN). Thus,
		this implementation will compute all the parts of the loss function in the forward pass (and thus in a
		parallelized way). The results are very small (batch_size x num_classes for TP, FN and FP, respectively; scalar for CE) and
		copied easily. Also the final steps of the loss function (computing batch dice and average CE values) are easy
		and very quick on the one GPU they need to run on. BAM.
		final_nonlin is lambda x:x here!
		"""
        super(Generic_UNet_DP, self).__init__(
            input_channels, base_num_features, num_classes, num_pool,
            num_conv_per_stage, feat_map_mul_on_downscale, conv_op, norm_op,
            norm_op_kwargs, dropout_op, dropout_op_kwargs, nonlin,
            nonlin_kwargs, deep_supervision, dropout_in_localization,
            lambda x: x, weightInitializer, pool_op_kernel_sizes,
            conv_kernel_sizes, upscale_logits, convolutional_pooling,
            convolutional_upsampling, max_num_features)
        self.ce_loss = CrossentropyND()
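The docstring above describes returning only the small per-replica pieces of the loss (TP/FP/FN of shape batch x classes, plus a scalar CE) and assembling the batch Dice afterwards on a single device. A rough sketch of that idea, not nnUNet's exact implementation (the function names and smooth term are assumptions):

import torch
import torch.nn.functional as F

def dice_parts(logits, target):
    # Per-sample, per-class soft TP / FP / FN: small (B, C) tensors that are
    # cheap to copy off each replica.
    probs = F.softmax(logits, dim=1)
    onehot = F.one_hot(target, num_classes=logits.shape[1]).movedim(-1, 1).float()
    axes = tuple(range(2, logits.ndim))              # spatial axes only
    tp = (probs * onehot).sum(axes)
    fp = (probs * (1 - onehot)).sum(axes)
    fn = ((1 - probs) * onehot).sum(axes)
    return tp, fp, fn

def batch_dice_loss_from_parts(tp, fp, fn, smooth=1e-5):
    # Reduce over the batch axis first, then compute one global Dice per class.
    tp, fp, fn = tp.sum(0), fp.sum(0), fn.sum(0)
    dice = (2 * tp + smooth) / (2 * tp + fp + fn + smooth)
    return 1 - dice.mean()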
Example #9
File: dice_loss.py  Project: whyuek/nnUNet
 def __init__(self, gdl_dice_kwargs, ce_kwargs, aggregate="sum"):
     super(GDL_and_CE_loss, self).__init__()
     self.aggregate = aggregate
     self.ce = CrossentropyND(**ce_kwargs)
     self.dc = GDL(softmax_helper, **gdl_dice_kwargs)
Example #10
 def __init__(self, soft_iou_kwargs, ce_kwargs, aggregate="sum"):
     super(Iou_and_CE_loss, self).__init__()
     self.aggregate = aggregate
     self.ce = CrossentropyND(**ce_kwargs)
     self.iou = IoULoss(apply_nonlin=softmax_helper, **soft_iou_kwargs)
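Example #10 follows the same template but pairs CE with an IoU (Jaccard) term instead of Dice. For reference, a simplified soft IoU over softmax probabilities (not nnUNet's IoULoss; the smooth constant is an assumption):

import torch
import torch.nn.functional as F

def soft_iou_loss(logits, target, smooth=1e-5):
    # Soft intersection-over-union per class, averaged; target holds class indices.
    probs = F.softmax(logits, dim=1)
    onehot = F.one_hot(target, num_classes=logits.shape[1]).movedim(-1, 1).float()
    axes = tuple(range(2, logits.ndim))
    intersection = (probs * onehot).sum(axes)
    union = (probs + onehot - probs * onehot).sum(axes)
    return 1 - ((intersection + smooth) / (union + smooth)).mean()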
Example #11
 def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", ex=True):
     super(pann_loss, self).__init__()
     self.aggregate = aggregate
     self.ce = CrossentropyND(**ce_kwargs)
     self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)