Code example #1
File: nnUNetTrainerV2_DDP.py  Project: Karol-G/nnUNet
    def __init__(self,
                 plans_file,
                 fold,
                 local_rank,
                 output_folder=None,
                 dataset_directory=None,
                 batch_dice=True,
                 stage=None,
                 unpack_data=True,
                 deterministic=True,
                 distribute_batch_size=False,
                 fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        self.init_args = (plans_file, fold, local_rank, output_folder,
                          dataset_directory, batch_dice, stage, unpack_data,
                          deterministic, distribute_batch_size, fp16)
        self.distribute_batch_size = distribute_batch_size
        np.random.seed(local_rank)
        torch.manual_seed(local_rank)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(local_rank)
        self.local_rank = local_rank

        if torch.cuda.is_available():
            torch.cuda.set_device(local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')

        self.loss = None
        self.ce_loss = RobustCrossEntropyLoss()

        self.global_batch_size = None  # we need to know this to properly steer oversample
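For context, the init_method='env://' call above assumes that rank and address information is already present in the environment. The sketch below is not part of nnUNet; setup_distributed is a hypothetical helper that shows the minimal setup such a trainer relies on when launched with torchrun (one process per GPU, LOCAL_RANK provided by the launcher).

import os

import torch
import torch.distributed as dist


def setup_distributed():
    # Hypothetical helper, not part of nnUNet. With torchrun, MASTER_ADDR,
    # MASTER_PORT, RANK, WORLD_SIZE and LOCAL_RANK are set automatically;
    # init_method='env://' reads the first four from the environment.
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    backend = 'nccl' if torch.cuda.is_available() else 'gloo'
    dist.init_process_group(backend=backend, init_method='env://')
    if torch.cuda.is_available():
        torch.cuda.set_device(local_rank)
    return local_rank, dist.get_rank(), dist.get_world_size()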
Code example #2
File: dice_loss.py  Project: nikh9l/NNU-Net
    def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1,
                 log_dice=False, ignore_label=None):
        """
        CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want.
        :param soft_dice_kwargs:
        :param ce_kwargs:
        :param aggregate:
        :param square_dice:
        :param weight_ce:
        :param weight_dice:
        """
        super(DC_and_CE_loss, self).__init__()
        if ignore_label is not None:
            assert not square_dice, 'not implemented'
            ce_kwargs['reduction'] = 'none'
        self.log_dice = log_dice
        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.aggregate = aggregate
        self.ce = RobustCrossEntropyLoss(**ce_kwargs)

        self.ignore_label = ignore_label

        if not square_dice:
            self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
        else:
            self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)
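The constructor above only stores weight_ce, weight_dice and aggregate; the actual combination happens later in the forward pass. As a rough, self-contained sketch of the idea (an illustration under the aggregate="sum" assumption, not the project's real forward()), a weighted CE plus soft-Dice loss can be written as:

import torch
import torch.nn.functional as F


def combined_ce_dice(logits, target, weight_ce=1.0, weight_dice=1.0, eps=1e-5):
    # Illustrative only. logits: (B, C, ...) raw scores, target: (B, ...) integer labels.
    target = target.long()
    ce = F.cross_entropy(logits, target)

    probs = torch.softmax(logits, dim=1)
    one_hot = F.one_hot(target, num_classes=logits.shape[1]).movedim(-1, 1).float()
    dims = tuple(range(2, logits.ndim))              # spatial dimensions
    intersection = (probs * one_hot).sum(dim=dims)
    denom = probs.sum(dim=dims) + one_hot.sum(dim=dims)
    dice = (2 * intersection + eps) / (denom + eps)  # (B, C) soft Dice per class
    dice_loss = 1 - dice.mean()

    # aggregate="sum": weighted sum of the two terms
    return weight_ce * ce + weight_dice * dice_loss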
Code example #3
File: ceLossTrainer.py  Project: LeAlex27/nnUNet
    def __init__(self,
                 plans_file,
                 fold,
                 output_folder=None,
                 dataset_directory=None,
                 batch_dice=True,
                 stage=None,
                 unpack_data=True,
                 deterministic=True,
                 fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16)
        self.loss = RobustCrossEntropyLoss()
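Every example on this page delegates to RobustCrossEntropyLoss. Reconstructed from how it is called here (the real project code may differ in details), such a wrapper is essentially a thin subclass of nn.CrossEntropyLoss that tolerates segmentation targets carrying an explicit channel dimension:

import torch
from torch import nn


class RobustCrossEntropyLossSketch(nn.CrossEntropyLoss):
    # Sketch reconstructed from usage, not copied from nnUNet: accept targets
    # shaped (B, 1, X, Y[, Z]), drop the channel axis and cast to long before
    # calling the standard cross-entropy.
    def forward(self, net_output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        if target.ndim == net_output.ndim:
            assert target.shape[1] == 1, "expected a single label channel"
            target = target[:, 0]
        return super().forward(net_output, target.long())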
Code example #4
File: dice_loss.py  Project: zijinY/SegLoss
    def __init__(self,
                 soft_dice_kwargs,
                 topk_kwargs,
                 ce_kwargs,
                 aggregate="sum",
                 square_dice=False):
        super(DC_topk_ce_loss, self).__init__()
        self.aggregate = aggregate
        self.topk = TopKLoss(**topk_kwargs)
        self.ce = RobustCrossEntropyLoss(**ce_kwargs)
        if not square_dice:
            self.dc = SoftDiceLoss(apply_nonlin=softmax_helper,
                                   **soft_dice_kwargs)
        else:
            self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper,
                                          **soft_dice_kwargs)
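The TopKLoss term above is a hard-example variant of CE. A minimal sketch of the underlying idea (illustration only; the k_percent name is an assumption, not the project's parameter) computes an unreduced per-voxel CE and averages only the hardest k percent of values:

import torch
import torch.nn.functional as F


def topk_cross_entropy(logits, target, k_percent=10.0):
    # Per-voxel CE, then keep only the k percent largest (hardest) values.
    per_voxel = F.cross_entropy(logits, target.long(), reduction='none')
    flat = per_voxel.reshape(-1)
    num_kept = max(1, int(flat.numel() * k_percent / 100))
    hardest, _ = torch.topk(flat, num_kept, sorted=False)
    return hardest.mean()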
Code example #5
File: dice_loss.py  Project: CamilaGL/nnUNet
    def __init__(self, ce_kwargs, ignore_label=None):
        """
        CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want.
        :param soft_dice_kwargs:
        :param ce_kwargs:
        :param aggregate:
        :param square_dice:
        :param weight_ce:
        :param weight_dice:
        """
        super(Only_CE_loss, self).__init__()
        if ignore_label is not None:
            ce_kwargs['reduction'] = 'none'
        self.ce = RobustCrossEntropyLoss(**ce_kwargs)

        self.ignore_label = ignore_label
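The switch to reduction='none' when ignore_label is set makes sense because an unreduced loss map can be masked before averaging. The sketch below illustrates that pattern (an assumption about intent, not the project's actual forward()); for plain CE, PyTorch's own ignore_index argument achieves much the same thing.

import torch
import torch.nn.functional as F


def masked_cross_entropy(logits, target, ignore_label):
    # Illustrative only: zero out the loss at ignored voxels, then average
    # over the remaining ones.
    target = target.long()
    mask = target != ignore_label
    # Map ignored voxels to class 0 so cross_entropy sees valid indices;
    # their contribution is removed by the mask anyway.
    safe_target = torch.where(mask, target, torch.zeros_like(target))
    per_voxel = F.cross_entropy(logits, safe_target, reduction='none')
    return (per_voxel * mask.float()).sum() / mask.float().sum().clamp_min(1.0)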
Code example #6
    def __init__(self,
                 input_channels,
                 base_num_features,
                 num_classes,
                 num_pool,
                 num_conv_per_stage=2,
                 feat_map_mul_on_downscale=2,
                 conv_op=nn.Conv2d,
                 norm_op=nn.BatchNorm2d,
                 norm_op_kwargs=None,
                 dropout_op=nn.Dropout2d,
                 dropout_op_kwargs=None,
                 nonlin=nn.LeakyReLU,
                 nonlin_kwargs=None,
                 deep_supervision=True,
                 dropout_in_localization=False,
                 weightInitializer=InitWeights_He(1e-2),
                 pool_op_kernel_sizes=None,
                 conv_kernel_sizes=None,
                 upscale_logits=False,
                 convolutional_pooling=False,
                 convolutional_upsampling=False,
                 max_num_features=None):
        """
        As opposed to the Generic_UNet, this class computes parts of the loss function in the
        forward pass, which is useful for GPU parallelization. The batch DICE loss, if used, must
        be computed over the whole batch. In a naive implementation, all softmax outputs would
        therefore have to be copied to a single GPU, which then does the entire loss computation by
        itself. In 3D segmentation this causes a lot of overhead and is inefficient, because the
        DICE computation itself is expensive (think 8 GPUs with a result of shape 2x4x128x128x128
        each). The DICE is a global metric, but its parts (TP, FP, FN) can be computed locally.
        This implementation therefore computes all parts of the loss function in the forward pass,
        and thus in a parallelized way. The results are very small (batch_size x num_classes for
        TP, FN and FP, respectively; a scalar for CE) and are copied cheaply. The final steps of
        the loss function (computing the batch dice and averaging the CE values) are easy and very
        quick on the one GPU they need to run on.
        final_nonlin is lambda x: x here!
        """
        super(Generic_UNet_DP, self).__init__(
            input_channels, base_num_features, num_classes, num_pool,
            num_conv_per_stage, feat_map_mul_on_downscale, conv_op, norm_op,
            norm_op_kwargs, dropout_op, dropout_op_kwargs, nonlin,
            nonlin_kwargs, deep_supervision, dropout_in_localization,
            lambda x: x, weightInitializer, pool_op_kernel_sizes,
            conv_kernel_sizes, upscale_logits, convolutional_pooling,
            convolutional_upsampling, max_num_features)
        self.ce_loss = RobustCrossEntropyLoss()
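The docstring above describes returning only the small building blocks of the loss from each replica. A self-contained sketch of that idea (names and shapes are assumptions, not the project's API) computes soft per-class TP/FP/FN in the forward pass and assembles the batch Dice afterwards:

import torch


def soft_tp_fp_fn(logits, target_onehot):
    # Per-replica part: reduce over the spatial dimensions only, so each
    # result is just (batch_size, num_classes).
    probs = torch.softmax(logits, dim=1)
    dims = tuple(range(2, logits.ndim))
    tp = (probs * target_onehot).sum(dim=dims)
    fp = (probs * (1 - target_onehot)).sum(dim=dims)
    fn = ((1 - probs) * target_onehot).sum(dim=dims)
    return tp, fp, fn


def batch_dice_from_parts(tp, fp, fn, eps=1e-5):
    # Final step on a single device: aggregate over the gathered batch
    # dimension, then compute one Dice value per class.
    tp, fp, fn = tp.sum(0), fp.sum(0), fn.sum(0)
    return (2 * tp + eps) / (2 * tp + fp + fn + eps)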
Code example #7
    def __init__(self,
                 plans_file,
                 fold,
                 output_folder=None,
                 dataset_directory=None,
                 batch_dice=True,
                 stage=None,
                 unpack_data=True,
                 deterministic=True,
                 fp16=False):
        super(sauNetTrainer, self).__init__(plans_file, fold, output_folder,
                                            dataset_directory, batch_dice, stage,
                                            unpack_data, deterministic, fp16)
        self.loss = RobustCrossEntropyLoss()
        self.max_num_epochs = 350
        self.initial_lr = 1e-3
        self.num_classes = 1
Code example #8
File: dice_loss.py  Project: zijinY/SegLoss
    def __init__(self, gdl_dice_kwargs, ce_kwargs, aggregate="sum"):
        super(GDL_and_CE_loss, self).__init__()
        self.aggregate = aggregate
        self.ce = RobustCrossEntropyLoss(**ce_kwargs)
        self.dc = GDL(softmax_helper, **gdl_dice_kwargs)
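For context on the GDL term above: a generalized Dice loss reweights each class by its inverse squared volume so that small structures are not drowned out by large ones. A minimal, self-contained sketch of that formulation (illustration only, not the project's implementation):

import torch


def generalized_dice_loss(logits, target_onehot, eps=1e-5):
    # Illustrative only. Class weights are the inverse squared class volume,
    # computed over the batch and spatial dimensions.
    probs = torch.softmax(logits, dim=1)
    dims = (0,) + tuple(range(2, logits.ndim))
    class_volumes = target_onehot.sum(dim=dims)
    w = 1.0 / (class_volumes * class_volumes + eps)
    intersection = (w * (probs * target_onehot).sum(dim=dims)).sum()
    denom = (w * (probs + target_onehot).sum(dim=dims)).sum()
    return 1 - 2 * intersection / (denom + eps)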