Example #1

    def setup(self):
        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        test_keys = splits[self.config.fold]['test']

        self.device = torch.device(
            self.config.device if torch.cuda.is_available() else "cpu")

        self.train_data_loader = NumpyDataSet(
            self.config.data_dir,
            target_size=self.config.patch_size,
            batch_size=self.config.batch_size,
            keys=tr_keys)
        self.val_data_loader = NumpyDataSet(self.config.data_dir,
                                            target_size=self.config.patch_size,
                                            batch_size=self.config.batch_size,
                                            keys=val_keys,
                                            mode="val",
                                            do_reshuffle=False)
        self.test_data_loader = NumpyDataSet(
            self.config.data_test_dir,
            target_size=self.config.patch_size,
            batch_size=self.config.batch_size,
            keys=test_keys,
            mode="test",
            do_reshuffle=False)
        self.model = UNet(num_classes=self.config.num_classes,
                          in_channels=self.config.in_channels)

        self.model.to(self.device)

        # We use a combination of Dice loss and CE loss in this example.
        # This combination proved to work well in the Medical Segmentation Decathlon.
        self.dice_loss = SoftDiceLoss(batch_dice=True)  # Softmax for DICE loss!
        self.ce_loss = torch.nn.CrossEntropyLoss()  # No softmax for CE loss -> already built into torch!

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.config.learning_rate)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print('checkpoint_dir is empty, please provide a directory to load the checkpoint from.')
            else:
                self.load_checkpoint(name=self.config.checkpoint_dir,
                                     save_types=("model",))

        self.save_checkpoint(name="checkpoint_start")
        self.elog.print('Experiment set up.')
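In the training loop (not shown in this listing), the two losses above are typically summed over the raw logits and their softmax. A minimal sketch, assuming batches are dicts with 'data' and 'seg' tensors of shape (N, C, H, W) and (N, 1, H, W); the key names and the equal 1:1 weighting are assumptions, not taken from the source:

    import torch.nn.functional as F

    def train_step(self, batch):
        # Hypothetical training step for the setup above.
        data = batch['data'].to(self.device)
        target = batch['seg'].long().to(self.device)

        self.optimizer.zero_grad()
        pred = self.model(data)  # raw logits, shape (N, num_classes, H, W)
        # SoftDiceLoss gets softmaxed probabilities; CrossEntropyLoss gets
        # raw logits and an integer label map of shape (N, H, W).
        loss = self.dice_loss(F.softmax(pred, dim=1), target.squeeze(1)) \
               + self.ce_loss(pred, target.squeeze(1))
        loss.backward()
        self.optimizer.step()
        return loss.item()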
Example #2
 def __init__(self,
              plans_file,
              fold,
              output_folder=None,
              dataset_directory=None,
              batch_dice=True,
              stage=None,
              unpack_data=True,
              deterministic=True,
              fp16=False):
     super().__init__(plans_file, fold, output_folder, dataset_directory,
                      batch_dice, stage, unpack_data, deterministic, fp16)
     self.apply_nonlin = softmax_helper
     self.loss = SoftDiceLoss(apply_nonlin=self.apply_nonlin,
                              batch_dice=self.batch_dice,
                              smooth=1e-5,
                              do_bg=False)
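This example passes `softmax_helper` without defining it; in nnU-Net-style code it is just a softmax over the class/channel axis applied to the raw logits. A minimal stand-in under that assumption:

    import torch.nn.functional as F

    def softmax_helper(x):
        # Assumed definition: softmax over dim=1 (the class channel of
        # NCHW/NCDHW tensors), handed to SoftDiceLoss via apply_nonlin so
        # the loss operates on probabilities rather than logits.
        return F.softmax(x, dim=1)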
Example #3

 def __init__(self,
              plans_file,
              fold,
              output_folder=None,
              dataset_directory=None,
              batch_dice=True,
              stage=None,
              unpack_data=True,
              deterministic=True,
              fp16=False):
     super().__init__(plans_file, fold, output_folder, dataset_directory,
                      batch_dice, stage, unpack_data, deterministic, fp16)
     self.loss = SoftDiceLoss(
         **{
             'apply_nonlin': softmax_helper,
             'batch_dice': self.batch_dice,
             'smooth': 1e-5,
             'do_bg': True
         })
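The dictionary-unpacking call above behaves exactly like passing the keyword arguments directly, so the two spellings are interchangeable:

    self.loss = SoftDiceLoss(apply_nonlin=softmax_helper,
                             batch_dice=self.batch_dice,
                             smooth=1e-5,
                             do_bg=True)

Note that this variant keeps the background channel in the Dice computation (do_bg=True), whereas the previous example excludes it.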
Example #4
    def setup(self):

        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        keys = tr_keys + val_keys
        test_keys = splits[self.config.fold]['test']

        self.device = torch.device(self.config.device if torch.cuda.is_available() else 'cpu')

        self.model = UNet(num_classes=self.config.num_classes, num_downs=3)

        self.model.to(self.device)

        self.data_loader = NumpyDataSet(self.config.data_dir, target_size=256, batch_size=self.config.batch_size,
                                        keys=keys, mode='test', do_reshuffle=False)

        self.data_16_loader = NumpyDataSet(self.config.scaled_image_32_dir, target_size=32,
                                           batch_size=self.config.batch_size,
                                           keys=keys, mode='test', do_reshuffle=False)

        # We use a combination of Dice loss and CE loss in this example.
        # This combination proved to work well in the Medical Segmentation Decathlon.
        self.dice_loss = SoftDiceLoss(batch_dice=True)  # Softmax for DICE loss!

        # weight = torch.tensor([1, 30, 30]).float().to(self.device)
        self.ce_loss = torch.nn.CrossEntropyLoss()  # No softmax for CE loss -> already built into torch!
        # self.dice_pytorch = dice_pytorch(self.config.num_classes)

        self.optimizer = optim.Adam(self.model.parameters(), lr=self.config.learning_rate)
        # self.optimizer = optim.SGD(self.model.parameters(), lr=self.config.learning_rate)

        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print('checkpoint_dir is empty, please provide a directory to load the checkpoint from.')
            else:
                self.load_checkpoint(name=self.config.checkpoint_dir, save_types=("model",))
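This example creates the ReduceLROnPlateau scheduler, but the epoch loop that drives it is outside the snippet. The usual pattern passes a monitored validation metric to scheduler.step(); a minimal sketch, where n_epochs, train_one_epoch and validate are hypothetical names:

    for epoch in range(self.config.n_epochs):
        train_loss = self.train_one_epoch()   # assumed helper
        val_loss = self.validate()            # assumed helper
        # ReduceLROnPlateau in 'min' mode lowers the learning rate when
        # the monitored value stops decreasing.
        self.scheduler.step(val_loss)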
Example #5
    def setup(self):
        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        test_keys = splits[self.config.fold]['test']

        self.device = torch.device(
            self.config.device if torch.cuda.is_available() else "cpu")

        self.train_data_loader = NumpyDataSet(
            self.config.data_dir,
            target_size=self.config.patch_size,
            batch_size=self.config.batch_size,
            keys=tr_keys)
        self.val_data_loader = NumpyDataSet(self.config.data_dir,
                                            target_size=self.config.patch_size,
                                            batch_size=self.config.batch_size,
                                            keys=val_keys,
                                            mode="val",
                                            do_reshuffle=False)
        self.test_data_loader = NumpyDataSet(
            self.config.data_test_dir,
            target_size=self.config.patch_size,
            batch_size=self.config.batch_size,
            keys=test_keys,
            mode="test",
            do_reshuffle=False)
        self.model = UNet(num_classes=self.config.num_classes,
                          in_channels=self.config.in_channels)

        self.model.to(self.device)

        # We use a combination of Dice loss and CE loss in this example.
        # This combination proved to work well in the Medical Segmentation Decathlon.
        self.dice_loss = SoftDiceLoss(batch_dice=True)  # Softmax for DICE loss!
        self.ce_loss = torch.nn.CrossEntropyLoss()  # No softmax for CE loss -> already built into torch!

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.config.learning_rate)

        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print('checkpoint_dir is empty, training from scratch.')
            else:
                self.load_checkpoint(name=self.config.checkpoint_filename,
                                     save_types=("model",),
                                     path=self.config.checkpoint_dir)

            if self.config.fine_tune in ['expanding_all', 'expanding_plus1']:
                # Freeze part of the network and fine-tune the rest;
                # otherwise the whole network is trained.
                unfreeze_block_parameters(
                    model=self.model, fine_tune_option=self.config.fine_tune)

        self.save_checkpoint(name="checkpoint_start")
        self.elog.print('Experiment set up.')
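unfreeze_block_parameters is referenced above but not shown in this listing. The standard PyTorch freeze/fine-tune pattern it presumably implements toggles requires_grad per parameter; a sketch under that assumption (the decoder-name matching is hypothetical):

    import torch.nn as nn

    def unfreeze_block_parameters(model: nn.Module, fine_tune_option: str):
        # Hypothetical sketch: freeze everything, then re-enable gradients
        # for the expanding-path (decoder) blocks selected by the option.
        for param in model.parameters():
            param.requires_grad = False
        for name, param in model.named_parameters():
            if 'up' in name:  # assumed naming of expanding-path modules
                param.requires_grad = True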
Example #6

 def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
              unpack_data=True, deterministic=True, fp16=False):
     super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                      deterministic, fp16)
     self.loss = SoftDiceLoss(apply_nonlin=torch.sigmoid, **{'batch_dice': False, 'do_bg': True, 'smooth': 0})
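Unlike the softmax-based examples above, this variant hands torch.sigmoid to the loss, so each output channel is treated as an independent binary map (useful when target regions may overlap); do_bg=True keeps channel 0 in the Dice computation, batch_dice=False scores each sample separately, and smooth=0 disables the smoothing term. A minimal sketch of calling such a loss; the shapes and the two-argument call are assumptions:

    import torch

    # Hypothetical call: logits and binary targets share shape (N, C, H, W);
    # sigmoid is applied inside the loss via apply_nonlin.
    logits = torch.randn(2, 3, 64, 64)
    targets = torch.randint(0, 2, (2, 3, 64, 64)).float()
    dice = SoftDiceLoss(apply_nonlin=torch.sigmoid,
                        batch_dice=False, do_bg=True, smooth=0)
    print(dice(logits, targets))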