Code example #1
    def run_training(self):
        # If we don't overwrite the epoch here, self.epoch + 1 is used, which is
        # not what we want at the start of training.
        self.maybe_update_lr(self.epoch)
        # Temporarily enable deep supervision for training, then restore the
        # previous setting afterwards.
        ds = self.network.decoder.deep_supervision
        self.network.decoder.deep_supervision = True
        ret = nnUNetTrainer.run_training(self)
        self.network.decoder.deep_supervision = ds
        return ret
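
The save/set/restore pattern around deep_supervision can also be expressed as a context manager, which guarantees the flag is restored even if training raises. A minimal sketch, not part of nnU-Net; the helper name deep_supervision_enabled is hypothetical:

from contextlib import contextmanager

@contextmanager
def deep_supervision_enabled(decoder):
    # Remember the current flag, force deep supervision on for the duration of
    # the block, and restore the original value on exit (even on exceptions).
    previous = decoder.deep_supervision
    decoder.deep_supervision = True
    try:
        yield
    finally:
        decoder.deep_supervision = previous

With such a helper, the override above would reduce to calling nnUNetTrainer.run_training(self) inside a with deep_supervision_enabled(self.network.decoder): block.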
Code example #2
    def run_training(self):
        self.maybe_update_lr(self.epoch)

        # amp must be initialized before DP

        # Temporarily enable deep supervision, then wrap the network in
        # DataParallel so training runs on all available GPUs.
        ds = self.network.do_ds
        self.network.do_ds = True
        self.network = DataParallel(self.network, device_ids=tuple(range(self.num_gpus)))
        ret = nnUNetTrainer.run_training(self)
        # Unwrap the original module and restore the previous setting.
        self.network = self.network.module
        self.network.do_ds = ds
        return ret
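
The wrap/unwrap dance works because DataParallel keeps the original module in its .module attribute. A minimal self-contained sketch of that behavior, using a toy Linear model rather than the real nnU-Net network:

import torch
from torch import nn
from torch.nn import DataParallel

net = nn.Linear(8, 2)
# Fall back to a single device id so the sketch also constructs on CPU-only machines.
num_gpus = max(torch.cuda.device_count(), 1)
# Replicate the module across the given devices during forward passes.
wrapped = DataParallel(net, device_ids=tuple(range(num_gpus)))
# The original module stays reachable via .module, which is how the trainer
# above restores self.network (and its do_ds flag) after training.
assert wrapped.module is net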
Code example #3
    def run_training(self):
        """
        If we run with -c we need to set the correct lr for the first epoch; otherwise the first
        continued epoch would run with self.initial_lr.

        We also need to make sure deep supervision in the network is enabled for training, hence this wrapper.
        :return:
        """
        # If we don't overwrite the epoch here, self.epoch + 1 is used, which is
        # not what we want at the start of training.
        self.maybe_update_lr(self.epoch)
        # DDP hides the underlying network behind .module, so unwrap it before
        # toggling the deep supervision flag.
        if isinstance(self.network, DDP):
            net = self.network.module
        else:
            net = self.network
        ds = net.do_ds
        net.do_ds = True
        ret = nnUNetTrainer.run_training(self)
        net.do_ds = ds
        return ret
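
The isinstance check above is the usual way to reach attributes through a DDP wrapper, since DDP (like DataParallel) stores the wrapped network in .module. As a sketch, the unwrap step could be factored into a small helper; unwrap_model is a hypothetical name, not an nnU-Net function:

from torch.nn.parallel import DistributedDataParallel as DDP

def unwrap_model(network):
    # DDP stores the wrapped network in .module; plain modules pass through
    # unchanged, so attribute toggles such as do_ds always hit the real network.
    return network.module if isinstance(network, DDP) else network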