def __init__(self, config, split, dataset):
        self.n_epochs = config.n_epochs
        self.split = split
        self._time_start = ""
        self._time_end = ""
        self.epoch = 0
        self.name = config.name
        self.patch_size = config.patch_size

        # Create output folders
        dirname = f'{time.strftime("%Y-%m-%d_%H%M", time.gmtime())}_{self.name}'
        self.out_dir = os.path.join(config.test_results_dir, dirname)
        os.makedirs(self.out_dir, exist_ok=True)

        # Create data loaders
        # TASK: SlicesDataset class is not complete. Go to the file and complete it.
        # Note that we are using a 2D version of UNet here, which means that it will expect
        # batches of 2D slices.
        self.train_loader = DataLoader(SlicesDataset(dataset[split["train"]]),
                                       batch_size=config.batch_size,
                                       shuffle=True,
                                       num_workers=0)
        self.val_loader = DataLoader(SlicesDataset(dataset[split["val"]]),
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=0)

        # we will access volumes directly for testing
        self.test_data = dataset[split["test"]]

        # Do we have CUDA available?
        if not torch.cuda.is_available():
            print(
                "WARNING: No CUDA device is found. This may take significantly longer!"
            )
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        # Configure our model and other training implements
        # We will use a recursive UNet model from German Cancer Research Center,
        # Division of Medical Image Computing. It is quite complicated and works
        # very well on this task. Feel free to explore it or plug in your own model
        self.model = UNet(num_classes=3)
        self.model.to(self.device)

        # We are using a standard cross-entropy loss. The model outputs raw
        # per-pixel class scores (logits); CrossEntropyLoss applies the softmax
        # internally
        self.loss_function = torch.nn.CrossEntropyLoss()

        # We are using the Adam optimizer to update our weights
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=config.learning_rate)
        # Scheduler helps us update learning rate automatically
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, 'min')

        # Set up Tensorboard. By default it saves data into the runs folder. You need
        # to launch TensorBoard separately (run: tensorboard --logdir runs) to view it.
        self.tensorboard_train_writer = SummaryWriter(comment="_train")
        self.tensorboard_val_writer = SummaryWriter(comment="_val")
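
# A minimal sketch of the SlicesDataset referenced in the TASK comment above
# (an assumption, not the project's actual implementation). It flattens all 3D
# volumes into an index of 2D slices; the "image"/"seg" keys, numpy-array
# volumes, and the [1, H, W] slice shape match how batches are consumed in the
# train() method further down this page.
import torch
from torch.utils.data import Dataset

class SlicesDataset(Dataset):
    def __init__(self, data):
        self.data = data
        self.slices = []
        # Flat index of (volume_index, slice_index) pairs across all volumes
        for i, d in enumerate(data):
            for j in range(d["image"].shape[0]):
                self.slices.append((i, j))

    def __getitem__(self, idx):
        vol_id, slice_id = self.slices[idx]
        # Return [1, H, W] slices so DataLoader batches come out as [B, 1, H, W]
        return {"id": idx,
                "image": torch.from_numpy(
                    self.data[vol_id]["image"][slice_id][None, :, :]).float(),
                "seg": torch.from_numpy(
                    self.data[vol_id]["seg"][slice_id][None, :, :]).long()}

    def __len__(self):
        return len(self.slices)
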
    def setup(self):
        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        test_keys = splits[self.config.fold]['test']

        self.device = torch.device(
            self.config.device if torch.cuda.is_available() else "cpu")

        self.train_data_loader = NumpyDataSet(
            self.config.data_dir,
            target_size=self.config.patch_size,
            batch_size=self.config.batch_size,
            keys=tr_keys)
        self.val_data_loader = NumpyDataSet(self.config.data_dir,
                                            target_size=self.config.patch_size,
                                            batch_size=self.config.batch_size,
                                            keys=val_keys,
                                            mode="val",
                                            do_reshuffle=False)
        self.test_data_loader = NumpyDataSet(
            self.config.data_test_dir,
            target_size=self.config.patch_size,
            batch_size=self.config.batch_size,
            keys=test_keys,
            mode="test",
            do_reshuffle=False)
        self.model = UNet(num_classes=self.config.num_classes,
                          in_channels=self.config.in_channels)

        self.model.to(self.device)

        # We use a combination of DICE-loss and CE-Loss in this example.
        # This proved good in the medical segmentation decathlon.
        self.dice_loss = SoftDiceLoss(
            batch_dice=True)  # Softmax is needed for the DICE loss!
        self.ce_loss = torch.nn.CrossEntropyLoss(
        )  # No softmax for the CE loss -> torch already applies it internally!

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.config.learning_rate)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print(
                    'checkpoint_dir is empty, please provide directory to load checkpoint.'
                )
            else:
                self.load_checkpoint(name=self.config.checkpoint_dir,
                                     save_types=("model",))

        self.save_checkpoint(name="checkpoint_start")
        self.elog.print('Experiment set up.')
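
The SoftDiceLoss used above is imported from the DKFZ training utilities and is not shown on this page. A minimal sketch of a soft Dice loss with a batch_dice option (an assumption, not the original implementation):

import torch
import torch.nn.functional as F

class SoftDiceLoss(torch.nn.Module):
    def __init__(self, batch_dice=False, smooth=1e-5):
        super().__init__()
        self.batch_dice = batch_dice
        self.smooth = smooth

    def forward(self, pred_softmax, target):
        # pred_softmax: [B, C, H, W] softmax probabilities
        # target: [B, H, W] integer class labels
        num_classes = pred_softmax.shape[1]
        target_onehot = F.one_hot(target.long(),
                                  num_classes).permute(0, 3, 1, 2).float()
        # Sum over the spatial dims; include the batch dim when batch_dice is set
        dims = (0, 2, 3) if self.batch_dice else (2, 3)
        intersection = (pred_softmax * target_onehot).sum(dims)
        denominator = pred_softmax.sum(dims) + target_onehot.sum(dims)
        dice = (2.0 * intersection + self.smooth) / (denominator + self.smooth)
        return 1.0 - dice.mean()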
Example #3
    def __init__(self, config, split, dataset):
        self.n_epochs = config.n_epochs
        self.split = split
        self._time_start = ""
        self._time_end = ""
        self.epoch = 0
        self.name = config.name

        # Create output folders
        dirname = f'{time.strftime("%Y-%m-%d_%H%M", time.gmtime())}_{self.name}'
        self.out_dir = os.path.join(config.test_results_dir, dirname)
        os.makedirs(self.out_dir, exist_ok=True)

        # Create data loaders
        # Note that we are using a 2D version of UNet here, which means that it will expect
        # batches of 2D slices.
        self.train_loader = DataLoader(SlicesDataset(dataset[split["train"]]),
                                       batch_size=config.batch_size,
                                       shuffle=True,
                                       num_workers=0)
        self.val_loader = DataLoader(SlicesDataset(dataset[split["val"]]),
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=0)

        # we will access volumes directly for testing
        self.test_data = dataset[split["test"]]

        # USE CUDA
        if not torch.cuda.is_available():
            print(
                "WARNING: No CUDA device is found. This may take significantly longer!"
            )
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        # Configure our model and other training implements
        # We will use a recursive UNet model from German Cancer Research Center,
        # Division of Medical Image Computing.
        self.model = UNet(num_classes=3)
        self.model.to(self.device)

        self.loss_function = torch.nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=config.learning_rate)
        # Update learning rate automatically
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, 'min')

        # Set up Tensorboard. By default it saves data into the runs folder.
        # To check the training results, launch TensorBoard (run: tensorboard --logdir runs)
        # and connect to it remotely, much like we do with the Jupyter notebook.
        self.tensorboard_train_writer = SummaryWriter(comment="_train")
        self.tensorboard_val_writer = SummaryWriter(comment="_val")
Example #4
    def segment_single_image(self, data):
        self.model = UNet(num_classes=self.config.num_classes,
                          in_channels=self.config.in_channels)
        self.device = torch.device(
            self.config.device if torch.cuda.is_available() else "cpu")

        # a model must be present and loaded here
        if self.config.model_dir == '':
            print(
                'model_dir is empty, please provide directory to load checkpoint.'
            )
        else:
            self.load_checkpoint(name=self.config.model_dir,
                                 save_types=("model",))

        self.elog.print("=====SEGMENT_SINGLE_IMAGE=====")
        self.model.eval()
        self.model.to(self.device)

        # Desired shape = [b, c, w, h]
        # split into even, batch_size-sized chunks
        with torch.no_grad():

            ######
            # When working entirely on CPU and in memory, the following lines replace the split/concat method
            # mr_data = data.float().to(self.device)
            # pred = self.model(mr_data)
            # pred_argmax = torch.argmax(pred.data.cpu(), dim=1, keepdim=True)
            ######

            ######
            # for CUDA (also works on CPU) split into batches
            blocksize = self.config.batch_size

            # number_of_elements = round(data.shape[0]/blocksize+0.5)     # make blocks large enough to not lose any slices
            chunks = [
                data[i:i + blocksize, ::, ::, ::]
                for i in range(0, data.shape[0], blocksize)
            ]
            pred_list = []
            for data_batch in chunks:
                mr_data = data_batch.float().to(self.device)
                pred_dict = self.model(mr_data)
                pred_list.append(pred_dict.cpu())

            pred = torch.cat(pred_list)  # concatenate chunk predictions along the batch axis
            pred_argmax = torch.argmax(pred, dim=1, keepdim=True)

        # detach result and put it back to cpu so that we can work with, create a numpy array
        result = pred_argmax.short().detach().cpu().numpy()

        return result
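
# A standalone sanity check of the chunking pattern used in segment_single_image
# above: blocks of batch_size slices, with a smaller remainder block at the end,
# so no slices are lost (shapes here are illustrative).
import torch

data = torch.zeros(10, 1, 64, 64)
blocksize = 4
chunks = [data[i:i + blocksize] for i in range(0, data.shape[0], blocksize)]
assert [c.shape[0] for c in chunks] == [4, 4, 2]
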
    def __init__(self, parameter_file_path='', model=None, device="cpu", patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(torch.load(parameter_file_path, map_location=self.device))

        self.model.to(self.device)
    def __init__(self, config, split, dataset):
        self.n_epochs = config.n_epochs
        self.split = split
        self._time_start = ""
        self._time_end = ""
        self.epoch = 0
        self.name = config.name

        # Create output folders
        dirname = f'{time.strftime("%Y-%m-%d_%H%M", time.gmtime())}_{self.name}'
        self.out_dir = os.path.join(config.test_results_dir, dirname)
        os.makedirs(self.out_dir, exist_ok=True)

        # Create data loaders
        self.train_loader = DataLoader(SlicesDataset(dataset[split["train"]]),
                                       batch_size=config.batch_size,
                                       shuffle=True,
                                       num_workers=0)
        self.val_loader = DataLoader(SlicesDataset(dataset[split["val"]]),
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=0)

        # we will access volumes directly for testing
        self.test_data = dataset[split["test"]]

        # Do we have CUDA available?
        if not torch.cuda.is_available():
            print(
                "WARNING: No CUDA device is found. This may take significantly longer!"
            )
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        # Configure our model and other training implements
        self.model = UNet(num_classes=3)
        self.model.to(self.device)

        # Cross entropy loss
        self.loss_function = torch.nn.CrossEntropyLoss()

        # We are using the Adam optimizer to update our weights
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=config.learning_rate)
        # Scheduler helps us update learning rate automatically
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, 'min')

        # Set up Tensorboard. By default it saves data into runs folder.
        self.tensorboard_train_writer = SummaryWriter(comment="_train")
        self.tensorboard_val_writer = SummaryWriter(comment="_val")
Example #7
    def __init__(self, config, split, dataset):
        self.n_epochs = config.n_epochs
        self.split = split
        self._time_start = ""
        self._time_end = ""
        self.epoch = 0
        self.name = config.name

        # Create output folders
        dirname = f'{time.strftime("%Y-%m-%d_%H%M", time.gmtime())}_{self.name}'
        self.out_dir = os.path.join(config.test_results_dir, dirname)
        os.makedirs(self.out_dir, exist_ok=True)

        # Create data loaders
        self.train_loader = DataLoader(SlicesDataset(dataset[split["train"]]),
                                       batch_size=config.batch_size,
                                       shuffle=True,
                                       num_workers=0)
        self.val_loader = DataLoader(SlicesDataset(dataset[split["val"]]),
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=0)

        # access volumes directly for testing
        self.test_data = dataset[split["test"]]

        if not torch.cuda.is_available():
            print(
                "WARNING: No CUDA device is found. This may take significantly longer!"
            )
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        # use a recursive UNet model from German Cancer Research Center, Division of Medical Image Computing
        self.model = UNet()
        self.model.to(self.device)

        # use a standard cross-entropy loss; the model outputs raw per-pixel class
        # scores (logits) and CrossEntropyLoss applies the softmax internally
        self.loss_function = torch.nn.CrossEntropyLoss()

        # use the Adam optimizer to update the weights
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=config.learning_rate)

        # Scheduler helps to update learning rate automatically
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, 'min')
Example #8
    def setup(self):

        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        keys = tr_keys + val_keys
        test_keys = splits[self.config.fold]['test']

        self.device = torch.device(self.config.device if torch.cuda.is_available() else 'cpu')

        self.model = UNet(num_classes=self.config.num_classes, num_downs=3)

        self.model.to(self.device)

        self.data_loader = NumpyDataSet(self.config.data_dir, target_size=256, batch_size=self.config.batch_size,
                                        keys=keys, mode='test', do_reshuffle=False)

        self.data_16_loader = NumpyDataSet(self.config.scaled_image_32_dir, target_size=32, batch_size=self.config.batch_size,
                                        keys=keys, mode='test', do_reshuffle=False)

        # We use a combination of DICE-loss and CE-Loss in this example.
        # This proved good in the medical segmentation decathlon.
        self.dice_loss = SoftDiceLoss(batch_dice=True)  # Softmax is needed for the DICE loss!

        # weight = torch.tensor([1, 30, 30]).float().to(self.device)
        self.ce_loss = torch.nn.CrossEntropyLoss()  # No softmax for the CE loss -> torch already applies it internally!
        # self.dice_pytorch = dice_pytorch(self.config.num_classes)

        self.optimizer = optim.Adam(self.model.parameters(), lr=self.config.learning_rate)
        # self.optimizer = optim.SGD(self.model.parameters(), lr=self.config.learning_rate)

        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print('checkpoint_dir is empty, please provide directory to load checkpoint.')
            else:
                self.load_checkpoint(name=self.config.checkpoint_dir, save_types=("model",))
Example #9
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size
        Arguments:
            volume {Numpy array} -- 3D array representing the volume
        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # volume is a numpy array of shape [X,Y,Z]; we slice along the X axis
        slices = []

        # create mask for each slice across the X (0th) dimension.
        # put all slices into a 3D Numpy array

        for ix in range(0, volume.shape[0]):
            slice_tensor = torch.from_numpy(volume[ix, :, :].astype(
                np.single)).unsqueeze(0).unsqueeze(0)
            pred = self.model(slice_tensor.to(self.device))
            mask = torch.argmax(np.squeeze(pred.cpu().detach()), dim=0)
            slices.append(mask)
        return np.dstack(slices).transpose(2, 0, 1)

    def single_volume_inference_unpadded(self, volume, patch_size):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first
        Arguments:
            volume {Numpy array} -- 3D array representing the volume
        Returns:
            3D NumPy array with prediction mask
        """

        volume = med_reshape(volume, (volume.shape[0], patch_size, patch_size))

        return self.single_volume_inference(volume)
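
med_reshape is called in several of these examples but never shown. A plausible minimal version (an assumption, not the original helper): zero-pad the volume into the new, larger shape, keeping the original data at the origin.

import numpy as np

def med_reshape(image, new_shape):
    # Zero-pad image into new_shape; assumes each new axis is >= the old one
    reshaped = np.zeros(new_shape, dtype=image.dtype)
    x, y, z = image.shape
    reshaped[:x, :y, :z] = image
    return reshaped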
Example #10
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self, parameter_file_path='', model=None, device="cpu", patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        
        raise NotImplementedError

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice the X axis

        # Create a mask for each slice across the X (0th) dimension and put all
        # slices into a 3D Numpy array. You can verify the method is correct by
        # running it on one of the volumes in your training set and comparing
        # with the label in 3D Slicer.
        mask = np.zeros(volume.shape)
        for i in range(0, volume.shape[0]):
            ind_slice = torch.from_numpy(volume[i, :, :].astype(np.single)).unsqueeze(0).unsqueeze(0)
            pred = self.model(ind_slice.to(self.device))
            mask[i, :, :] = torch.argmax(np.squeeze(pred.cpu().detach()), dim=0)

        return mask
Example #11
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self, parameter_file_path='', model=None, device="cpu", patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        
        raise NotImplementedError

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        slices = []

        # TASK: Write code that will create mask for each slice across the X (0th) dimension. After 
        # that, put all slices into a 3D Numpy array. You can verify if your method is 
        # correct by running it on one of the volumes in your training set and comparing 
        # with the label in 3D Slicer.
        # <YOUR CODE HERE>
        
        # .float() instead of torch.cuda.FloatTensor so this also runs on CPU
        vol_tensor = torch.tensor(volume).float().unsqueeze(1).to(self.device)
        outputs = self.model(vol_tensor).cpu().detach()
        _, slices = torch.max(outputs, 1)

        return slices.numpy()
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device
        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        raise NotImplementedError

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice the X axis.
        # Create a mask for each slice across the X (0th) dimension and put all
        # slices into a 3D Numpy array.
        slices = np.zeros(volume.shape)
        for i in range(volume.shape[0]):
            tsr_test = torch.from_numpy(
                volume[i, :, :].astype(np.single) /
                np.max(volume[i, :, :])).unsqueeze(0).unsqueeze(0)
            # Move the slice to the same device as the model before inference
            pred = self.model(tsr_test.to(self.device))
            slices[i, :, :] = torch.argmax(np.squeeze(pred.cpu().detach()),
                                           dim=0)

        return slices
Example #13
class UNetExperiment:
    """
    This class implements the basic life cycle for a segmentation task with UNet (https://arxiv.org/abs/1505.04597).
    The basic life cycle of a UNetExperiment is:

        run():
            for epoch in n_epochs:
                train()
                validate()
        test()
    """
    def __init__(self, config, split, dataset):
        self.n_epochs = config.n_epochs
        self.split = split
        self._time_start = ""
        self._time_end = ""
        self.epoch = 0
        self.name = config.name

        # Create output folders
        dirname = f'{time.strftime("%Y-%m-%d_%H%M", time.gmtime())}_{self.name}'
        self.out_dir = os.path.join(config.test_results_dir, dirname)
        os.makedirs(self.out_dir, exist_ok=True)

        # Create data loaders
        # TASK: SlicesDataset class is not complete. Go to the file and complete it.
        # Note that we are using a 2D version of UNet here, which means that it will expect
        # batches of 2D slices.
        self.train_loader = DataLoader(SlicesDataset(dataset[split["train"]]),
                                       batch_size=config.batch_size,
                                       shuffle=True,
                                       num_workers=0)
        self.val_loader = DataLoader(SlicesDataset(dataset[split["val"]]),
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=0)

        # we will access volumes directly for testing
        self.test_data = dataset[split["test"]]

        # Do we have CUDA available?
        if not torch.cuda.is_available():
            print(
                "WARNING: No CUDA device is found. This may take significantly longer!"
            )
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        # Configure our model and other training implements
        # We will use a recursive UNet model from German Cancer Research Center,
        # Division of Medical Image Computing. It is quite complicated and works
        # very well on this task. Feel free to explore it or plug in your own model
        self.model = UNet(num_classes=3)
        self.model.to(self.device)

        # We are using a standard cross-entropy loss. The model outputs raw
        # per-pixel class scores (logits); CrossEntropyLoss applies the softmax
        # internally
        self.loss_function = torch.nn.CrossEntropyLoss()

        # We are using the Adam optimizer to update our weights
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=config.learning_rate)
        # Scheduler helps us update learning rate automatically
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, 'min')

        # Set up Tensorboard. By default it saves data into the runs folder. You need
        # to launch TensorBoard separately (run: tensorboard --logdir runs) to view it.
        self.tensorboard_train_writer = SummaryWriter(comment="_train")
        self.tensorboard_val_writer = SummaryWriter(comment="_val")

    def train(self):
        """
        This method is executed once per epoch and takes 
        care of model weight update cycle
        """
        print(f"Training epoch {self.epoch}...")
        self.model.train()

        # Loop over our minibatches
        for i, batch in enumerate(self.train_loader):
            self.optimizer.zero_grad()

            # TASK: You have your data in batch variable. Put the slices as 4D Torch Tensors of
            # shape [BATCH_SIZE, 1, PATCH_SIZE, PATCH_SIZE] into variables data and target.
            # Feed data to the model and feed target to the loss function
            #
            # data = <YOUR CODE HERE>
            # target = <YOUR CODE HERE>
            data = batch["image"].to(self.device, dtype=torch.float)
            target = batch["seg"].to(self.device)

            prediction = self.model(data)

            # We are also getting softmax'd version of prediction to output a probability map
            # so that we can see how the model converges to the solution
            prediction_softmax = F.softmax(prediction, dim=1)

            loss = self.loss_function(prediction, target[:, 0, :, :])

            # TASK: What does each dimension of variable prediction represent?
            # ANSWER: Dimensions represent: batch_size, classes, coronal data, axial data
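
            # A quick sanity check of those dimensions (an assumption: the UNet
            # used here maps [B, 1, H, W] inputs to [B, num_classes, H, W]):
            #   out = UNet(num_classes=3)(torch.zeros(8, 1, 64, 64))
            #   out.shape -> torch.Size([8, 3, 64, 64])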

            loss.backward()
            self.optimizer.step()

            if (i % 10) == 0:
                # Output to console on every 10th batch
                print(
                    f"\nEpoch: {self.epoch} Train loss: {loss}, {100*(i+1)/len(self.train_loader):.1f}% complete"
                )

                counter = 100 * self.epoch + 100 * (i / len(self.train_loader))

                # You don't need to do anything with this function, but you are welcome to
                # check it out if you want to see how images are logged to Tensorboard
                # or if you want to output additional debug data
                log_to_tensorboard(self.tensorboard_train_writer, loss, data,
                                   target, prediction_softmax, prediction,
                                   counter)

            print(".", end='')

        print("\nTraining complete")

    def validate(self):
        """
        This method runs validation cycle, using same metrics as 
        Train method. Note that model needs to be switched to eval
        mode and no_grad needs to be called so that gradients do not 
        propagate
        """
        print(f"Validating epoch {self.epoch}...")

        # Turn off gradient accumulation by switching model to "eval" mode
        self.model.eval()
        loss_list = []

        with torch.no_grad():
            for i, batch in enumerate(self.val_loader):

                # TASK: Write validation code that will compute loss on a validation sample
                # <YOUR CODE HERE>

                data = batch["image"].to(self.device, dtype=torch.float)
                target = batch["seg"].to(self.device)

                prediction = self.model(data)

                prediction_softmax = F.softmax(prediction, dim=1)
                loss = self.loss_function(prediction, target[:, 0, :, :])

                print(f"Batch {i}. Data shape {data.shape} Loss {loss}")

                # We report the loss accumulated across the entire validation set
                loss_list.append(loss.item())

        self.scheduler.step(np.mean(loss_list))

        log_to_tensorboard(self.tensorboard_val_writer, np.mean(loss_list),
                           data, target, prediction_softmax, prediction,
                           (self.epoch + 1) * 100)
        print(f"Validation complete")

    def save_model_parameters(self):
        """
        Saves model parameters to a file in results directory
        """
        path = os.path.join(self.out_dir, "model.pth")

        torch.save(self.model.state_dict(), path)

    def load_model_parameters(self, path=''):
        """
        Loads model parameters from a supplied path or a
        results directory
        """
        if not path:
            model_path = os.path.join(self.out_dir, "model.pth")
        else:
            model_path = path

        if os.path.exists(model_path):
            self.model.load_state_dict(torch.load(model_path))
        else:
            raise Exception(f"Could not find path {model_path}")

    def run_test(self):
        """
        This runs the test cycle on the test dataset.
        Note that the process and evaluation here are quite different from
        training: we compute many more metrics and return a dictionary that
        could later be persisted as JSON
        """
        print("Testing...")
        self.model.eval()

        # In this method we will be computing metrics that are relevant to the task of 3D volume
        # segmentation. Therefore, unlike train and validation methods, we will do inferences
        # on full 3D volumes, much like we will be doing it when we deploy the model in the
        # clinical environment.

        # TASK: Inference Agent is not complete. Go and finish it. Feel free to test the class
        # in a module of your own by running it against one of the data samples
        inference_agent = UNetInferenceAgent(model=self.model,
                                             device=self.device)

        out_dict = {}
        out_dict["volume_stats"] = []
        dc_list = []
        jc_list = []

        # for every volume in the test set
        for i, x in enumerate(self.test_data):
            pred_label = inference_agent.single_volume_inference(x["image"])

            # We compute and report Dice and Jaccard similarity coefficients which
            # assess how close our volumes are to each other

            # TASK: Dice3D and Jaccard3D functions are not implemented.
            # Complete the implementation as we discussed in one of the course
            # lessons; you can look up the definition of the Jaccard index on
            # Wikipedia. If you completed it correctly (and if you picked your
            # train/val/test split right ;)), your average Jaccard on your test
            # set should be around 0.80. A sketch follows after this class.

            dc = Dice3d(pred_label, x["seg"])
            jc = Jaccard3d(pred_label, x["seg"])
            dc_list.append(dc)
            jc_list.append(jc)

            # STAND-OUT SUGGESTION: By way of exercise, consider also outputting:
            # * Sensitivity and specificity (and explain semantic meaning in terms of
            #   under/over segmenting)
            # * Dice-per-slice and render combined slices with lowest and highest DpS
            # * Dice per class (anterior/posterior)

            out_dict["volume_stats"].append({
                "filename": x['filename'],
                "dice": dc,
                "jaccard": jc
            })
            print(
                f"{x['filename']} Dice {dc:.4f}. {100*(i+1)/len(self.test_data):.2f}% complete"
            )

        out_dict["overall"] = {
            "mean_dice": np.mean(dc_list),
            "mean_jaccard": np.mean(jc_list)
        }

        print("\nTesting complete.")
        return out_dict

    def run(self):
        """
        Kicks off train cycle and writes model parameter file at the end
        """
        self._time_start = time.time()

        print("Experiment started.")

        # Iterate over epochs
        for self.epoch in range(self.n_epochs):
            self.train()
            self.validate()

        # save model for inferencing
        self.save_model_parameters()

        self._time_end = time.time()
        print(
            f"Run complete. Total time: {time.strftime('%H:%M:%S', time.gmtime(self._time_end - self._time_start))}"
        )
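
Dice3d and Jaccard3d are left as a TASK in run_test() above. A standard sketch, assuming integer masks where any non-zero voxel counts as foreground:

import numpy as np

def Dice3d(a, b):
    a, b = (a > 0), (b > 0)
    intersection = np.sum(a & b)
    total = np.sum(a) + np.sum(b)
    if total == 0:
        return -1  # Dice is undefined when both masks are empty
    return 2.0 * intersection / total

def Jaccard3d(a, b):
    a, b = (a > 0), (b > 0)
    intersection = np.sum(a & b)
    union = np.sum(a | b)
    if union == 0:
        return -1  # Jaccard is undefined when both masks are empty
    return intersection / union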
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        volume = med_reshape(volume,
                             new_shape=(self.patch_size, self.patch_size,
                                        self.patch_size))
        return self.single_volume_inference(volume)

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        slices = []

        # Create mask for each slice across the X (0th) dimension. After
        # that, put all slices into a 3D Numpy array.
        for slc in volume:  # "slc" avoids shadowing the built-in name "slice"
            if np.count_nonzero(slc) == 0:
                slices.append(np.zeros((self.patch_size, self.patch_size)))
                continue
            slice_input = torch.tensor(slc[None, None, :, :],
                                       dtype=torch.float).to(self.device)
            prediction = np.squeeze(self.model(slice_input).cpu().detach())
            mask = torch.argmax(prediction, dim=0).numpy()
            slices.append(mask)

        return np.array(slices)
Example #15
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """

        reshaped = med_reshape(
            volume, (volume.shape[0], self.patch_size, self.patch_size))

        return self.single_volume_inference(reshaped)

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        slices = []
        for slc in volume:
            indata = torch.tensor(slc).unsqueeze(0).unsqueeze(0).type(
                torch.FloatTensor).to(self.device)
            rawp = self.model(indata)
            amax = torch.argmax(rawp, dim=1)
            pred = amax.squeeze().type(torch.LongTensor).cpu()
            slices.append(pred.numpy())

        return np.array(slices)
Example #16
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        slices = np.zeros(volume.shape)
        for i in range(volume.shape[0]):
            s = volume[i, :, :]
            s = s.astype(np.single)
            s = s / 255.0
            s = torch.from_numpy(s).unsqueeze(0).unsqueeze(0)

            pred = self.model(s.to(self.device))
            pred = np.squeeze(pred.cpu().detach())
            slices[i, :, :] = torch.argmax(pred, dim=0)

        return slices

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """

        volume = med_reshape(volume,
                             new_shape=(volume.shape[0], self.patch_size,
                                        self.patch_size))
        return self.single_volume_inference(volume)
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self, parameter_file_path='', model=None, device="cpu", patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        
        raise NotImplementedError

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        slices = []

        # TASK: Write code that will create mask for each slice across the X (0th) dimension. After 
        # that, put all slices into a 3D Numpy array. You can verify if your method is 
        # correct by running it on one of the volumes in your training set and comparing 
        # with the label in 3D Slicer.
        # <YOUR CODE HERE>
        for i in range(volume.shape[0]):
            slc = torch.from_numpy(volume[i, :, :].astype(np.single)).unsqueeze(0).unsqueeze(0)
            pred = self.model(slc.to(self.device))
            mask = torch.argmax(np.squeeze(pred.cpu().detach()), dim=0)
            slices.append(mask.numpy())

        return np.array(slices)
Example #18
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """

        raise NotImplementedError

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        slices = []

        # TASK: Write code that will create mask for each slice across the X (0th) dimension. After
        # that, put all slices into a 3D Numpy array. You can verify if your method is
        # correct by running it on one of the volumes in your training set and comparing
        # with the label in 3D Slicer.
        vol_tensor = torch.from_numpy(volume).type(torch.FloatTensor).to(
            self.device)
        vol_tensor = vol_tensor.unsqueeze(1)
        # vol_tensor now has shape (num_sagittal_slices, 1, patch_size, patch_size).
        # The 0th index is used as the batch index, so the entire volume is
        # treated as one big batch of slices.
        with torch.no_grad():
            prediction = F.softmax(self.model(vol_tensor), dim=1).cpu().numpy()
        # prediction has shape (num_sagittal_slices,3,patch_size,patch_size)

        return prediction.argmax(axis=1)
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self, parameter_file_path='', model=None, device="cpu", patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(torch.load(parameter_file_path, map_location=self.device))

        self.model.to(self.device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        
        raise NotImplementedError

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        slices = []

        # TASK: Write code that will create mask for each slice across the X (0th) dimension. After 
        # that, put all slices into a 3D Numpy array. You can verify if your method is 
        # correct by running it on one of the volumes in your training set and comparing 
        # with the label in 3D Slicer.
        
        img_ = med_reshape(volume, new_shape=(volume.shape[0], self.patch_size, self.patch_size))
        for i in range(img_.shape[0]):
            image = torch.from_numpy(img_[i] / np.max(img_[i])).unsqueeze(0).unsqueeze(0)
            output = self.model(image.to(self.device, dtype=torch.float))
            output = np.squeeze(output.cpu().detach())
 
            slices.append(torch.argmax(output, dim=0).numpy())

        return np.asarray(slices)
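
A hypothetical end-to-end use of the inference agent shown in these examples (the file name "model.pth" and the volume shape are assumptions):

import numpy as np

agent = UNetInferenceAgent(parameter_file_path="model.pth", device="cpu")
volume = np.random.rand(32, 64, 64)           # [X, Y, Z] conformant volume
mask = agent.single_volume_inference(volume)  # [X, Y, Z] integer class mask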
Example #20
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """

        x, y, z = volume.shape
        new_shape = (x, self.patch_size, self.patch_size)
        new_volume = med_reshape(volume, new_shape)
        slices = np.zeros(new_volume.shape)
        for slice_idx in range(new_volume.shape[0]):
            slc = new_volume[slice_idx, :, :]
            slc_tensor = torch.from_numpy(
                slc.astype(np.single) /
                np.max(slc)).unsqueeze(0).unsqueeze(0).to(self.device)
            prediction = self.model(slc_tensor)
            prediction = np.squeeze(prediction.cpu().detach())
            slices[slice_idx, :, :] = torch.argmax(prediction, dim=0)
        return slices

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        # slices = []

        # Create mask for each slice across the X (0th) dimension. After
        # that, put all slices into a 3D Numpy array.

        slices = np.zeros(volume.shape)
        for slice_index in range(volume.shape[0]):
            slc = volume[slice_index, :, :]
            slc = slc.astype(np.single) / np.max(slc)
            slc_tensor = torch.from_numpy(slc).unsqueeze(0).unsqueeze(0).to(
                self.device)
            prediction = self.model(slc_tensor)
            prediction = np.squeeze(prediction.cpu().detach())
            slices[slice_index, :, :] = torch.argmax(prediction, dim=0)

        return slices
Example #21
class UNetExperiment(PytorchExperiment):
    """
    UNetExperiment inherits from PytorchExperiment. It implements the basic life cycle for a segmentation task with UNet (https://arxiv.org/abs/1505.04597).
    It is optimized to work with the provided NumpyDataLoader.

    The basic life cycle of a UNetExperiment is the same as the PytorchExperiment:

        setup()
        (--> Automatically restore values if a previous checkpoint is given)
        prepare()

        for epoch in n_epochs:
            train()
            validate()
            (--> save current checkpoint)

        end()
    """
    def setup(self):
        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        test_keys = splits[self.config.fold]['test']

        self.device = torch.device(
            self.config.device if torch.cuda.is_available() else "cpu")

        self.train_data_loader = NumpyDataSet(
            self.config.data_dir,
            target_size=self.config.patch_size,
            batch_size=self.config.batch_size,
            keys=tr_keys)
        self.val_data_loader = NumpyDataSet(self.config.data_dir,
                                            target_size=self.config.patch_size,
                                            batch_size=self.config.batch_size,
                                            keys=val_keys,
                                            mode="val",
                                            do_reshuffle=False)
        self.test_data_loader = NumpyDataSet(
            self.config.data_test_dir,
            target_size=self.config.patch_size,
            batch_size=self.config.batch_size,
            keys=test_keys,
            mode="test",
            do_reshuffle=False)
        self.model = UNet(num_classes=self.config.num_classes,
                          in_channels=self.config.in_channels)

        self.model.to(self.device)

        # We use a combination of DICE-loss and CE-Loss in this example.
        # This proved good in the medical segmentation decathlon.
        self.dice_loss = SoftDiceLoss(
            batch_dice=True)  # Softmax for DICE Loss!
        self.ce_loss = torch.nn.CrossEntropyLoss(
        )  # No softmax for CE Loss -> is implemented in torch!

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.config.learning_rate)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print(
                    'checkpoint_dir is empty, please provide directory to load checkpoint.'
                )
            else:
                self.load_checkpoint(name=self.config.checkpoint_dir,
                                     save_types=("model",))

        self.save_checkpoint(name="checkpoint_start")
        self.elog.print('Experiment set up.')

    def train(self, epoch):
        self.elog.print('=====TRAIN=====')
        self.model.train()

        data = None
        batch_counter = 0
        for data_batch in self.train_data_loader:

            self.optimizer.zero_grad()

            # Shape of data_batch = [1, b, c, w, h]
            # Desired shape = [b, c, w, h]
            # Move data and target to the GPU
            data = data_batch['data'][0].float().to(self.device)
            target = data_batch['seg'][0].long().to(self.device)

            pred = self.model(data)
            pred_softmax = F.softmax(
                pred, dim=1
            )  # We calculate a softmax, because our SoftDiceLoss expects that as an input. The CE-Loss does the softmax internally.

            #loss = self.dice_loss(pred_softmax, target.squeeze()) + self.ce_loss(pred, target.squeeze())
            loss = self.ce_loss(pred, target.squeeze())

            loss.backward()
            self.optimizer.step()

            # Some logging and plotting
            if (batch_counter % self.config.plot_freq) == 0:
                self.elog.print('Epoch: {0} Loss: {1:.4f}'.format(
                    self._epoch_idx, loss))

                self.add_result(
                    value=loss.item(),
                    name='Train_Loss',
                    tag='Loss',
                    counter=epoch +
                    (batch_counter /
                     self.train_data_loader.data_loader.num_batches))

                self.clog.show_image_grid(data.float().cpu(),
                                          name="data",
                                          normalize=True,
                                          scale_each=True,
                                          n_iter=epoch)
                self.clog.show_image_grid(target.float().cpu(),
                                          name="mask",
                                          title="Mask",
                                          n_iter=epoch)
                self.clog.show_image_grid(torch.argmax(pred.cpu(),
                                                       dim=1,
                                                       keepdim=True),
                                          name="unt_argmax",
                                          title="Unet",
                                          n_iter=epoch)
                self.clog.show_image_grid(pred.cpu()[:, 1:2, ],
                                          name="unt",
                                          normalize=True,
                                          scale_each=True,
                                          n_iter=epoch)

            batch_counter += 1

        assert data is not None, 'data is None. Please check if your dataloader works properly'

    def validate(self, epoch):
        self.elog.print('VALIDATE')
        self.model.eval()

        data = None
        loss_list = []

        with torch.no_grad():
            for data_batch in self.val_data_loader:
                data = data_batch['data'][0].float().to(self.device)
                target = data_batch['seg'][0].long().to(self.device)

                pred = self.model(data)
                pred_softmax = F.softmax(
                    pred, dim=1
                )  # We calculate a softmax, because our SoftDiceLoss expects that as an input. The CE-Loss does the softmax internally.

                #loss = self.dice_loss(pred_softmax, target.squeeze()) + self.ce_loss(pred, target.squeeze())
                loss = self.ce_loss(pred, target.squeeze())
                loss_list.append(loss.item())

        assert data is not None, 'data is None. Please check if your dataloader works properly'
        self.scheduler.step(np.mean(loss_list))

        self.elog.print('Epoch: %d Loss: %.4f' %
                        (self._epoch_idx, np.mean(loss_list)))

        self.add_result(value=np.mean(loss_list),
                        name='Val_Loss',
                        tag='Loss',
                        counter=epoch + 1)

        self.clog.show_image_grid(data.float().cpu(),
                                  name="data_val",
                                  normalize=True,
                                  scale_each=True,
                                  n_iter=epoch)
        self.clog.show_image_grid(target.float().cpu(),
                                  name="mask_val",
                                  title="Mask",
                                  n_iter=epoch)
        self.clog.show_image_grid(torch.argmax(pred.data.cpu(),
                                               dim=1,
                                               keepdim=True),
                                  name="unt_argmax_val",
                                  title="Unet",
                                  n_iter=epoch)
        self.clog.show_image_grid(pred.data.cpu()[:, 1:2, ],
                                  name="unt_val",
                                  normalize=True,
                                  scale_each=True,
                                  n_iter=epoch)

    def test(self):
        from evaluation.evaluator import aggregate_scores, Evaluator
        from collections import defaultdict

        self.elog.print('=====TEST=====')
        self.model.eval()

        pred_dict = defaultdict(list)
        gt_dict = defaultdict(list)

        batch_counter = 0
        with torch.no_grad():
            for data_batch in self.test_data_loader:
                print('testing...', batch_counter)
                batch_counter += 1

                # Get data_batches
                mr_data = data_batch['data'][0].float().to(self.device)
                mr_target = data_batch['seg'][0].float().to(self.device)

                pred = self.model(mr_data)
                pred_argmax = torch.argmax(pred.data.cpu(),
                                           dim=1,
                                           keepdim=True)

                fnames = data_batch['fnames']
                for i, fname in enumerate(fnames):
                    pred_dict[fname[0]].append(
                        pred_argmax[i].detach().cpu().numpy())
                    gt_dict[fname[0]].append(
                        mr_target[i].detach().cpu().numpy())

        test_ref_list = []
        for key in pred_dict.keys():
            test_ref_list.append(
                (np.stack(pred_dict[key]), np.stack(gt_dict[key])))

        scores = aggregate_scores(test_ref_list,
                                  evaluator=Evaluator,
                                  json_author=self.config.author,
                                  json_task=self.config.name,
                                  json_name=self.config.name,
                                  json_output_file=self.elog.work_dir +
                                  "/{}_".format(self.config.author) +
                                  self.config.name + '.json')

        print("Scores:\n", scores)

    def segment_single_image(self, data):
        self.model = UNet(num_classes=self.config.num_classes,
                          in_channels=self.config.in_channels)
        self.device = torch.device(
            self.config.device if torch.cuda.is_available() else "cpu")

        # a trained model checkpoint must be loaded here
        if self.config.model_dir == '':
            print(
                'model_dir is empty; please provide a directory to load the model checkpoint from.'
            )
        else:
            self.load_checkpoint(name=self.config.model_dir,
                                 save_types=("model"))

        self.elog.print("=====SEGMENT_SINGLE_IMAGE=====")
        self.model.eval()
        self.model.to(self.device)

        # Desired shape = [b, c, w, h]
        # split the volume into evenly sized chunks of batch_size slices each
        with torch.no_grad():

            ######
            # When working entirely on CPU and in memory, the following lines replace the split/concat method
            # mr_data = data.float().to(self.device)
            # pred = self.model(mr_data)
            # pred_argmax = torch.argmax(pred.data.cpu(), dim=1, keepdim=True)
            ######

            ######
            # for CUDA (also works on CPU) split into batches
            blocksize = self.config.batch_size

            chunks = [
                data[i:i + blocksize]
                for i in range(0, data.shape[0], blocksize)
            ]
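            # e.g. with data.shape[0] == 10 and blocksize == 4 this yields
            # chunks of 4, 4 and 2 slices, so no slice is dropped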
            pred_list = []
            for data_batch in chunks:
                mr_data = data_batch.float().to(self.device)
                pred_batch = self.model(mr_data)
                pred_list.append(pred_batch.cpu())

            pred = torch.cat(pred_list)
            pred_argmax = torch.argmax(pred, dim=1, keepdim=True)

        # detach the result, move it back to the CPU, and convert it to a NumPy array
        result = pred_argmax.short().detach().cpu().numpy()

        return result
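
# The SoftDiceLoss used in setup() above comes from an external package and is
# not shown in this listing. As a rough sketch only (the real implementation
# differs in details), a soft Dice loss with batch-wise aggregation over
# softmax probabilities could look like this:
import torch
import torch.nn.functional as F

def soft_dice_loss(probs, target, eps=1e-6):
    # probs: [b, c, h, w] softmax probabilities; target: [b, h, w] class labels
    num_classes = probs.shape[1]
    target_onehot = F.one_hot(target, num_classes).permute(0, 3, 1, 2).float()
    dims = (0, 2, 3)  # aggregate over batch and spatial dims ("batch dice")
    intersection = (probs * target_onehot).sum(dims)
    cardinality = probs.sum(dims) + target_onehot.sum(dims)
    dice_per_class = (2.0 * intersection + eps) / (cardinality + eps)
    return 1.0 - dice_per_class.mean()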
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """

        # Set the model to eval mode; gradients are not needed for inference
        self.model.eval()
        # Reshape the volume to conform to the required model patch size
        volume = med_reshape(volume,
                             new_shape=(volume.shape[0], self.patch_size,
                                        self.patch_size))
        # Initialise the prediction mask with zeros (after reshaping, so the
        # mask shape matches the model output)
        slices = np.zeros(volume.shape)

        # For each x slice in the volume
        with torch.no_grad():
            for x_index in range(volume.shape[0]):
                # Get the x slice
                x_slice = volume[x_index, :, :].astype(np.single)
                # Convert to a [1, 1, H, W] tensor
                tensor_x_slice = torch.from_numpy(x_slice).unsqueeze(
                    0).unsqueeze(0).to(self.device)
                # Pass the slice through the model to get predictions
                predictions = self.model(tensor_x_slice)
                # Drop the batch and channel dimensions
                pred_squeezed = np.squeeze(predictions.cpu().detach())
                # Store the per-pixel class with the highest score
                slices[x_index, :, :] = torch.argmax(pred_squeezed, dim=0)

        # Return the volume of predictions
        return slices

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        # Set the model to eval mode; gradients are not needed for inference
        self.model.eval()

        # volume is a numpy array of shape [X,Y,Z]; we slice along the X axis.
        # Create a mask for each slice across the X (0th) dimension, then put
        # all slices into a 3D NumPy array. The method can be verified by
        # running it on one of the volumes in the training set and comparing
        # the result with the label in 3D Slicer.

        # Initialise the prediction mask with zeros
        slices = np.zeros(volume.shape)

        # For each x slice in the volume
        with torch.no_grad():
            for x_index in range(volume.shape[0]):
                # Get the x slice
                x_slice = volume[x_index, :, :].astype(np.single)
                # Convert to a [1, 1, H, W] tensor
                tensor_x_slice = torch.from_numpy(x_slice).unsqueeze(
                    0).unsqueeze(0).to(self.device)
                # Pass the slice through the model to get predictions
                predictions = self.model(tensor_x_slice)
                # Drop the batch and channel dimensions
                pred_squeezed = np.squeeze(predictions.cpu().detach())
                # Store the per-pixel class with the highest score
                slices[x_index, :, :] = torch.argmax(pred_squeezed, dim=0)

        # Return the volume of predictions
        return slices
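
# med_reshape is used throughout these examples but is defined elsewhere in
# the project. A minimal sketch, assuming it zero-pads a 3D volume up to
# new_shape with the original data anchored at the origin (the project's
# helper may differ in details):
import numpy as np

def med_reshape_sketch(image, new_shape):
    # Create an all-zero volume of the target shape and copy the source
    # volume into its corner; new_shape must be >= image.shape per axis.
    reshaped_image = np.zeros(new_shape, dtype=image.dtype)
    x, y, z = image.shape
    reshaped_image[:x, :y, :z] = image
    return reshaped_image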
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self, parameter_file_path='', model=None, device="cpu", patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        # Pad the volume to the conformant patch size, then run the standard inference
        volume = med_reshape(volume, new_shape=(volume.shape[0], self.patch_size, self.patch_size))
        return self.single_volume_inference(volume)

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # volume is a numpy array of shape [X,Y,Z]; we slice along the X axis.
        # Create a mask for each slice across the X (0th) dimension, then put
        # all slices into a 3D NumPy array. The method can be verified by
        # running it on one of the volumes in the training set and comparing
        # the result with the label in 3D Slicer.
        x = volume.shape[0]
        mask = np.zeros(volume.shape)
        for i in range(x):
            _slice = volume[i, :, :]
            if _slice.min() != _slice.max():
                _slice = (_slice - _slice.min()) / (_slice.max() - _slice.min())  # change the range of _slice to [0, 1]
            _slice_torch = torch.from_numpy(_slice).type(torch.float).unsqueeze(0).unsqueeze(0).to(self.device)
            pred = self.model(_slice_torch)
            pred = np.squeeze(pred.cpu().detach())
            mask[i, :, :] = torch.argmax(pred, dim=0)
        return mask
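
# A hypothetical usage of the agent above, assuming a trained checkpoint has
# been saved to "model.pth" (the path and volume shape are illustrative only):
import numpy as np

agent = UNetInferenceAgent(parameter_file_path="model.pth",
                           device="cpu",
                           patch_size=64)
volume = np.random.rand(32, 48, 48)  # an arbitrary-size 3D scan
mask = agent.single_volume_inference_unpadded(volume)  # pads to 64x64, then predicts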
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """

        # normalize the data volume
        image = (volume.astype(np.single) - np.min(volume)) / (np.max(volume) -
                                                               np.min(volume))
        # reshape the image volume to the same patch size used for training
        img_reshaped = med_reshape(image,
                                   new_shape=(self.patch_size, self.patch_size,
                                              image.shape[2]))
        # create a new 3d mask to store predicted results
        mask3d = np.zeros(img_reshaped.shape)
        # iterate over the image volume and predict all the slices
        for slc_idx in range(img_reshaped.shape[2]):
            # compute for each slice
            slc = torch.from_numpy(img_reshaped[:, :, slc_idx].astype(
                np.single)).unsqueeze(0).unsqueeze(0)
            # make prediction
            pred = self.model(slc.to(self.device))
            pred = np.squeeze(pred.cpu().detach())
            # store predicted data
            mask3d[:, :, slc_idx] = torch.argmax(pred, dim=0)
        # return the predicted volume
        return mask3d

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # volume is a numpy array of shape [X,Y,Z]; this example slices along
        # the last (Z) axis rather than the X axis.
        # Create a mask for each slice, then put all slices into a 3D NumPy
        # array. The method can be verified by running it on one of the
        # volumes in the training set and comparing with the label in 3D Slicer.

        # normalize
        image = (volume.astype(np.single) - np.min(volume)) / (np.max(volume) -
                                                               np.min(volume))

        new_image = med_reshape(image,
                                new_shape=(self.patch_size, self.patch_size,
                                           image.shape[2]))
        mask3d = np.zeros(new_image.shape)

        for slc_ix in range(new_image.shape[2]):
            tsr_test = torch.from_numpy(new_image[:, :, slc_ix].astype(
                np.single)).unsqueeze(0).unsqueeze(0)
            pred = self.model(tsr_test.to(self.device))
            pred = np.squeeze(pred.cpu().detach())
            mask3d[:, :, slc_ix] = torch.argmax(pred, dim=0)

        return mask3d
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """

        volume = med_reshape(volume, (volume.shape[0], self.patch_size,
                                      self.patch_size))

        return self.single_volume_inference(volume)

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        slices = []

        # Create a mask for each slice across the X (0th) dimension, then put
        # all slices into a 3D NumPy array. The method can be verified by
        # running it on one of the volumes in the training set and comparing
        # the result with the label in 3D Slicer.

        for i in range(0, volume.shape[0]):
            slc_tensor = torch.from_numpy(volume[i, :, :].astype(
                np.single)).unsqueeze(0).unsqueeze(0)
            pred = self.model(slc_tensor.to(self.device))
            mask = torch.argmax(np.squeeze(pred.cpu().detach()), dim=0)
            slices.append(mask)

        return np.dstack(slices).transpose(2, 0, 1)
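
# Why the final transpose: np.dstack stacks the [Y, Z] slice masks along a new
# last axis, producing [Y, Z, X]; transpose(2, 0, 1) moves the slice axis back
# to the front. A quick illustrative shape check:
import numpy as np

_slices = [np.zeros((64, 64)) for _ in range(3)]
assert np.dstack(_slices).shape == (64, 64, 3)
assert np.dstack(_slices).transpose(2, 0, 1).shape == (3, 64, 64)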
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        volume_padded = med_reshape(volume,
                                    new_shape=(self.patch_size,
                                               self.patch_size,
                                               self.patch_size))
        prediction_mask = self.single_volume_inference(volume_padded)
        return prediction_mask

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        slices = []

        # Create a mask for each slice across the X (0th) dimension, then put
        # all slices into a 3D NumPy array. The method can be verified by
        # running it on one of the volumes in the training set and comparing
        # the result with the label in 3D Slicer.
        for i in range(volume.shape[0]):
            test_slice = torch.from_numpy(volume[i, :, :].astype(np.single) /
                                          np.max(volume[i, :, :]))
            test_slice = test_slice.unsqueeze(0).unsqueeze(0).to(self.device)
            pred = self.model(test_slice)
            #             print(pred.shape)  # [1, 3, 64, 64]

            cpu_pred = pred.cpu()
            # class 0 is background (no hippocampus); classes 1 and 2 are the
            # two hippocampus segments (anterior/posterior)
            result = cpu_pred.detach().numpy()[
                0]  # .detach() is preferred over .data for breaking the graph
            result = np.argmax(
                result, axis=0
            )  # as fast as torch.argmax, and returns a NumPy array rather than a tensor

            slices.append(result)

        return np.stack(slices)
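
# One caveat with the per-slice normalization above: dividing by np.max of a
# slice yields NaNs for empty (all-zero) slices. A small guard, as a sketch:
import numpy as np

def normalize_slice(slc):
    # Scale to [0, 1]; leave all-zero slices untouched to avoid 0/0.
    m = np.max(slc)
    return slc.astype(np.single) / m if m > 0 else slc.astype(np.single)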
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

#     def single_volume_inference_unpadded(self, volume):
#         """
#         Runs inference on a single volume of arbitrary patch size,
#         padding it to the conformant size first

#         Arguments:
#             volume {Numpy array} -- 3D array representing the volume

#         Returns:
#             3D NumPy array with prediction mask
#         """

#         self.model.eval()

#         # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
#         slices = []
#         volume = med_reshape(volume, new_shape=(volume.shape[0], 64, 64))
#         slices = np.zeros(volume.shape)

#         def inference(img):
#             tsr_test = torch.from_numpy(img.astype(np.single)/np.max(img)).unsqueeze(0).unsqueeze(0)
#             print(tsr_test.shape)
#             pred = self.model(tsr_test.to(self.device))
#             return np.squeeze(pred.cpu().detach())

#         for slc_ix in range(volume.shape[0]):
#             pred = inference(volume[slc_ix,:,:])
#             slices[slc_ix,:,:] = torch.argmax(pred, dim=0)

#         return slices

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        # Move the slice axis to the front and pad to the training patch size
        new_volume = np.moveaxis(volume, -1, 0)
        reshaped_volume = med_reshape(new_volume,
                                      new_shape=(new_volume.shape[0],
                                                 self.patch_size,
                                                 self.patch_size))
        pred = self.single_volume_inference(reshaped_volume.astype(np.float32))

        # Resize each predicted slice back to the original in-plane shape
        # (assumes PIL's Image has been imported)
        reshaped_pred = np.zeros_like(new_volume)
        resize_dim = (reshaped_pred.shape[2], reshaped_pred.shape[1])
        for i in range(reshaped_pred.shape[0]):
            reshaped_pred[i] = Image.fromarray(pred[i].astype(
                np.uint8)).resize(resize_dim)

        return np.moveaxis(reshaped_pred, 0, -1)

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # volume is a numpy array of shape [X,Y,Z]; we slice along the X axis.
        # Create a mask for each slice across the X (0th) dimension, then put
        # all slices into a 3D NumPy array. The method can be verified by
        # running it on one of the volumes in the training set and comparing
        # the result with the label in 3D Slicer.

        # Run all slices through the model in a single batch: add a channel
        # dimension so the input tensor has shape [X, 1, Y, Z]
        input_tensor = torch.tensor(volume).unsqueeze(1)
        outputs = self.model(input_tensor.float().to(
            self.device)).cpu().detach()
        _, slices = torch.max(outputs, 1)

        return slices.numpy()
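
# Note: torch.max along dim=1 returns a (values, indices) pair, and the
# indices are exactly torch.argmax(outputs, dim=1). A quick check:
import torch

_x = torch.randn(2, 3, 64, 64)
assert torch.equal(torch.max(_x, 1)[1], torch.argmax(_x, dim=1))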
class UNetExperiment(PytorchExperiment):
    """
    The UNetExperiment inherits from PytorchExperiment. It implements the basic life cycle for a segmentation task with UNet (https://arxiv.org/abs/1505.04597).
    It is optimized to work with the provided NumpyDataLoader.

    The basic life cycle of a UNetExperiment is the same as for a PytorchExperiment:

        setup()
        (--> Automatically restore values if a previous checkpoint is given)
        prepare()

        for epoch in n_epochs:
            train()
            validate()
            (--> save current checkpoint)

        end()
    """
    def setup(self):
        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        test_keys = splits[self.config.fold]['test']

        self.device = torch.device(
            self.config.device if torch.cuda.is_available() else "cpu")

        self.train_data_loader = NumpyDataSet(
            self.config.data_dir,
            target_size=self.config.patch_size,
            batch_size=self.config.batch_size,
            keys=tr_keys)
        self.val_data_loader = NumpyDataSet(self.config.data_dir,
                                            target_size=self.config.patch_size,
                                            batch_size=self.config.batch_size,
                                            keys=val_keys,
                                            mode="val",
                                            do_reshuffle=False)
        self.test_data_loader = NumpyDataSet(
            self.config.data_test_dir,
            target_size=self.config.patch_size,
            batch_size=self.config.batch_size,
            keys=test_keys,
            mode="test",
            do_reshuffle=False)
        self.model = UNet(num_classes=self.config.num_classes,
                          in_channels=self.config.in_channels)

        self.model.to(self.device)

        # We use a combination of DICE loss and CE loss in this example.
        # This combination proved effective in the Medical Segmentation Decathlon.
        self.dice_loss = SoftDiceLoss(
            batch_dice=True)  # Softmax for the DICE loss!
        self.ce_loss = torch.nn.CrossEntropyLoss(
        )  # No softmax for the CE loss -> torch already applies it internally!

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.config.learning_rate)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print(
                    'checkpoint_dir is empty; please provide a directory to load the checkpoint from.'
                )
            else:
                self.load_checkpoint(name=self.config.checkpoint_dir,
                                     save_types=("model"))

        self.save_checkpoint(name="checkpoint_start")
        self.elog.print('Experiment set up.')

    def train(self, epoch):
        self.elog.print('=====TRAIN=====')
        self.model.train()

        data = None
        batch_counter = 0
        for data_batch in self.train_data_loader:

            self.optimizer.zero_grad()

            # Shape of data_batch = [1, b, c, w, h]
            # Desired shape = [b, c, w, h]
            # Move data and target to the GPU
            data = data_batch['data'][0].float().to(self.device)
            target = data_batch['seg'][0].long().to(self.device)

            pred = self.model(data)
            pred_softmax = F.softmax(
                pred, dim=1
            )  # We calculate a softmax, because our SoftDiceLoss expects that as an input. The CE-Loss does the softmax internally.

            loss = self.dice_loss(pred_softmax,
                                  target.squeeze()) + self.ce_loss(
                                      pred, target.squeeze())
            # loss = self.ce_loss(pred, target.squeeze())
            loss.backward()
            self.optimizer.step()

            # Some logging and plotting
            if (batch_counter % self.config.plot_freq) == 0:
                self.elog.print('Epoch: %d Loss: %.4f' %
                                (self._epoch_idx, loss))

                self.add_result(
                    value=loss.item(),
                    name='Train_Loss',
                    tag='Loss',
                    counter=epoch +
                    (batch_counter /
                     self.train_data_loader.data_loader.num_batches))

                self.clog.show_image_grid(data.float(),
                                          name="data",
                                          normalize=True,
                                          scale_each=True,
                                          n_iter=epoch)
                self.clog.show_image_grid(target.float(),
                                          name="mask",
                                          title="Mask",
                                          n_iter=epoch)
                self.clog.show_image_grid(torch.argmax(pred.cpu(),
                                                       dim=1,
                                                       keepdim=True),
                                          name="unt_argmax",
                                          title="Unet",
                                          n_iter=epoch)
                self.clog.show_image_grid(pred.cpu()[:, 1:2, ],
                                          name="unt",
                                          normalize=True,
                                          scale_each=True,
                                          n_iter=epoch)

            batch_counter += 1

        assert data is not None, 'data is None. Please check if your dataloader works properly'

    def validate(self, epoch):
        self.elog.print('VALIDATE')
        self.model.eval()

        data = None
        loss_list = []

        with torch.no_grad():
            for data_batch in self.val_data_loader:
                data = data_batch['data'][0].float().to(self.device)
                target = data_batch['seg'][0].long().to(self.device)

                pred = self.model(data)
                pred_softmax = F.softmax(
                    pred, dim=1
                )  # We calculate a softmax, because our SoftDiceLoss expects that as an input. The CE-Loss does the softmax internally.

                loss = self.dice_loss(pred_softmax,
                                      target.squeeze()) + self.ce_loss(
                                          pred, target.squeeze())
                loss_list.append(loss.item())

        assert data is not None, 'data is None. Please check if your dataloader works properly'
        self.scheduler.step(np.mean(loss_list))

        self.elog.print('Epoch: %d Loss: %.4f' %
                        (self._epoch_idx, np.mean(loss_list)))

        self.add_result(value=np.mean(loss_list),
                        name='Val_Loss',
                        tag='Loss',
                        counter=epoch + 1)

        self.clog.show_image_grid(data.float(),
                                  name="data_val",
                                  normalize=True,
                                  scale_each=True,
                                  n_iter=epoch)
        self.clog.show_image_grid(target.float(),
                                  name="mask_val",
                                  title="Mask",
                                  n_iter=epoch)
        self.clog.show_image_grid(torch.argmax(pred.data.cpu(),
                                               dim=1,
                                               keepdim=True),
                                  name="unt_argmax_val",
                                  title="Unet",
                                  n_iter=epoch)
        self.clog.show_image_grid(pred.data.cpu()[:, 1:2, ],
                                  name="unt_val",
                                  normalize=True,
                                  scale_each=True,
                                  n_iter=epoch)

    def test(self):
        # TODO
        print('TODO: Implement your test() method here')
class UNetExperiment:
    """
    This class implements the basic life cycle for a segmentation task with UNet(https://arxiv.org/abs/1505.04597).
    The basic life cycle of a UNetExperiment is:
        run():
            for epoch in n_epochs:
                train()
                validate()
        test()
    """
    def __init__(self, config, split, dataset):
        self.n_epochs = config.n_epochs
        self.split = split
        self._time_start = ""
        self._time_end = ""
        self.epoch = 0
        self.name = config.name

        # Create output folders
        dirname = f'{time.strftime("%Y-%m-%d_%H%M", time.gmtime())}_{self.name}'
        self.out_dir = os.path.join(config.test_results_dir, dirname)
        os.makedirs(self.out_dir, exist_ok=True)

        # Create data loaders
        self.train_loader = DataLoader(SlicesDataset(dataset[split["train"]]),
                batch_size=config.batch_size, shuffle=True, num_workers=0)
        self.val_loader = DataLoader(SlicesDataset(dataset[split["val"]]),
                batch_size=config.batch_size, shuffle=True, num_workers=0)

        # access volumes directly for testing
        self.test_data = dataset[split["test"]]

        if not torch.cuda.is_available():
            print("WARNING: No CUDA device is found. This may take significantly longer!")
        #self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.device = torch.device('cpu')

        # use a recursive UNet model from German Cancer Research Center, Division of Medical Image Computing
        self.model = UNet()
        self.model.to(self.device)

        # use a standard cross-entropy loss since the model output is essentially
        # a tensor with softmax prediction of each pixel's probability of belonging to a certain class
        self.loss_function = torch.nn.CrossEntropyLoss()

        # use the Adam optimizer to update the weights
        self.optimizer = optim.Adam(self.model.parameters(), lr=config.learning_rate)
        
        # Scheduler helps to update learning rate automatically
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')

        # Set up Tensorboard. By default it saves data into the runs folder;
        # launch TensorBoard separately (tensorboard --logdir runs) to view it.
#         self.tensorboard_train_writer = SummaryWriter(comment="_train")
#         self.tensorboard_val_writer = SummaryWriter(comment="_val")

    def train(self):
        """
        This method is executed once per epoch and takes
        care of the model weight update cycle
        """
        print(f"Training epoch {self.epoch}...")
        self.model.train()

        # Loop over the minibatches
        for i, batch in enumerate(self.train_loader):
            self.optimizer.zero_grad()

            # Feed data to the model and feed target to the loss function
            data = batch['image'].float()
            target = batch['seg']
            prediction = self.model(data.to(self.device))
            prediction_softmax = F.softmax(prediction, dim=1)
            # CrossEntropyLoss applies softmax internally, so it gets the raw logits
            loss = self.loss_function(prediction, target[:, 0, :, :].to(self.device))

            # Dimensions of prediction: [batch_size, 3 classes, coronal, axial]
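            # e.g. with batch_size=8 and 64x64 patches:
            #   prediction.shape == torch.Size([8, 3, 64, 64])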

            loss.backward()
            self.optimizer.step()

            if (i % 10) == 0:
                # Output to console on every 10th batch
                print(f"\nEpoch: {self.epoch} Train loss: {loss}, {100*(i+1)/len(self.train_loader):.1f}% complete")

                counter = 100*self.epoch + 100*(i/len(self.train_loader))

#                 log_to_tensorboard(
#                     self.tensorboard_train_writer,
#                     loss,
#                     data,
#                     target,
#                     prediction_softmax,
#                     prediction,
#                     counter)

            print(".", end='')

        print("\nTraining complete")

    def validate(self):
        """
        This method runs the validation cycle, using the same metrics as
        the train method. Note that the model needs to be switched to eval
        mode and no_grad needs to be used so that gradients do not
        propagate
        """
        print(f"Validating epoch {self.epoch}...")

        # Turn off gradient accumulation by switching model to "eval" mode
        self.model.eval()
        loss_list = []

        with torch.no_grad():
            for i, batch in enumerate(self.val_loader):              
                data = batch['image'].float()
                target = batch['seg']
                prediction = self.model(data.to(self.device))
                prediction_softmax = F.softmax(prediction, dim=1)
                # CrossEntropyLoss applies softmax internally, so it gets the raw logits
                loss = self.loss_function(prediction, target[:, 0, :, :].to(self.device))

                print(f"Batch {i}. Data shape {data.shape} Loss {loss}")

                # We report the loss accumulated across the whole validation set
                loss_list.append(loss.item())

        self.scheduler.step(np.mean(loss_list))

#         log_to_tensorboard(
#             self.tensorboard_val_writer,
#             np.mean(loss_list),
#             data,
#             target,
#             prediction_softmax, 
#             prediction,
#             (self.epoch+1) * 100)
        print(f"Validation complete")

    def save_model_parameters(self):
        """
        Saves model parameters to a file in results directory
        """
        path = os.path.join(self.out_dir, "model.pth")

        torch.save(self.model.state_dict(), path)

    def load_model_parameters(self, path=''):
        """
        Loads model parameters from a supplied path or a
        results directory
        """
        if not path:
            model_path = os.path.join(self.out_dir, "model.pth")
        else:
            model_path = path

        if os.path.exists(model_path):
            self.model.load_state_dict(torch.load(model_path))
        else:
            raise Exception(f"Could not find path {model_path}")

    def run_test(self):
        """
        This runs the test cycle on the test dataset.
        Note that the process and evaluations are quite different from training.
        Here we compute a lot more metrics and return a dictionary that
        could later be persisted as JSON
        """
        print("Testing...")
        self.model.eval()

        inference_agent = UNetInferenceAgent(model=self.model, device=self.device)

        out_dict = {}
        out_dict["volume_stats"] = []
        dc_list = []
        jc_list = []

        # for every volume in the test set
        for i, x in enumerate(self.test_data):
            pred_label = inference_agent.single_volume_inference(x["image"])

            # We compute and report Dice and Jaccard similarity coefficients which 
            # assess how close our volumes are to each other

            dc = Dice3d(pred_label, x["seg"])
            jc = Jaccard3d(pred_label, x["seg"])
            dc_list.append(dc)
            jc_list.append(jc)

            # STAND-OUT SUGGESTION: By way of exercise, consider also outputting:
            # * Sensitivity and specificity (and explain semantic meaning in terms of 
            #   under/over segmenting)
            # * Dice-per-slice and render combined slices with lowest and highest DpS
            # * Dice per class (anterior/posterior)

            out_dict["volume_stats"].append({
                "filename": x['filename'],
                "dice": dc,
                "jaccard": jc
                })
            print(f"{x['filename']} Dice {dc:.4f}. {100*(i+1)/len(self.test_data):.2f}% complete")

        out_dict["overall"] = {
            "mean_dice": np.mean(dc_list),
            "mean_jaccard": np.mean(jc_list)}

        print("\nTesting complete.")
        return out_dict

    def run(self):
        """
        Kicks off train cycle and writes model parameter file at the end
        """
        self._time_start = time.time()

        print("Experiment started.")

        # Iterate over epochs
        for self.epoch in range(self.n_epochs):
            self.train()
            self.validate()

        # save model for inferencing
        self.save_model_parameters()

        self._time_end = time.time()
        print(f"Run complete. Total time: {time.strftime('%H:%M:%S', time.gmtime(self._time_end - self._time_start))}")
class UNetInferenceAgent:
    """
    Stores model and parameters and some methods to handle inferencing
    """
    def __init__(self,
                 parameter_file_path='',
                 model=None,
                 device="cpu",
                 patch_size=64):

        self.model = model
        self.patch_size = patch_size
        self.device = device

        if model is None:
            self.model = UNet(num_classes=3)

        if parameter_file_path:
            self.model.load_state_dict(
                torch.load(parameter_file_path, map_location=self.device))

        self.model.to(device)

    def single_volume_inference_unpadded(self, volume):
        """
        Runs inference on a single volume of arbitrary patch size,
        padding it to the conformant size first

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """

        raise NotImplementedError

    def single_volume_inference(self, volume):
        """
        Runs inference on a single volume of conformant patch size

        Arguments:
            volume {Numpy array} -- 3D array representing the volume

        Returns:
            3D NumPy array with prediction mask
        """
        self.model.eval()

        # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis
        slices = []

        # Create a mask for each slice across the X (0th) dimension, then put
        # all slices into a 3D NumPy array. The method can be verified by
        # running it on one of the volumes in the training set and comparing
        # the result with the label in 3D Slicer.

        # Initialize the mask3d array
        mask3d = np.zeros(volume.shape)
        # Cycle over each slice along x
        for ix in range(volume.shape[0]):
            # Get the image slice and normalize it (guard against empty
            # slices to avoid division by zero)
            slc = volume[ix, :, :].astype(np.single)
            slc_max = np.max(slc)
            if slc_max > 0:
                slc = slc / slc_max

            # Run inference on the slice
            mytensor = torch.from_numpy(slc).unsqueeze(0).unsqueeze(0)
            pred = self.model(mytensor.to(self.device))

            # Take the per-pixel argmax over the class dimension
            mask3d[ix, :, :] = torch.argmax(np.squeeze(pred.cpu().detach()),
                                            dim=0)
        return mask3d
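
# A hypothetical end-to-end usage of the agent above (the path and shapes are
# illustrative only; the volume is padded to the conformant size first):
import numpy as np

agent = UNetInferenceAgent(parameter_file_path="model.pth",
                           device="cpu",
                           patch_size=64)
scan = np.random.rand(35, 50, 50)  # arbitrary 3D volume
conformant = med_reshape(scan, new_shape=(35, 64, 64))  # project helper (see sketch above)
mask = agent.single_volume_inference(conformant)  # [35, 64, 64] label mask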