Example #1
    def validate(self, epoch):
        self.elog.print('VALIDATE')
        self.model.eval()

        data = None
        loss_list = []
        acc_list = []
        with torch.no_grad():
            for batch_idx, (images, masks) in enumerate(self.val_data_loader):
                data, target = images.to(self.device), masks.to(self.device)
                pred = self.model(data)
                # A softmax/CE variant was used here previously: SoftDiceLoss
                # expects softmax probabilities as its input, while the CE
                # loss applies the softmax internally. It was replaced by the
                # BCE + soft-dice sum below; F.binary_cross_entropy expects
                # probabilities, so pred is assumed to be post-sigmoid
                # (test() applies torch.sigmoid explicitly).
                acc = soft_dice(pred, target)  # NOTE: test() negates soft_dice before reporting dice
                acc_list.append(acc.item())

                loss = F.binary_cross_entropy(pred, target) + soft_dice(pred, target)
                loss_list.append(loss.item())

        assert data is not None, 'data is None. Please check if your dataloader works properly'
        self.scheduler.step(np.mean(loss_list))

        self.elog.print('Epoch: %d Mean Loss: %.4f Mean Dice: %.4f' %
                        (self._epoch_idx, np.mean(loss_list), np.mean(acc_list)))

        self.add_result(value=np.mean(loss_list),
                        name='Val_Loss',
                        tag='Loss',
                        counter=epoch + 1)
        self.add_result(value=np.mean(acc_list),
                        name='Val_Mean_Accuracy',
                        tag='Accuracy',
                        counter=epoch + 1)

        self.clog.show_image_grid(data.float().cpu(),
                                  name="data_val",
                                  normalize=True,
                                  scale_each=True,
                                  n_iter=epoch)
        self.clog.show_image_grid(target.float().cpu(),
                                  name="mask_val",
                                  title="Mask",
                                  n_iter=epoch)
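        # NOTE: argmax over dim=1 assumes a multi-channel prediction; for a
        # single-channel sigmoid output it would always return zeros.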
        self.clog.show_image_grid(torch.argmax(pred.data.cpu(),
                                               dim=1,
                                               keepdim=True),
                                  name="unt_argmax_val",
                                  title="Unet",
                                  n_iter=epoch)
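
validate() above calls soft_dice(pred, target) without defining it (train()
and test() below do the same). A minimal sketch of one plausible
implementation, assuming pred already holds probabilities and that the
function returns the negated dice coefficient, so that BCE + soft_dice is a
loss to minimize and (-1) * soft_dice recovers the dice score as Example #3
does:

def soft_dice(pred, target, smooth=1e-6):
    # Flatten each sample so the dice statistic is computed per sample,
    # then averaged over the batch.
    pred = pred.contiguous().view(pred.size(0), -1)
    target = target.contiguous().view(target.size(0), -1)
    intersection = (pred * target).sum(dim=1)
    dice = (2.0 * intersection + smooth) / (
        pred.sum(dim=1) + target.sum(dim=1) + smooth)
    # Negated so that minimizing the combined loss maximizes dice.
    return -dice.mean()
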
Example #2
    def train(self, epoch):
        self.elog.print('=====TRAIN=====')
        self.model.train()

        data = None
        batch_counter = 0
        for batch_idx, (images, masks) in enumerate(self.train_data_loader):
            data, target = images.to(self.device), masks.to(self.device)

            self.optimizer.zero_grad()

            #print("data  shape :",data.shape, "target shape :",target.shape)
            pred = self.model(data)

            #pred_softmax = F.softmax(pred, dim=1)
            #We calculate a softmax, because our SoftDiceLoss expects that as an input. The CE-Loss does the softmax internally.
            #print("pred_softmax  shape :",pred_softmax.shape, "target shape :",target.shape)
            #loss = self.dice_loss(pred_softmax, target.squeeze()) + self.ce_loss(pred, target.squeeze())
            loss = F.binary_cross_entropy(pred, target) + soft_dice(
                pred, target)

            #loss,_ = calc_loss(pred, target, metrics)
            loss.backward()
            self.optimizer.step()

            #running_loss+=loss.item()
            #epoch_loss = running_loss/len(train_data_loader)

            # Some logging and plotting
            if (batch_counter % self.config.plot_freq) == 0:
                self.elog.print('Epoch: {0} Loss: {1:.4f}'.format(
                    self._epoch_idx, loss.item()))

                # A fractional counter (epoch + batch_counter / num_batches)
                # would give per-batch resolution; the plain epoch index is
                # used here instead.
                self.add_result(value=loss.item(),
                                name='Train_Loss',
                                tag='Loss',
                                counter=epoch)
                self.clog.show_image_grid(data.float().cpu(),
                                          name="data",
                                          normalize=True,
                                          scale_each=True,
                                          n_iter=epoch)
                self.clog.show_image_grid(target.float().cpu(),
                                          name="mask",
                                          title="Mask",
                                          n_iter=epoch)
                self.clog.show_image_grid(torch.argmax(pred.detach().cpu(),
                                                       dim=1,
                                                       keepdim=True),
                                          name="unt_argmax",
                                          title="Unet",
                                          n_iter=epoch)

            batch_counter += 1

        assert data is not None, 'data is None. Please check if your dataloader works properly'
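
validate() passes a scalar metric to self.scheduler.step(), which matches
torch.optim.lr_scheduler.ReduceLROnPlateau. A hedged sketch of how the
surrounding experiment might wire optimizer, scheduler, and epoch loop
together; the stand-in model and the loop body are assumptions, not taken
from the snippet:

import torch.nn as nn
import torch.optim as optim

# Stand-in for the real network (the "Unet" titles above suggest a U-Net);
# the sigmoid output matches the probability assumption in the losses.
model = nn.Sequential(nn.Conv2d(1, 1, kernel_size=3, padding=1), nn.Sigmoid())

optimizer = optim.Adam(model.parameters(), lr=1e-3)
# ReduceLROnPlateau lowers the learning rate once the tracked validation
# loss stops improving, matching scheduler.step(np.mean(loss_list)) above.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                 factor=0.1, patience=5)

for epoch in range(30):
    # trainer.train(epoch) and trainer.validate(epoch) would run here; a
    # placeholder metric keeps the sketch self-contained.
    val_loss = 1.0 / (epoch + 1)
    scheduler.step(val_loss)
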
Example #3
    def test(self):
        # TODO: finish test-time evaluation
        self.elog.print('----------Test-------------')
        self.model.eval()
        trial = 10  # fixed counter for the logging calls below
        data = None
        loss_list = []
        acc_list = []
        with torch.no_grad():
            for batch_idx, (images, masks) in enumerate(self.test_data_loader):
                data, target = images.to(self.device), masks.to(self.device)
                pred = self.model(data)
                # Apply the sigmoid explicitly: binary_cross_entropy and the
                # dice term both expect probabilities, not raw logits.
                pred = torch.sigmoid(pred)

                # soft_dice returns a loss term; negate it to report dice.
                acc = (-1) * soft_dice(pred, target)
                acc_list.append(acc.item())

                loss = self.bce_weight * F.binary_cross_entropy(pred, target) \
                    + (1 - self.bce_weight) * soft_dice(pred, target)
                loss_list.append(loss.item())

        assert data is not None, 'data is None. Please check if your dataloader works properly'

        # Log the mean loss/dice once over the whole test set.
        self.add_result(value=np.mean(loss_list), name='Test_Loss', tag='Test_Loss', counter=trial + 1)
        self.add_result(value=np.mean(acc_list), name='Test_Mean_Accuracy', tag='Test_Accuracy', counter=trial + 1)

        self.clog.show_image_grid(data.float().cpu(), name="data_test", normalize=True, scale_each=True, n_iter=trial)
        self.clog.show_image_grid(target.float().cpu(), name="mask_test", title="Mask", n_iter=trial)
        self.clog.show_image_grid(torch.argmax(pred.data.cpu(), dim=1, keepdim=True), name="unt_argmax_test", title="Unet", n_iter=trial)
        self.clog.show_image_grid(pred.data.cpu(), name="unt_test", normalize=True, scale_each=True, n_iter=trial)

        self.elog.print('Test Mean Loss: %.4f Test Mean Dice: %.4f' %
                        (np.mean(loss_list), np.mean(acc_list)))
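
The weighted sum in test() can be factored into a standalone helper. A sketch
assuming the soft_dice convention above (probabilities in, negated dice out);
the default weight is an assumption, since self.bce_weight is set elsewhere:

import torch.nn.functional as F

def bce_dice_loss(pred, target, bce_weight=0.5):
    # pred must hold probabilities: apply torch.sigmoid to raw logits
    # first, exactly as test() does before computing the loss.
    bce = F.binary_cross_entropy(pred, target)
    dice = soft_dice(pred, target)  # negated dice, per the earlier sketch
    return bce_weight * bce + (1.0 - bce_weight) * dice
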
# %%###########################################################################
########################### HYPERPARAMETERS ###################################
###############################################################################
LEARN_RATE_0 = 1e-3
LEARN_RATE_FINETUNE = 1e-4
LEARN_RATE_ALPHA = 0.25
LEARN_RATE_STEP = 3
N_EPOCH = 30
MB_SIZE = 10
KEEP_PROB = 0.8
ACTIVATION = 'selu'
PADDING = 'SYMMETRIC'
AUGMENTATION = True
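
# LEARN_RATE_ALPHA and LEARN_RATE_STEP suggest a stepwise decay schedule.
# The snippet does not show how they are consumed, so the reading below is
# an assumption: scale the base rate by alpha every LEARN_RATE_STEP epochs.
def learning_rate(epoch):
    return LEARN_RATE_0 * (LEARN_RATE_ALPHA ** (epoch // LEARN_RATE_STEP))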

LOSS = [[categorical_cross_entropy(), soft_dice(logits_axis=1, label_axis=0, weight=2)]]

# LOSS = [[categorical_cross_entropy(), soft_dice(logits_axis=1, label_axis=0)],
#         [categorical_cross_entropy(onehot_convert=False),
#          soft_dice(logits_axis=1, label_axis=1, weight=2),
#          soft_dice(logits_axis=2, label_axis=2, weight=10)]]

# LOSS = [None,
#         [categorical_cross_entropy(onehot_convert=False),
#          soft_dice(logits_axis=1, label_axis=1),
#          soft_dice(logits_axis=2, label_axis=2, weight=2)]]
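
# LOSS is a nested list of loss callables, one inner list per output head,
# with None apparently disabling a head (see the commented variants above).
# Unlike the direct-call soft_dice in the examples above, soft_dice here is
# a factory returning a callable. A sketch of how such a structure might be
# reduced; the summation scheme is an assumption, not taken from the snippet:
def total_loss(logits_per_head, labels_per_head, loss_spec=LOSS):
    total = 0.0
    for head_losses, logits, labels in zip(loss_spec, logits_per_head,
                                           labels_per_head):
        if head_losses is None:
            continue  # this head contributes no loss term
        for loss_fn in head_losses:
            total += loss_fn(logits, labels)
    return total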

# %%###########################################################################
############################## LOADING DATASETS ###############################
###############################################################################
# Display working/train/test directories.