Example #1
def validate_net(net, loader, name='Test', epoch=None, is_save=True, limit=None, display=False):
    net.eval()
    dices_a = []
    dices_b = []
    losses_a = []
    losses_b = []
    
    count = 0
    
    for j, data in enumerate(loader):
        
        if limit is not None and count >= limit:
            break
        
        # IMAGE_INDEX selects which input tensor to evaluate on (raw image at index 0 vs. edge map at index 2)
        image_a = data['A'][IMAGE_INDEX].cuda()
        target_a = data['A'][1].cuda()
        
        image_b = data['B'][IMAGE_INDEX].cuda()
        target_b = data['B'][1].cuda()
        
        # do validation on test set here!
        with torch.no_grad():
            if torch.max(target_a) != 0:
#                 res_a = net.downsample(image_a)      
#                 pred = net.upsample(*res_a)
                pred = net(image_a)
                loss = criterion(pred, target_a).item()
                pred = torch.round(pred)
                dice_score = compute_dice_metric(pred, target_a).item()
                dices_a.append(dice_score)
                losses_a.append(loss)
            
            if torch.max(target_b) != 0:
#                 res_b = net.downsample(image_b)      
#                 pred  = net.upsample(*res_b)
                pred = net(image_b)
                loss = criterion(pred, target_b).item()
                pred = torch.round(pred)
                dice_score = compute_dice_metric(pred, target_b).item()
                dices_b.append(dice_score)
                losses_b.append(loss)
                        
        count += 1
    
    # print dice scores here!
    mean_dice_a = np.mean(dices_a)
    mean_dice_b = np.mean(dices_b)
    
    mean_loss_a = np.mean(losses_a)
    mean_loss_b = np.mean(losses_b)
    
    print('{} - Avg dice A: {}, Avg dice B: {}, Avg loss A: {}, Avg loss B: {}'.format(name, 
                                                                                       mean_dice_a, 
                                                                                       mean_dice_b, 
                                                                                       mean_loss_a, 
                                                                                       mean_loss_b))
            
    # return the aggregated metrics
    return mean_dice_a, mean_dice_b, mean_loss_a, mean_loss_b
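
This snippet relies on a few globals defined elsewhere in the notebook: IMAGE_INDEX, criterion and compute_dice_metric. A minimal sketch of what they might look like, assuming a binary segmentation setup with sigmoid-activated predictions (the names and choices below are illustrative, not the notebook's actual definitions):

import torch
import torch.nn as nn

IMAGE_INDEX = 0           # assumed: position of the image tensor in each sample tuple
criterion = nn.BCELoss()  # assumed: binary cross-entropy on sigmoid probabilities

def compute_dice_metric(pred, target, eps=1e-7):
    # Dice coefficient between a binarised prediction and the ground-truth mask,
    # averaged over the batch.
    pred = pred.contiguous().view(pred.size(0), -1)
    target = target.contiguous().view(target.size(0), -1)
    intersection = (pred * target).sum(dim=1)
    return ((2.0 * intersection + eps) /
            (pred.sum(dim=1) + target.sum(dim=1) + eps)).mean()
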
Example #2
    def step(self, x_s, x_t, y_s):        
        # Adversarial update: the discriminator maximises log(D(x)) + log(1 - D(G(x))),
        # then the generator maximises log(D(G(x)))
        
        # update d only
        self.d_optim.zero_grad()
        
        out_x_s = self.forward_gen(x_s)
        out_x_t = self.forward_gen(x_t)
        x_s_real = self.d(out_x_s)

        target_real = self.get_target(x_s_real)        
        loss_real = self.criterion_gan(x_s_real, target_real)
        loss_real.backward()
        
        # get generated feature maps from pool / replay for stability
        x_s_fake_map = (self.pool.query(out_x_t)).detach()
        x_s_fake = self.d(x_s_fake_map)
        
        target_fake = self.get_target(x_s_fake, is_true=False)
        loss_fake = self.criterion_gan(x_s_fake, target_fake)
        loss_fake.backward()
        self.d_optim.step()
        
        # update g - max log(D(G(x)))
        self.g_optim.zero_grad()
        # score the live (non-detached) generator output: the pooled map was
        # detached above, so it cannot carry gradients back to the generator
        x_t_fake = self.d(out_x_t)
        target_real = self.get_target(x_t_fake)
        loss_g = self.criterion_gan(x_t_fake, target_real)
        loss_g.backward()
        self.g_optim.step()
        
        # Segmentation loss
        self.set_requires_grad(self.g, requires_grad=False)
        # self.g_optim.zero_grad()
        self.seg_optim.zero_grad()
        
        out_seg = self.forward_seg(x_s)
        seg_loss = self.criterion_seg(out_seg, y_s)
        seg_loss.backward()        
        
        # self.g_optim.step()
        self.seg_optim.step()
        # re-enable generator gradients so the next call's GAN update can reach them
        self.set_requires_grad(self.g, requires_grad=True)
        
        # calculate dice score for current batch
        dice_score = compute_dice_metric(torch.round(out_seg), y_s).item()
        
        # return scalar losses and the batch dice score
        return seg_loss.item(), (loss_real + loss_fake).item(), dice_score
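
step() also leans on two helpers that are not shown: pool.query, a CycleGAN-style replay buffer that mixes the current generator output with previously stored ones to stabilise the discriminator (the .detach() keeps the discriminator update from back-propagating into the generator), and get_target, which builds a label tensor matching the discriminator output. A minimal sketch of the latter, assuming a vanilla real/fake BCE objective:

import torch

def get_target(self, d_out, is_true=True):
    # Constant label tensor (1s for "real", 0s for "fake") with the same
    # shape, dtype and device as the discriminator output.
    return torch.full_like(d_out, 1.0 if is_true else 0.0)
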
Example #3
def train_net(net, loader, data_key, image_idx, target_idx, epoch, is_train=True, opt=None, labeller=None, name='train'):
    dice_scores = []
    losses = []
    count = 0
    
    for i, data in enumerate(tqdm.tqdm(loader)):                
        image = data[data_key][image_idx].cuda()
        
        if labeller is None:
            target = data[data_key][target_idx].cuda()
        else:
            labeller.eval()
            with torch.no_grad():
                target = labeller.upsample(*(labeller.downsample(image)))
                target = torch.round(target).detach().cuda()
                
        if is_train:
            net.train()
            opt.zero_grad()
            # forward pass: encoder (downsample) followed by decoder (upsample)
            pred = net.upsample(*(net.downsample(image)))
            loss = criterion(pred, target)
            pred = torch.round(pred)
            loss.backward()
            opt.step()
            loss = loss.item()
        else:
            net.eval()
            # do validation on test set here!
            with torch.no_grad():
                pred = net.upsample(*(net.downsample(image)))
                loss = criterion(pred, target).item()
                pred = torch.round(pred)
        
        dice_score = compute_dice_metric(pred, target).item()
        dice_scores.append(dice_score)
        losses.append(loss)
                        
        count += 1
    
    # print dice scores here!
    mean_dice = np.mean(dice_scores)
    mean_loss = np.mean(losses)
    
    print('{} - epoch {} - Avg dice: {}, Avg loss: {}'.format(name, epoch, mean_dice, mean_loss))
            
    # return the aggregated metrics
    return mean_dice, mean_loss
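
A typical driver loop for train_net might look like the sketch below; num_epochs, optimiser and the two loaders are assumed to exist elsewhere in the notebook, and domain 'A' is assumed to store the image at index 0 and the mask at index 1:

for epoch in range(num_epochs):
    # supervised pass on domain 'A'
    train_dice, train_loss = train_net(net, train_loader, data_key='A',
                                       image_idx=0, target_idx=1, epoch=epoch,
                                       is_train=True, opt=optimiser, name='train')

    # evaluation pass, no optimiser needed
    val_dice, val_loss = train_net(net, val_loader, data_key='A',
                                   image_idx=0, target_idx=1, epoch=epoch,
                                   is_train=False, name='val')
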
Example #4
            # optimiser.zero_grad()
            
            net(image_b)
            
            # pred = net.upsample(*res2)
            # NOTE: both arguments below are the same tensor, so this loss is
            # identically zero; the discrepancy is presumably meant to compare
            # the outputs of two distinct classifier heads on image_b
            loss = discrepancy_loss(net.logits, net.logits)
            
            loss.backward()
            
            optimiser_enc.step()
            # optimiser.step()
        
        pred_seg_a = net(image_a)        
        pred_seg_b = net(image_b)
        
        dice_score_a = compute_dice_metric(torch.round(pred_seg_a), target_a)

        epoch_train_dice_a.append(dice_score_a.item())
        epoch_train_loss_seg.append(loss_seg_a.item())
        
    # mean_loss_rec = np.mean(epoch_train_loss_rec)
    mean_loss_seg = np.mean(epoch_train_loss_seg)
    mean_dice_a = np.mean(epoch_train_dice_a)
    
    # print('Train A - avg seg:{}'.format(np.mean(epoch_train_loss_seg)))
    
    print('Train A - avg seg: {}, dice A: {}'.format(mean_loss_seg, mean_dice_a))

    dice_a, dice_b, loss_a, loss_b = validate_net(net=net, loader=val_loader, name='Validation', epoch=str(e))
    
    val_loss_a.append(loss_a)
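
This fragment appears to follow a Maximum Classifier Discrepancy style update, where the encoder is trained to minimise the disagreement between two classifier heads on target-domain images. discrepancy_loss is not shown; for binary segmentation it is commonly the mean absolute difference between the two heads' probability maps, e.g. the sketch below (an assumed definition, not the notebook's):

import torch

def discrepancy_loss(logits_1, logits_2):
    # Mean absolute difference between two classifiers' probability maps
    # (sigmoid activation assumed for binary segmentation).
    return torch.mean(torch.abs(torch.sigmoid(logits_1) - torch.sigmoid(logits_2)))
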
Example #5
def validate_net(net,
                 loader,
                 name='Test',
                 epoch=None,
                 is_save=True,
                 limit=None,
                 display=False):
    net.eval()
    dices_a = []
    dices_b = []
    losses_a = []
    losses_b = []

    count = 0

    for j, data in enumerate(loader):

        if limit is not None and count >= limit:
            break

        image_a = data['A'][0].cuda()
        target_a = data['A'][1].cuda()

        image_b = data['B'][0].cuda()
        target_b = data['B'][1].cuda()

        image_a_adapt = FDA_source_to_target(image_a, image_b, 0.02).cuda()
        # image_b_adapt = FDA_source_to_target(image_b, image_a, 0.02).cuda()

        # do validation on test set here!
        with torch.no_grad():
            if torch.max(target_a) != 0:
                a1, a2, a3, a4, a5 = net.downsample(image_a_adapt)
                pred = net.upsample(a1, a2, a3, a4, a5)
                # pred = net(image_a)
                loss = criterion(pred, target_a).item()
                pred = torch.round(pred)
                dice_score = compute_dice_metric(pred, target_a).item()
                dices_a.append(dice_score)
                losses_a.append(loss)

            if torch.max(target_b) != 0:
                a1, a2, a3, a4, a5 = net.downsample(image_b)
                pred = net.upsample(a1, a2, a3, a4, a5)
                # pred = net(image_b)
                loss = criterion(pred, target_b).item()
                pred = torch.round(pred)
                dice_score = compute_dice_metric(pred, target_b).item()
                dices_b.append(dice_score)
                losses_b.append(loss)

        count += 1

    # print dice scores here!
    mean_dice_a = np.mean(dices_a)
    mean_dice_b = np.mean(dices_b)

    mean_loss_a = np.mean(losses_a)
    mean_loss_b = np.mean(losses_b)

    print(
        '{} - Avg dice A: {}, Avg dice B: {}, Avg loss A: {}, Avg loss B: {}'.
        format(name, mean_dice_a, mean_dice_b, mean_loss_a, mean_loss_b))

    # return the aggregated metrics
    return mean_dice_a, mean_dice_b, mean_loss_a, mean_loss_b
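
FDA_source_to_target is the Fourier Domain Adaptation transform of Yang & Soatto (2020): the low-frequency amplitude of the source spectrum is replaced with the target's while the source phase is kept, so source images pick up the target domain's global appearance. A minimal sketch of the idea with torch.fft (not the exact implementation used here; the 0.02 argument plays the role of beta, the relative size of the swapped low-frequency window):

import torch

def fda_low_freq_swap(src, trg, beta=0.02):
    # src, trg: (N, C, H, W) image batches
    fft_src = torch.fft.fft2(src, dim=(-2, -1))
    fft_trg = torch.fft.fft2(trg, dim=(-2, -1))

    amp_src, pha_src = torch.abs(fft_src), torch.angle(fft_src)
    amp_trg = torch.abs(fft_trg)

    # centre the spectra so the low frequencies sit in the middle
    amp_src = torch.fft.fftshift(amp_src, dim=(-2, -1))
    amp_trg = torch.fft.fftshift(amp_trg, dim=(-2, -1))

    _, _, h, w = src.shape
    b = max(int(min(h, w) * beta), 1)   # half-size of the low-frequency window
    ch, cw = h // 2, w // 2
    amp_src[..., ch - b:ch + b, cw - b:cw + b] = \
        amp_trg[..., ch - b:ch + b, cw - b:cw + b]

    amp_src = torch.fft.ifftshift(amp_src, dim=(-2, -1))

    # rebuild the source image from the swapped amplitude and the original phase
    fft_mixed = torch.polar(amp_src, pha_src)
    return torch.fft.ifft2(fft_mixed, dim=(-2, -1)).real
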