Exemplo n.º 1
0
    # Training-loop body (fragment): one pass over `train_loader` for a
    # multi-task network with segmentation, contour, and distance-map heads.
    # NOTE(review): `iteration`, `batch_size`, `net`, `criterion`, `optimiser`,
    # `DiceLoss`, `nn`, `tqdm`, and `train_loader` are defined outside this
    # snippet — this is the interior of a larger training function.
    for i, data in enumerate(tqdm.tqdm(train_loader)):
        iteration += batch_size  # counts samples seen, not optimiser steps

        # data A
        # Presumably data['A'] packs (…, mask, image, contour, edt) at
        # indices 1..4 — TODO confirm against the Dataset's __getitem__.
        image_a = data['A'][2].cuda()
        target_a = data['A'][1].cuda()
        ctr_a = data['A'][3].cuda()
        edt_a = data['A'][4].cuda()

        # data B
        # image_b = data['B'][2].cuda()

        optimiser.zero_grad()

        # Encoder/decoder split: downsample yields 5 feature maps consumed by
        # the decoder (skip connections, presumably — verify in `net`). The
        # decoder returns three task predictions plus one ignored output.
        a1, a2, a3, a4, a5 = net.downsample(image_a)
        pred_seg_a, pred_ctr_a, pred_edt_a, _ = net.upsample(
            a1, a2, a3, a4, a5)

        # Per-head losses: `criterion` for segmentation, Dice for contours,
        # L1 regression for the distance transform.
        loss_seg_a = criterion(pred_seg_a, target_a)
        loss_ctr_a = DiceLoss()(pred_ctr_a, ctr_a)
        loss_edt_a = nn.L1Loss()(pred_edt_a, edt_a)

        # Unweighted sum of the three task losses.
        loss = loss_seg_a + loss_ctr_a + loss_edt_a

        loss.backward()
        # loss_seg_a.backward()
        optimiser.step()

        # dice_score = dice_coeff(torch.round(pred), l).item()
        # epoch_train_loss_rec.append(loss_recon.item())
Exemplo n.º 2
0
 # Training-loop body (fragment): feeds WCT2-stylised features into the
 # segmentation network; only the segmentation loss is optimised.
 # NOTE(review): `e`, `iteration`, `batch_size`, `wct2net`, `net`,
 # `criterion`, `optimiser`, `epoch_train_loss_seg`, `np`, `tqdm`, and
 # `train_loader` are defined outside this snippet.
 print('Epoch ', e)
 
 for i, data in enumerate(tqdm.tqdm(train_loader)):
     iteration += batch_size
     # data[A or B] - sobel image, mask, original
     
     image_a = data['A'][2].cuda()
     target_a = data['A'][1].cuda()
     
     # image_b is loaded but never used below — presumably kept for a later
     # domain-adaptation step; verify it isn't dead code.
     image_b = data['B'][2].cuda()
     
     optimiser.zero_grad()
     
     # Style-transfer features from the WCT2 network. NOTE(review): cannot
     # tell from here whether wct2net's parameters are in `optimiser` (i.e.
     # whether it is trained or frozen) — confirm at the optimiser setup.
     wct_features = wct2net(image_a)
             
     a1, a2, a3, a4, a5 = net.downsample(wct_features)        
     pred_seg_a = net.upsample(a1, a2, a3, a4, a5)
     loss_seg_a = criterion(pred_seg_a, target_a)
             
     loss_seg_a.backward()        
     optimiser.step()
     
     # dice_score = dice_coeff(torch.round(pred), l).item()
     # epoch_train_loss_rec.append(loss_recon.item())
     epoch_train_loss_seg.append(loss_seg_a.item())
     
 # mean_loss_rec = np.mean(epoch_train_loss_rec)
 # Epoch-level mean of the per-batch segmentation losses.
 mean_loss_seg = np.mean(epoch_train_loss_seg)
 
 # print('Train A - avg seg:{}'.format(np.mean(epoch_train_loss_seg)))
 
 
Exemplo n.º 3
0
    # Training-loop body (fragment): two-phase schedule — for the first
    # `epochs_pseudo` epochs, train the pseudo-labeller on edge images;
    # afterwards (else branch, truncated in this snippet) train the main net.
    # NOTE(review): `e`, `epochs_pseudo`, `iteration`, `batch_size`, `pseudo`,
    # `criterion`, `optimiser_ps`, `optimiser_net`, `tqdm`, and `train_loader`
    # are defined outside this snippet.
    print('Epoch ', e)

    for i, data in enumerate(tqdm.tqdm(train_loader)):
        iteration += batch_size

        # Both optimisers are cleared even though only one steps per branch.
        optimiser_ps.zero_grad()
        optimiser_net.zero_grad()

        # either train pseudolabeller or the net
        # first 10 epochs train the pseudo labeller on edges
        if e < epochs_pseudo:
            # Presumably data['A'] = (image, mask, edges, ...) at indices
            # 0..2 — TODO confirm against the Dataset's __getitem__.
            edges_a = data['A'][2].cuda()
            target_a = data['A'][1].cuda()

            res_pseudo = pseudo.downsample(edges_a)
            pred_seg_a = pseudo.upsample(*res_pseudo)
            #             pred_seg_a = pseudo(edges_a)
            loss_seg_a = criterion(pred_seg_a, target_a)

            loss_seg_a.backward()
            optimiser_ps.step()

        else:
            # Phase 2: pseudo-labeller switched to eval mode (gradients are
            # not explicitly disabled here — only BN/dropout behaviour
            # changes); load original images for A/B plus edges for B.
            pseudo.eval()
            image_a = data['A'][0].cuda()
            target_a = data['A'][1].cuda()

            image_b = data['B'][0].cuda()
            edges_b = data['B'][2].cuda()
            # Branch continues beyond this snippet (view is truncated).
            pseudo_b = pseudo.downsample(edges_b)
Exemplo n.º 4
0
    
    # Training-loop body (fragment): near-duplicate of the previous example.
    # Two-phase schedule — first `epochs_pseudo` epochs train the
    # pseudo-labeller on edges; afterwards (else branch, truncated here)
    # train the main network.
    # NOTE(review): `e`, `epochs_pseudo`, `iteration`, `batch_size`, `pseudo`,
    # `criterion`, `optimiser_ps`, `optimiser_net`, `tqdm`, and `train_loader`
    # are defined outside this snippet.
    print('Epoch ', e)
    
    for i, data in enumerate(tqdm.tqdm(train_loader)):
        iteration += batch_size
        
        # Both optimisers are cleared even though only one steps per branch.
        optimiser_ps.zero_grad()
        optimiser_net.zero_grad()
                
        # either train pseudolabeller or the net
        # first 10 epochs train the pseudo labeller on edges
        if e < epochs_pseudo:
            # Presumably data['A'] = (image, mask, edges, ...) at indices
            # 0..2 — TODO confirm against the Dataset's __getitem__.
            edges_a = data['A'][2].cuda()
            target_a = data['A'][1].cuda()
            
            res_pseudo = pseudo.downsample(edges_a)        
            pred_seg_a = pseudo.upsample(*res_pseudo)
#             pred_seg_a = pseudo(edges_a)
            loss_seg_a = criterion(pred_seg_a, target_a)

            loss_seg_a.backward()        
            optimiser_ps.step()
        
        else:
            # Phase 2: pseudo-labeller in eval mode (gradients not explicitly
            # disabled — only BN/dropout behaviour changes); load original
            # images for A/B plus edges for B.
            pseudo.eval()
            image_a = data['A'][0].cuda()
            target_a = data['A'][1].cuda()
            
            image_b = data['B'][0].cuda()
            edges_b = data['B'][2].cuda()
            # Branch continues beyond this snippet (view is truncated).
            pseudo_b = pseudo.downsample(edges_b)