Пример #1
0
    # One pass over the training loader: supervised multi-task update on
    # domain-A data (segmentation + contour + distance-transform heads).
    # NOTE(review): this chunk may be a prefix of a longer loop body.
    for i, data in enumerate(tqdm.tqdm(train_loader)):
        iteration += batch_size  # global step counter advanced per batch

        # data A — batch tensors moved to the GPU. Index meanings are assumed
        # from the variable names ([1]=target mask, [2]=image, [3]=contour,
        # [4]=distance transform) — TODO confirm against the Dataset class.
        image_a = data['A'][2].cuda()
        target_a = data['A'][1].cuda()
        ctr_a = data['A'][3].cuda()
        edt_a = data['A'][4].cuda()

        # data B
        # image_b = data['B'][2].cuda()

        optimiser.zero_grad()

        # Shared encoder produces 5 feature scales, fed to a multi-head
        # decoder that returns seg / contour / EDT predictions (4th output
        # unused here).
        a1, a2, a3, a4, a5 = net.downsample(image_a)
        pred_seg_a, pred_ctr_a, pred_edt_a, _ = net.upsample(
            a1, a2, a3, a4, a5)

        # Per-head losses: main criterion for segmentation, Dice for the
        # contour head, L1 regression for the distance-transform head.
        loss_seg_a = criterion(pred_seg_a, target_a)
        loss_ctr_a = DiceLoss()(pred_ctr_a, ctr_a)
        loss_edt_a = nn.L1Loss()(pred_edt_a, edt_a)

        # Unweighted sum of the three task losses.
        loss = loss_seg_a + loss_ctr_a + loss_edt_a

        loss.backward()
        # loss_seg_a.backward()
        optimiser.step()

        # dice_score = dice_coeff(torch.round(pred), l).item()
        # epoch_train_loss_rec.append(loss_recon.item())
        epoch_train_loss_seg.append(loss_seg_a.item())  # only seg loss is tracked
Пример #2
0
    print('Epoch ', e)

    # Two-phase schedule: for the first `epochs_pseudo` epochs train only the
    # pseudo-labeller (edge maps -> segmentation masks); afterwards switch to
    # training the main net, using the frozen labeller on domain-B edges.
    for i, data in enumerate(tqdm.tqdm(train_loader)):
        iteration += batch_size

        # Both optimisers cleared each step; only one of them steps per phase.
        optimiser_ps.zero_grad()
        optimiser_net.zero_grad()

        # either train pseudolabeller or the net
        # first 10 epochs train the pseudo labeller on edges
        if e < epochs_pseudo:
            # Supervised phase: labeller learns masks from edge maps.
            # data['A'] indices assumed: [1]=target mask, [2]=edge map —
            # TODO confirm against the Dataset class.
            edges_a = data['A'][2].cuda()
            target_a = data['A'][1].cuda()

            res_pseudo = pseudo.downsample(edges_a)
            pred_seg_a = pseudo.upsample(*res_pseudo)
            #             pred_seg_a = pseudo(edges_a)
            loss_seg_a = criterion(pred_seg_a, target_a)

            loss_seg_a.backward()
            optimiser_ps.step()

        else:
            # Pseudo-labelling phase: put the labeller in eval mode and
            # generate labels for unlabelled domain B from its edge maps.
            pseudo.eval()
            image_a = data['A'][0].cuda()
            target_a = data['A'][1].cuda()

            image_b = data['B'][0].cuda()
            edges_b = data['B'][2].cuda()
            pseudo_b = pseudo.downsample(edges_b)
            pred_pseudo_b = pseudo.upsample(*pseudo_b)
            # NOTE(review): chunk is truncated here — the use of
            # pred_pseudo_b and the optimiser_net.step() presumably follow
            # in the full file.
Пример #3
0
    # Domain-adaptation loop: FDA-style spectral transfer between the two
    # domains plus entropy minimisation on both domains' predictions.
    for i, data in enumerate(tqdm.tqdm(train_loader)):
        iteration += batch_size
        image_a = data['A'][0].cuda()  # labelled source image
        target_a = data['A'][1].cuda()

        image_b = data['B'][0].cuda()  # unlabelled target image

        # Fourier Domain Adaptation: style-transfer each image toward the
        # other domain (0.02 presumably the beta/low-frequency window size —
        # TODO confirm against FDA_source_to_target's signature).
        image_a_adapt = FDA_source_to_target(image_a, image_b, 0.02).cuda()

        image_b_adapt = FDA_source_to_target(image_b, image_a, 0.02).cuda()
        # NOTE(review): the adapted images are computed but never used in the
        # visible span — the forward passes below run on the raw image_a /
        # image_b. Confirm whether they are used later or this is a bug.

        optimiser.zero_grad()

        a1, a2, a3, a4, a5 = net.downsample(image_a)
        pred_seg_a = net.upsample(a1, a2, a3, a4, a5)
        #         pred_seg_a = net(image_a)
        loss_seg_a = criterion(pred_seg_a, target_a)

        # Unlabelled domain B: forward pass only, used for the entropy term.
        res2 = net.downsample(image_b)
        pred_seg_b = net.upsample(*res2)

        ent_a = entropy_loss(pred_seg_a)
        ent_b = entropy_loss(pred_seg_b)

        # Supervised loss on A plus weighted entropy regularisation on both.
        loss = loss_seg_a + LAMBDA_ENT * (ent_a + ent_b)

        loss.backward()
        optimiser.step()

        # dice_score = dice_coeff(torch.round(pred), l).item()
Пример #4
0
    net.train() 
    
    print('Epoch ', e)
    
    # DANN-style training loop: segmentation loss on labelled domain A plus
    # an adversarial domain classifier on the encoder bottleneck features,
    # connected through a gradient-reversal layer.
    for i, data in enumerate(tqdm.tqdm(train_loader)):
        iteration += batch_size
        image_a = data['A'][0].cuda()
        target_a = data['A'][1].cuda()
        image_b = data['B'][0].cuda()

        optimiser.zero_grad()
        # d_optimiser.zero_grad()
        
        # domain A
        a1, a2, a3, a4, a5 = net.downsample(image_a)
        pred_seg_a = net.upsample(a1, a2, a3, a4, a5)
        loss_seg_a = criterion(pred_seg_a, target_a)
        
        # DANN here
        # Reversal strength ramps over training with the standard DANN
        # schedule: p in [0, 1) over all epochs, alpha = 2/(1+exp(-10p)) - 1.
        p = float(i + e * num_train) / epochs / num_train
        alpha = 2. / (1. + np.exp(-10 * p)) - 1
        reverse_feature_a = ReverseLayerF.apply(a5, alpha)
                
        # Domain classifier on reversed A features; label 0.0 = domain A.
        domain_a_result = domain_net(reverse_feature_a)
        a_label = torch.zeros(domain_a_result.size(), requires_grad=False).cuda()
        domain_a_loss = nn.BCELoss()(domain_a_result, a_label)
        
        # domain B — only the bottleneck feature (b5) is needed for the
        # domain classifier.
        _, _, _, _, b5 = net.downsample(image_b)
        reverse_feature_b = ReverseLayerF.apply(b5, alpha)
        
        # NOTE(review): chunk is truncated here — the domain-B loss, the
        # combined backward() and optimiser.step() presumably follow in the
        # full file.
Пример #5
0
    print('Epoch ', e)
    
    # Two-phase schedule: for the first `epochs_pseudo` epochs train only the
    # pseudo-labeller (edge maps -> segmentation masks); afterwards switch to
    # training the main net, using the frozen labeller on domain-B edges.
    for i, data in enumerate(tqdm.tqdm(train_loader)):
        iteration += batch_size
        
        # Both optimisers cleared each step; only one of them steps per phase.
        optimiser_ps.zero_grad()
        optimiser_net.zero_grad()
                
        # either train pseudolabeller or the net
        # first 10 epochs train the pseudo labeller on edges
        if e < epochs_pseudo:
            # Supervised phase: labeller learns masks from edge maps.
            # data['A'] indices assumed: [1]=target mask, [2]=edge map —
            # TODO confirm against the Dataset class.
            edges_a = data['A'][2].cuda()
            target_a = data['A'][1].cuda()
            
            res_pseudo = pseudo.downsample(edges_a)        
            pred_seg_a = pseudo.upsample(*res_pseudo)
#             pred_seg_a = pseudo(edges_a)
            loss_seg_a = criterion(pred_seg_a, target_a)

            loss_seg_a.backward()        
            optimiser_ps.step()
        
        else:
            # Pseudo-labelling phase: put the labeller in eval mode and
            # generate labels for unlabelled domain B from its edge maps.
            pseudo.eval()
            image_a = data['A'][0].cuda()
            target_a = data['A'][1].cuda()
            
            image_b = data['B'][0].cuda()
            edges_b = data['B'][2].cuda()
            pseudo_b = pseudo.downsample(edges_b)
            pred_pseudo_b = pseudo.upsample(*pseudo_b)
            # NOTE(review): chunk is truncated here — the use of
            # pred_pseudo_b and the optimiser_net.step() presumably follow
            # in the full file.