Ejemplo n.º 1
0
            # ---- Discriminator update (hinge loss) ----
            # Detach Y so discriminator gradients do not flow back into the generator.
            fake_D = D(Y.detach())
            loss_fake = 0
            # NOTE(review): di[0] looks like the logit output of one discriminator
            # scale (multi-scale D) — confirm against D's definition.
            for di in fake_D:
                loss_fake += hinge_loss(di[0], False)  # translated: changing this weight strengthens the faking ability

            # Real (source) images should score as true.
            true_D = D(Xs)
            loss_true = 0
            for di in true_D:
                loss_true += hinge_loss(di[0], True)
            # true_score2 = D(Xt)[-1][0]

            # 0.5 * (2*a + 2*b) == a + b; the extra factors appear vestigial
            # from an earlier loss-weighting experiment.
            lossD = 0.5*(2.*loss_true.mean() + 2.*loss_fake.mean())

            # NVIDIA apex mixed-precision: scale the loss before backward.
            with amp.scale_loss(lossD, opt_D) as scaled_loss:
                scaled_loss.backward()
            # lossD.backward()
            opt_D.step()
            batch_time = time.time() - start_time
            # Periodically dump a preview image of (source, target, generated).
            if iteration % show_step == 0:
                image = make_image(Xs, Xt, Y)
                # vis.image(image[::-1, :, :], opts={'title': 'result'}, win='result')
                # image is CHW in [0, 1] (presumably) — scale to uint8 HWC for OpenCV.
                cv2.imwrite('./gen_images/latest_AEI_mask.jpg', (image*255).transpose([1,2,0]).astype(np.uint8))
            print(f'epoch: {epoch}    {iteration} / {len(dataloader)}')
            print(f'lossD: {lossD.item()}    lossG: {lossG.item()} batch_time: {batch_time}s')
            print(f'L_adv: {L_adv.item()} L_id: {L_id.item()} L_attr: {L_attr.item()} L_rec: {L_rec.item()}')
            # Rolling "latest" checkpoint every 200 iterations (skip iteration 0).
            if iteration % 200 == 0 and iteration>0:
                torch.save(G.state_dict(), './saved_mask_models/G_latest.pth')
                torch.save(D.state_dict(), './saved_mask_models/D_latest.pth')
        # Per-epoch snapshot of both networks.
        torch.save(G.state_dict(), './saved_mask_models/G_epoch_{}.pth'.format(epoch))
        torch.save(D.state_dict(), './saved_mask_models/D_epoch_{}.pth'.format(epoch))
Ejemplo n.º 2
0
            # Apex mixed-precision backward pass for the discriminator loss.
            with amp.scale_loss(lossD, opt_D) as scaled_loss:
                scaled_loss.backward()
            # lossD.backward()
            opt_D.step()
            batch_time = time.time() - start_time
            # Periodically write a preview image of (source, target, generated).
            if iteration % show_step == 0:
                image = make_image(Xs, Xt, Y)
                # vis.image(image[::-1, :, :], opts={'title': 'result'}, win='result')
                # image is CHW in [0, 1] (presumably) — scale to uint8 HWC for OpenCV.
                cv2.imwrite('./gen_images/latest_AEI_landmarks_mask.jpg',
                            (image * 255).transpose([1, 2,
                                                     0]).astype(np.uint8))
            print(f'epoch: {epoch}    {iteration} / {len(dataloader)}')
            print(
                f'lossD: {lossD.item()}    lossG: {lossG.item()} batch_time: {batch_time}s'
            )
            print(
                f'L_adv: {L_adv.item()} L_id: {L_id.item()} L_attr: {L_attr.item()} L_rec: {L_rec.item()}'
            )

            # Rolling "latest" checkpoint every 100 iterations (skip iteration 0).
            if ((iteration % 100) == 0) and (iteration > 0):
                torch.save(G.state_dict(),
                           './saved_mask_landmarks_models/G_latest.pth')
                torch.save(D.state_dict(),
                           './saved_mask_landmarks_models/D_latest.pth')
        # Per-epoch snapshot of both networks.
        torch.save(
            G.state_dict(),
            './saved_mask_landmarks_models/G_epoch_{}.pth'.format(epoch))
        torch.save(
            D.state_dict(),
            './saved_mask_landmarks_models/D_epoch_{}.pth'.format(epoch))
Ejemplo n.º 3
0
                'L_attr': L_attr.item(),
                'L_rec': L_rec.item()
            }, niter)
        writer.add_scalars('Train/Adversarial losses', {
            'Generator': lossG.item(),
            'Discriminator': lossD.item()
        }, niter)
    print(
        f'niter: {niter} (epoch: {epoch} {iteration}/{len(train_dataloader)})')
    print(
        f'    lossD: {lossD.item()} lossG: {lossG.item()} batch_time: {batch_time}s'
    )
    print(
        f'    L_adv: {L_adv.item()} L_id: {L_id.item()} L_attr: {L_attr.item()} L_rec: {L_rec.item()}'
    )
    if (niter + 1) % 1000 == 0:
        torch.save(G.state_dict(), './saved_models/AEI_G_latest.pth')
        torch.save(D.state_dict(), './saved_models/AEI_D_latest.pth')
        torch.save(opt_D.state_dict(), './saved_models/AEI_optG_latest.pth')
        torch.save(opt_D.state_dict(), './saved_models/AEI_optD_latest.pth')
        torch.save(scaler.state_dict(), './saved_models/AEI_scaler_latest.pth')
        with open('./saved_models/AEI_niter.pkl', 'wb') as f:
            pickle.dump(niter, f)
    if (niter + 1) % 10000 == 0:
        torch.save(G.state_dict(),
                   f'./saved_models/AEI_G_iteration_{niter + 1}.pth')
        torch.save(D.state_dict(),
                   f'./saved_models/AEI_D_iteration_{niter + 1}.pth')
        with open(f'./saved_models/AEI_niter_{niter + 1}.pkl', 'wb') as f:
            pickle.dump(niter, f)
Ejemplo n.º 4
0
            # Real (source) images should score as true.
            loss_true += hinge_loss(di[0], True)
        # true_score2 = D(Xt)[-1][0]

        # Average hinge losses on real and fake samples.
        lossD = 0.5 * (loss_true.mean() + loss_fake.mean())

        # Apex mixed-precision backward pass for the discriminator loss.
        with amp.scale_loss(lossD, opt_D) as scaled_loss:
            scaled_loss.backward()
        # lossD.backward()
        opt_D.step()
        batch_time = time.time() - start_time
        # Periodically push a preview to visdom and save it to disk.
        if iteration % show_step == 0:
            image = make_image(Xs, Xt, Y)
            # image is CHW; visdom expects RGB, so channels are reversed
            # (presumably BGR -> RGB) — TODO confirm against make_image.
            vis.image(image[::-1, :, :],
                      opts={'title': 'result'},
                      win='result')
            cv2.imwrite('./gen_images/latest_%d.jpg' % (iteration),
                        image.transpose([1, 2, 0]))
        print(f'epoch: {epoch}    {iteration} / {len(dataloader)}')
        print(
            f'lossD: {lossD.item()}    lossG: {lossG.item()} batch_time: {batch_time}s'
        )
        print(
            f'L_adv: {L_adv.item()} L_id: {L_id.item()} L_attr: {L_attr.item()} L_rec: {L_rec.item()}'
        )
        # Rolling "latest" checkpoint every 1000 iterations.
        if iteration % 1000 == 0:
            torch.save(G.state_dict(), './saved_models/G_latest.pth')
            torch.save(D.state_dict(), './saved_models/D_latest.pth')
        # Immutable numbered snapshot every 10000 iterations.
        if iteration % 10000 == 0:
            torch.save(G.state_dict(), './saved_models/G_%d.pth' % (iteration))
            torch.save(D.state_dict(), './saved_models/D_%d.pth' % (iteration))