Example #1
0
        # --- Generator update (FAMOS mosaic path) ---
        # Resample noise and run the generator; famosGeneration returns the
        # final image `fake`, blending mask `alpha`, template-mixing weights `A`,
        # and the template-mixed image `mixedI`.
        noise=setNoise(noise)
        fake, alpha, A, mixedI  = famosGeneration(content, noise, templatePatch, True)

        # Adversarial loss on the final image: `output.detach()*0+real_label`
        # builds a same-shape target tensor filled with real_label (no grad
        # flows through the target because of detach()).
        output = netD(fake)
        loss_adv = criterion(output, output.detach()*0+real_label)
        D_G_z2 = output.mean()

        if opt.fAdvM>0:
            # Optional adversarial loss on the mixed-template image as well.
            outputM = netD(mixedI)
            loss_adv_mixed = criterion(outputM, outputM.detach() * 0 + real_label)
            D_G_z2m = outputM.mean()
        else:
            # Disabled: keep zero tensors of the right type so logging/loss
            # arithmetic below stays uniform.
            D_G_z2m =D_G_z2*0
            loss_adv_mixed = loss_adv*0

        # Content (reconstruction) losses against the first 3 channels of the
        # conditioning input, for both the final and the mixed image.
        # NOTE(review): content[:,:3] presumably selects RGB — confirm upstream.
        cLoss= contentLoss(fake,content[:,:3],netR,opt)
        cLoss2= contentLoss(mixedI,content[:,:3],netR,opt)

        # Regularizers on the mixing weights A: entropy (1e-8 avoids log(0))
        # and total variation (spatial smoothness).
        entropy = (-A * (1e-8 + A).log()).mean()##entropy
        tv= total_variation(A)

        # Regularizers on the blending mask alpha.
        alpha_loss = alpha.mean()##large means more conv content; small is focus on mixedtemplate result
        atv= total_variation(alpha)
        # Gram matrix of per-template mean activations across the batch,
        # encouraging use of varied templates.
        diversity= gramMatrix(A.mean(3).mean(2).unsqueeze(2).unsqueeze(1)).mean()    #.std(1).mean()*-1  #force various templates -- across batch variance
        # Total generator objective. NOTE(review): `tv + \` followed by
        # `+0.02 * atv` is a harmless unary plus, and 0.02 is a hard-coded
        # weight for atv unlike the other opt.* factors — possibly intentional.
        errG = loss_adv + opt.fAdvM * loss_adv_mixed + opt.fContent * cLoss + opt.fContentM * cLoss2 + opt.fAlpha * alpha_loss + opt.fEntropy * entropy + opt.fTV * tv + \
               +0.02 * atv + opt.fDiversity * diversity
        errG.backward()
        optimizerU.step()
        print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f mixP %.4f content %4f template %.4f alphareg %.4f entropy %.4f tv %.4f atv %.4f diversity %.4f time %.4f'
              % (epoch, opt.niter, i, len(dataloader),
Example #2
0
        # Apply the discriminator/critic gradient step computed above.
        optimizerD.step()
        # In WGAN mode, run opt.dIter critic steps per generator step: skip
        # the generator update except every dIter-th iteration (i == 0 always
        # trains the generator).
        if i > 0 and opt.WGAN and i % opt.dIter != 0:
            continue  ##critic steps to 1 GEN steps

        # --- Generator update (plain GAN path) ---
        for net in Gnets:
            net.zero_grad()

        # Fetch a content batch. NOTE(review): next(iter(...)) builds a fresh
        # iterator every call, so this always takes that iterator's first
        # batch — random only if the loader shuffles; confirm intended.
        content = next(iter(cdataloader))[0]
        content = content.to(device)
        # train with fake -- create again
        noise = setNoise(noise)
        fake = ganGeneration(content, noise)
        # Adversarial loss: target is a same-shape tensor filled with
        # real_label (detach() keeps the target out of the graph).
        output = netD(fake)
        loss_adv = criterion(output, output.detach() * 0 + real_label)
        D_G_z2 = output.mean()
        # Content loss against the first 3 channels of the conditioning input.
        cLoss = contentLoss(fake, content[:, :3], netR, opt)

        # Total generator objective: adversarial + weighted content term.
        errG = loss_adv + opt.fContent * cLoss
        errG.backward()
        optimizerU.step()
        # Log the current training statistics.
        # Fix: the content term used '%4f' (field *width* 4, default 6
        # decimals) — a typo for '%.4f' (4-decimal *precision*), which every
        # other specifier in this line uses.
        print(
            '[%d/%d][%d/%d] D(x): %.4f D(G(z)): %.4f / %.4f content %.4f time %.4f'
            % (epoch, opt.niter, i, len(dataloader), D_x, D_G_z1, D_G_z2,
               cLoss.item(), time.time() - t0))

        # Accumulate scalar history for later plotting/saving.
        buf += [[D_x.item(), D_G_z1.item(), D_G_z2.item(), cLoss.item()]]

        ### RUN INFERENCE AND SAVE LARGE OUTPUT MOSAICS
        # NOTE(review): this branch appears truncated here; presumably the
        # history array `a` feeds a plot/save step further down — confirm.
        if i == 1:
            a = np.array(buf)