# Sample target-attribute intensities for this batch. att_b is a 0/1
# attribute vector; (att_b * 2 - 1) maps it to {-1, +1}, giving the signed
# direction of each attribute edit. The distributions are mutually
# exclusive, so chain them with elif instead of independent ifs.
if args.b_distribution == 'none':
    # Fixed intensity: exactly +/- thres_int.
    att_b_ = (att_b * 2 - 1) * args.thres_int
elif args.b_distribution == 'uniform':
    # Uniform intensity in [0, 2 * thres_int).
    att_b_ = (att_b * 2 - 1) * \
             torch.rand_like(att_b) * \
             (2 * args.thres_int)
elif args.b_distribution == 'truncated_normal':
    # fmod(randn, 2) lies in (-2, 2), so (x + 2) / 4 lies in (0, 1);
    # scaled to (0, 2 * thres_int).
    att_b_ = (att_b * 2 - 1) * \
             (torch.fmod(torch.randn_like(att_b), 2) + 2) / 4.0 * \
             (2 * args.thres_int)

# Alternate training: D for n_d iterations, then G for one. errD is always
# defined by the time G trains (D runs first when n_d >= 1), so the
# progress report inside the G branch can read both losses.
if (it + 1) % (args.n_d + 1) != 0:
    errD = attgan.trainD(img_a, att_a, att_a_, att_b, att_b_)
    add_scalar_dict(writer, errD, it + 1, 'D')
else:
    errG = attgan.trainG(img_a, att_a, att_a_, att_b, att_b_)
    add_scalar_dict(writer, errG, it + 1, 'G')
    progressbar.say(epoch=epoch, iter=it + 1,
                    d_loss=errD['d_loss'], g_loss=errG['g_loss'])

if (it + 1) % args.save_interval == 0:
    # To save storage space, only the weights of G are checkpointed here.
    # To also keep D, optim_G and optim_D, use save() instead of saveG().
    attgan.saveG(os.path.join(
        'output', args.experiment_name, 'checkpoint',
        'weights.{:d}.pth'.format(epoch)))
# 打乱顺序 idx = torch.randperm(len(label_org)) label_trg = label_org[idx].contiguous() c_org = label_org.clone() c_trg = label_trg.clone() c_org = c_org.cuda() if args.gpu else c_org c_trg = c_trg.cuda() if args.gpu else c_trg attr_diff = c_trg - c_org if (it + 1) % (args.n_d + 1) != 0: errD = attgan.trainD(img_real, label_org, attr_diff) add_scalar_dict(writer, errD, it + 1, 'D') else: errG = attgan.trainG(img_real, label_trg, attr_diff) add_scalar_dict(writer, errG, it + 1, 'G') progressbar.say(epoch=epoch, iter=it + 1, d_loss=errD['d_loss'], g_loss=errG['g_loss']) if (it + 1) % args.save_interval == 0: # To save storage space, I only checkpoint the weights of G. # If you'd like to keep weights of G, D, optim_G, optim_D, # please use save() instead of saveG(). attgan.save( os.path.join('output', args.experiment_name, 'checkpoint', 'weights.{:d}.pth'.format(epoch))) if (it + 1) % args.sample_interval == 0: