# NOTE(review): interior of a larger training loop whose header is outside this
# view; indentation is inconsistent between the first two lines, so the
# surrounding context has been truncated/corrupted in extraction.
if it_d % 2000:
            # BEGAN-style decay of the top-k / k hyperparameter toward a floor.
            # NOTE(review): "if it_d % 2000:" is truthy for every it_d NOT
            # divisible by 2000 — almost certainly intended
            # "if it_d % 2000 == 0:" (decay once every 2000 D-iterations);
            # confirm against the original source before relying on this.
            args.k = max(int(args.k*args.v), args.min_k)

        # sample
        # Every 1000 generator iterations: render a sample grid from fixed
        # noise z, log it to TensorBoard, and also save a JPEG montage to disk.
        if it_g % 1000 == 0:
            x_fake = sample(z)
            img = make_grid(x_fake, normalize=True)
            writer.add_image("samples/generated", img, ep)
            # NCHW -> NHWC for the image-merging helper.
            x_fake = np.transpose(x_fake.data.cpu().numpy(), (0, 2, 3, 1))
            img = im.immerge(x_fake, n_rows=10).squeeze()
            im.imwrite(img, py.join(sample_dir, 'iter-%09d.jpg' % it_g))
    # Checkpointing: every 10 epochs write a numbered snapshot, and always
    # refresh a rolling 'Last.ckpt' with the most recent state (models,
    # optimizers, and iteration counters).
    if args.save == True:  # NOTE(review): prefer idiomatic "if args.save:"
        if ep % 10 == 0:
            torchlib.save_checkpoint({'ep': ep, 'it_d': it_d, 'it_g': it_g,
                                    'D': D.state_dict(),
                                    'G': G.state_dict(),
                                    'D_optimizer': D_optimizer.state_dict(),
                                    'G_optimizer': G_optimizer.state_dict()},
                                    py.join(ckpt_dir, 'Epoch_inter(%d).ckpt' % ep))

        # save checkpoint
        # Overwritten every epoch; lets training resume from the latest state.
        torchlib.save_checkpoint({'ep': ep, 'it_d': it_d, 'it_g': it_g,
                                'D': D.state_dict(),
                                'G': G.state_dict(),
                                'D_optimizer': D_optimizer.state_dict(),
                                'G_optimizer': G_optimizer.state_dict()},
                                py.join(ckpt_dir, 'Last.ckpt'),)

# Printing the elapsed training times
# for key, value in elapsed_time_dict.items():
#     if len(value)>0:
#         print('Avg. time for: ',key ,' ::', sum(value) / len(value)  )
                              global_step=it_d)
        # NOTE(review): the line above is the truncated tail of a
        # writer.add_scalar(...) call whose opening lines are missing from
        # this fragment.

        # Alternation schedule: run one generator step for every args.n_d
        # discriminator steps.
        if it_d % args.n_d == 0:
            G_loss_dict = train_G()
            it_g += 1
            # Log each generator loss term to TensorBoard under G/<name>.
            for k, v in G_loss_dict.items():
                writer.add_scalar('G/%s' % k,
                                  v.data.cpu().numpy(),
                                  global_step=it_g)

        # sample
        # Every 100 generator iterations: sample from fixed noise z and save a
        # 10-per-row JPEG montage (NCHW -> NHWC for the merge helper).
        if it_g % 100 == 0:
            x_fake = sample(z)
            x_fake = np.transpose(x_fake.data.cpu().numpy(), (0, 2, 3, 1))
            img = im.immerge(x_fake, n_rows=10).squeeze()
            im.imwrite(img, py.join(sample_dir, 'iter-%09d.jpg' % it_g))

    # save checkpoint
    # Epoch-end snapshot of models, optimizers, and iteration counters;
    # max_keep=1 means only the most recent epoch checkpoint is retained.
    torchlib.save_checkpoint(
        {
            'ep': ep,
            'it_d': it_d,
            'it_g': it_g,
            'D': D.state_dict(),
            'G': G.state_dict(),
            'D_optimizer': D_optimizer.state_dict(),
            'G_optimizer': G_optimizer.state_dict()
        },
        py.join(ckpt_dir, 'Epoch_(%d).ckpt' % ep),
        max_keep=1)
# --- fragment boundary: scrape artifact from the original code-listing web
# page ("Пример #3", i.e. "Example #3", followed by a vote count of "0");
# a third, unrelated training-loop fragment follows below. ---
                              g_gan_loss.data.cpu().numpy(),
                              global_step=step)
        # NOTE(review): the two lines above are the truncated tail of an
        # add_scalar(...)-style logging call; this third fragment uses a
        # different naming scheme (step / i / train_loader) than the earlier
        # ones, confirming it comes from a separate source file.

        # display
        # NOTE(review): "step % 1 == 0" is always true — this prints progress
        # on every step; either intentional verbosity or a placeholder
        # interval that was never raised.
        if step % 1 == 0:
            print("Epoch: (%3d) (%5d/%5d)" % (ep, i + 1, len(train_loader)))

        # sample
        # Every 100 steps: sample from fixed (z, c) and save a 10-per-row grid.
        if step % 100 == 0:
            # NOTE(review): G is switched to eval mode here and never restored
            # to train mode within this fragment — confirm the caller calls
            # G.train() afterwards, otherwise BatchNorm/Dropout stay frozen.
            G.eval()
            # Map tanh output from [-1, 1] to [0, 1] for image saving.
            x_f_sample = (G(z_sample, c_sample) + 1) / 2.0

            save_dir = './output/%s/sample_training' % experiment_name
            pylib.mkdir(save_dir)
            torchvision.utils.save_image(
                x_f_sample,
                '%s/Epoch_(%d)_(%dof%d).jpg' %
                (save_dir, ep, i + 1, len(train_loader)),
                nrow=10)

    # Epoch-end snapshot (models + optimizers); max_keep=2 retains the last
    # two epoch checkpoints on disk.
    torchlib.save_checkpoint(
        {
            'epoch': ep + 1,
            'D': D.state_dict(),
            'G': G.state_dict(),
            'd_optimizer': d_optimizer.state_dict(),
            'g_optimizer': g_optimizer.state_dict()
        },
        '%s/Epoch_(%d).ckpt' % (ckpt_dir, ep + 1),
        max_keep=2)