Code example #1
# ======================================
# =               custom               =
# ======================================

# ==============================================================================
# =                                   model                                    =
# ==============================================================================

# set up the normalization function for the discriminator
if args.gradient_penalty_mode == 'none':
    d_norm = 'batch_norm'
else:  # cannot use batch normalization with gradient penalty
    d_norm = args.gradient_penalty_d_norm

# networks
G = module.ConvGenerator(args.z_dim, shape[-1],
                         n_upsamplings=n_G_upsamplings).to(device)
D = module.ConvDiscriminator(shape[-1],
                             n_downsamplings=n_D_downsamplings,
                             norm=d_norm).to(device)
print(G)
print(D)

# adversarial loss functions
d_loss_fn, g_loss_fn = gan.get_adversarial_losses_fn(
    args.adversarial_loss_mode)

# optimizer
G_optimizer = torch.optim.Adam(G.parameters(),
                               lr=args.lr,
                               betas=(args.beta_1, 0.999))
D_optimizer = torch.optim.Adam(D.parameters(),
                               lr=args.lr,
                               betas=(args.beta_1, 0.999))
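
The `d_norm` switch at the top of this example exists because a gradient penalty is computed per interpolated sample, while batch normalization mixes statistics across the batch and therefore corrupts those per-sample gradients. As a rough sketch of what a WGAN-GP style penalty looks like (the function name and arguments here are illustrative, not the repo's actual `gan` module API):

import torch

def gradient_penalty(D, x_real, x_fake, device):
    # Interpolate between real and fake samples with a per-sample epsilon.
    eps = torch.rand(x_real.size(0), 1, 1, 1, device=device)
    x_hat = (eps * x_real + (1 - eps) * x_fake).requires_grad_(True)
    d_hat = D(x_hat)
    # Gradient of D's output w.r.t. the interpolated input, per sample.
    grad = torch.autograd.grad(outputs=d_hat.sum(), inputs=x_hat,
                               create_graph=True)[0]
    grad_norm = grad.view(grad.size(0), -1).norm(2, dim=1)
    # Penalize deviation of each sample's gradient norm from 1.
    return ((grad_norm - 1) ** 2).mean()

Because the penalty constrains each sample's gradient norm independently, any discriminator layer that couples samples across the batch (batch norm) invalidates it, hence the alternative `d_norm`.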
Code example #2

# set up the normalization function for the discriminator
if args.gradient_penalty_mode == 'none':
    d_norm = 'batch_norm'
elif args.gradient_penalty_mode in ['dragan', 'wgan-gp']:  # cannot use batch normalization with gradient penalty
    # TODO(Lynn)
    # Layer normalization is more stable than instance normalization here,
    # but instance normalization works in other implementations.
    # Please tell me if you find out the cause.
    d_norm = 'layer_norm'

# networks
# Comment by K.C:
# the following calls define the structures of the G and D models
G = module.ConvGenerator(input_shape=(1, 1, args.z_dim), output_channels=shape[-1], n_upsamplings=n_G_upsamplings, name='G_%s' % args.dataset)
D = module.ConvDiscriminator(input_shape=shape, n_downsamplings=n_D_downsamplings, norm=d_norm, name='D_%s' % args.dataset)

py.mkdir('%s/summaries' % output_dir)
keras.utils.plot_model(G, '%s/summaries/convGenerator.png' % output_dir, show_shapes=True)
keras.utils.plot_model(D, '%s/summaries/convDiscriminator.png' % output_dir, show_shapes=True)
G.summary()
D.summary()

# adversarial loss functions
d_loss_fn, g_loss_fn = gan.get_adversarial_losses_fn(args.adversarial_loss_mode)

G_optimizer = keras.optimizers.Adam(learning_rate=args.lr, beta_1=args.beta_1)
D_optimizer = keras.optimizers.Adam(learning_rate=args.lr, beta_1=args.beta_1)
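
Both examples dispatch on `args.adversarial_loss_mode` through `gan.get_adversarial_losses_fn`. A hedged sketch of how such a dispatcher is commonly written, shown here for two modes (illustrative only, not the module's actual code):

import tensorflow as tf

def get_adversarial_losses_fn(mode):
    # Illustrative sketch only; the real gan module may differ.
    if mode == 'gan':  # non-saturating cross-entropy GAN
        bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
        def d_loss_fn(r_logit, f_logit):
            # discriminator: real -> 1, fake -> 0
            return bce(tf.ones_like(r_logit), r_logit) + bce(tf.zeros_like(f_logit), f_logit)
        def g_loss_fn(f_logit):
            # generator tries to make fakes classified as real
            return bce(tf.ones_like(f_logit), f_logit)
    elif mode == 'wgan':
        def d_loss_fn(r_logit, f_logit):
            # Wasserstein critic: maximize the real/fake score gap
            return tf.reduce_mean(f_logit) - tf.reduce_mean(r_logit)
        def g_loss_fn(f_logit):
            return -tf.reduce_mean(f_logit)
    else:
        raise ValueError(mode)
    return d_loss_fn, g_loss_fn

The returned pair keeps the training loop mode-agnostic: the loop only ever sees `d_loss_fn(real_logits, fake_logits)` and `g_loss_fn(fake_logits)`.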

Code example #3
print(args.experiment_names)
experiment_names = args.experiment_names

use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
torch.manual_seed(0)

if args.dataset in ['cifar10', 'fashion_mnist', 'mnist', 'imagenet']:  # 32x32
    output_channels = 3
    n_G_upsamplings = n_D_downsamplings = 3  # 4x4 base -> 32x32 after three 2x steps

for experiment in experiment_names:
    output_dir = py.join('output_new', 'output', experiment)

    G = module.ConvGenerator(args.z_dim,
                             output_channels,
                             n_upsamplings=n_G_upsamplings).to(device)

    # load checkpoint if exists
    ckpt_dir = py.join(output_dir, 'checkpoints', args.checkpoint_name)
    out_dir = py.join(output_dir, args.output_dir)
    py.mkdir(ckpt_dir)
    py.mkdir(out_dir)
    ckpt = torchlib.load_checkpoint(ckpt_dir)
    G.load_state_dict(ckpt['G'])

    for i in range(args.num_samples):
        z = torch.randn(args.batch_size, args.z_dim, 1, 1).to(device)
        x_fake = G(z).detach()
        x_fake = np.transpose(x_fake.data.cpu().numpy(), (0, 2, 3, 1))
        img = im.immerge(x_fake, n_rows=1).squeeze()
        # the snippet is truncated here; presumably the merged grid is then
        # saved, e.g. (assumed imlib call):
        im.imwrite(img, py.join(out_dir, 'sample_%d.jpg' % i))
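
`im.immerge` tiles a batch of images into a single grid image before saving. A hypothetical re-implementation of that behavior, to show what the call above produces (not imlib's actual code):

import numpy as np

def immerge_sketch(images, n_rows):
    # Tile a batch of (N, H, W, C) images into one (n_rows x n_cols) grid.
    n, h, w, c = images.shape
    n_cols = int(np.ceil(n / n_rows))
    grid = np.zeros((n_rows * h, n_cols * w, c), dtype=images.dtype)
    for idx in range(n):
        row, col = divmod(idx, n_cols)
        grid[row * h:(row + 1) * h, col * w:(col + 1) * w] = images[idx]
    return grid

With `n_rows=1` the grid is a single row of `batch_size` samples; the trailing `.squeeze()` then drops the channel axis for single-channel datasets.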