Example #1
        logger.info("Using {} GPUs.".format(num_gpus))
        logger.info("Training on {}.\n".format(torch.cuda.get_device_name(0)))
        cudnn.benchmark = True
    device = torch.device(opt.device)

    # create the dataset for training
    dataset = make_dataset(opt.dataset)

    # init the network
    style_gan = StyleGAN(structure=opt.structure,
                         resolution=opt.dataset.resolution,
                         num_channels=opt.dataset.channels,
                         latent_size=opt.model.gen.latent_size,
                         g_args=opt.model.gen,
                         d_args=opt.model.dis,
                         g_opt_args=opt.model.g_optim,
                         d_opt_args=opt.model.d_optim,
                         loss=opt.loss,
                         drift=opt.drift,
                         d_repeats=opt.d_repeats,
                         use_ema=opt.use_ema,
                         ema_decay=opt.ema_decay,
                         device=device)

    # Resume training from checkpoints
    if args.generator_file is not None:
        logger.info("Loading generator from:", args.generator_file)
        style_gan.gen.load_state_dict(torch.load(args.generator_file))
    else:
        logger.info("Training from scratch...")

    if args.discriminator_file is not None:
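
The resume logic above calls torch.load without a map_location, which assumes the checkpoint was saved on a device that is also available when training resumes. A small, self-contained sketch of a more portable loader (the load_weights helper is hypothetical, not part of the snippet):

import torch

def load_weights(module, checkpoint_path, device):
    # Map the stored tensors onto the target device so a GPU-saved
    # checkpoint can be restored on CPU or on a different GPU.
    state_dict = torch.load(checkpoint_path, map_location=device)
    module.load_state_dict(state_dict)
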
Example #2
    dataset = make_dataset(opt.dataset)

    print(opt.recon_loss)
    # init the network
    style_gan = StyleGAN(structure=opt.structure,
                         resolution=opt.dataset.resolution,
                         num_channels=opt.dataset.channels,
                         latent_size=opt.model.gen.latent_size,
                         update_encoder_as_discriminator=opt.update_encoder_as_discriminator,
                         use_sleep=opt.use_sleep,
                         use_vae=opt.use_vae,
                         use_adverserial=opt.use_adverserial,
                         g_args=opt.model.gen,
                         d_args=opt.model.dis,
                         e_args=opt.model.encoder,
                         g_opt_args=opt.model.g_optim,
                         d_opt_args=opt.model.d_optim,
                         e_opt_args=opt.model.e_optim,
                         loss=opt.loss,
                         recon_loss=opt.recon_loss,
                         drift=opt.drift,
                         d_repeats=opt.d_repeats,
                         use_ema=opt.use_ema,
                         ema_decay=opt.ema_decay,
                         noise_channel_dropout=opt.noise_channel_dropout,
                         betas=opt.betas,
                         device=device)

    # Resume training from checkpoints
    if args.generator_file is not None:
        logger.info("Loading generator from: %s", args.generator_file)