# Example no. 1 (scraped snippet header; original: "Ejemplo n.º 1", score: 0)
    def __init__(self, cfg):
        """Build the GAN-based preprocessor: load a frozen generator and
        encoder from checkpoints and read the defense settings.

        Args:
            cfg: project config object with ``generator``, ``encoder`` and
                ``defense`` sections; the first two each carry an ``args``
                namespace and a ``ckpt`` path.
        """
        super(Preprocessor, self).__init__()

        # load generator (frozen: used only for reconstruction, never trained)
        self.gan = get_generator(cfg, cfg.generator)
        self.z_dim = cfg.generator.args.z_dim
        # map_location='cpu' lets a GPU-saved checkpoint load on a CPU-only
        # machine; load_state_dict then copies onto the model's own device.
        state_dict = torch.load(cfg.generator.ckpt, map_location='cpu')
        self.gan.load_state_dict(state_dict['netG'])
        for p in self.gan.parameters():
            p.requires_grad_(False)

        # load encoder (frozen as well)
        self.encoder = get_encoder(cfg, cfg.encoder)
        state_dict = torch.load(cfg.encoder.ckpt, map_location='cpu')
        self.encoder.load_state_dict(state_dict['netE'])
        for p in self.encoder.parameters():
            p.requires_grad_(False)

        # append the de-processing layer so generator output is in image space
        transform = get_transform(cfg)
        self.gan = nn.Sequential(self.gan, transform.gan_deprocess_layer)

        defense_cfg = cfg.defense.args
        self.optimize = defense_cfg.optimize
        self.return_latents = True
        # optimizer/scheduler factories are only needed when latent
        # optimization is enabled
        if self.optimize:
            self.optimizer_fn = get_optimizer(defense_cfg.optimizer)
            self.scheduler_fn = get_scheduler(defense_cfg.scheduler)
            self.rec_rr = defense_cfg.rec_rr        # random restarts
            self.rec_iters = defense_cfg.rec_iters  # optimization iterations

        # optionally perturb the input before reconstruction
        self.noisy_input = defense_cfg.get('noisy_input', False)
        if self.noisy_input:
            print('[+] Set noisy input True')
    def __init__(self, cfg):
        """Build the GAN-based preprocessor (generator-only variant): load a
        frozen generator from its checkpoint and read the defense settings.

        Args:
            cfg: project config object with ``generator`` and ``defense``
                sections; ``generator`` carries an ``args`` namespace and a
                ``ckpt`` path.
        """
        super(Preprocessor, self).__init__()

        # load generator (frozen: used only for reconstruction, never trained)
        self.gan = get_generator(cfg, cfg.generator)
        self.z_dim = cfg.generator.args.z_dim
        # map_location='cpu' lets a GPU-saved checkpoint load on a CPU-only
        # machine; load_state_dict then copies onto the model's own device.
        state_dict = torch.load(cfg.generator.ckpt, map_location='cpu')
        self.gan.load_state_dict(state_dict['netG'])
        for p in self.gan.parameters():
            p.requires_grad_(False)

        # append the de-processing layer so generator output is in image space
        transform = get_transform(cfg)
        self.gan = nn.Sequential(self.gan, transform.gan_deprocess_layer)

        defense_cfg = cfg.defense.args
        self.optimizer_fn = get_optimizer(defense_cfg.optimizer)
        self.scheduler_fn = get_scheduler(defense_cfg.scheduler)
        self.rec_rr = defense_cfg.rec_rr        # random restarts
        self.rec_iters = defense_cfg.rec_iters  # optimization iterations
        self.w_lpips = defense_cfg.w_lpips      # LPIPS loss weight
        self.return_latents = True
    def __init__(self, cfg):
        """Build a StyleGAN-based preprocessor: load a frozen pretrained
        generator and estimate W-space statistics for latent optimization.

        Args:
            cfg: project config object with ``dataset.test`` and ``defense``
                sections.
        """
        super(Preprocessor, self).__init__()
        self.batch_size = cfg.dataset.test.batch_size
        # TODO(review): hard-coded absolute checkpoint path — should come
        # from cfg so the code is portable across machines.
        gan_path = '/vulcanscratch/cplau/Code/Python/InvGAN-PyTorch-master/stylegan_old/pretrain/stylegan_imagenet.pth'
        # load generator
        gen = StyleGANGeneratorModel()
        # map_location='cpu' loads the checkpoint regardless of the device it
        # was saved on; the model is moved to GPU below.
        state_dict = torch.load(gan_path, map_location='cpu')
        # the checkpoint is missing the truncation buffer; copy it from the
        # freshly constructed model so load_state_dict does not fail
        var_name = 'truncation.truncation'
        state_dict[var_name] = gen.state_dict()[var_name]
        gen.load_state_dict(state_dict)

        # freeze the generator — it is only used for reconstruction
        for p in gen.parameters():
            p.requires_grad_(False)

        gen.cuda()
        gen.eval()
        self.gan = gen

        # estimate mean/std of the mapped latent (W) distribution by sampling
        # (presumably used later to initialize/regularize latent
        # optimization — verify against the forward pass)
        n_samples = 10000
        with torch.no_grad():
            self.noise_sample = torch.randn(n_samples, 512).cuda()
            self.latent_out = self.gan.mapping(self.noise_sample)
            self.latent_mean = self.latent_out.mean(0)
            self.latent_std = ((self.latent_out - self.latent_mean).pow(2).sum() / n_samples) ** 0.5
            print('Finished estimating w statistics ..')

        # load transformation (de-processing layer deliberately not appended)
        transform = get_transform(cfg)
        # self.gan = nn.Sequential(self.gan, transform.gan_deprocess_layer)

        defense_cfg = cfg.defense.args
        self.optimizer_fn = get_optimizer(defense_cfg.optimizer)
        self.scheduler_fn = get_scheduler(defense_cfg.scheduler)
        self.rec_rr = defense_cfg.rec_rr        # random restarts
        self.rec_iters = defense_cfg.rec_iters  # optimization iterations
        self.w_lpips = defense_cfg.w_lpips      # LPIPS loss weight
        self.return_latents = True
# build generator and discriminator, move them to the GPU and put them
# in training mode
netG = get_generator(cfg, cfg.generator)
netD = get_discriminator(cfg, cfg.discriminator)
netG.cuda()
netD.cuda()
netG.train()
netD.train()
print(netG)
print(netD)

# one optimizer per network, built from the corresponding config section
optG = get_optimizer(cfg.optimizer.generator)(params=netG.parameters())
optD = get_optimizer(cfg.optimizer.discriminator)(params=netD.parameters())

# dataset and dataloader for GAN training
transform = get_transform(cfg)
dataset = get_dataset(cfg)
trainset = dataset(
    root=dataset_cfg.path,
    train=True,
    transform=transform.gan_training,
)
trainloader = DataLoader(
    trainset,
    batch_size=dataset_cfg.batch_size,
    num_workers=0,
    shuffle=True,
)

# adversarial loss (mode taken from the training config)
gan_loss = GANLoss(gan_mode=train_cfg.loss_type)

# training, visualizing, saving
iters = 0
for epoch in range(cfg.num_epochs):
    for i, data in enumerate(trainloader):