Example #1
    def __init__(self, pgd_alpha: float = 1.0, pgd_epsilon: float = 8.0, pgd_iteration: int = 8,
                 stop_conf: float = 0.9,
                 magnet: bool = False, randomized_smooth: bool = False, curvature: bool = False, **kwargs):
        super().__init__(**kwargs)
        # register the PGD hyper-parameter names for parameter listing/summary
        self.param_list['pgd'] = ['pgd_alpha', 'pgd_epsilon', 'pgd_iteration']
        self.pgd_alpha: float = pgd_alpha
        self.pgd_epsilon: float = pgd_epsilon
        self.pgd_iteration: int = pgd_iteration
        # pixel-scale alpha/epsilon are rescaled to the [0, 1] image range
        self.pgd = PGD_Optimizer(alpha=self.pgd_alpha / 255, epsilon=self.pgd_epsilon / 255,
                                 iteration=self.pgd_iteration)
        self.stop_conf: float = stop_conf
        if magnet:
            # optional MagNet defense, pretrained on the same dataset
            self.magnet: MagNet = MagNet(dataset=self.dataset, pretrain=True)
        self.randomized_smooth: bool = randomized_smooth
        if curvature:
            # optional curvature estimator attached to the target model
            self.curvature: Curvature = Curvature(model=self.model)
Example #2
    def __init__(self, noise_dim: int, dim: int, data_shape: list[int] = [3, 32, 32],
                 generator_iters: int = 1000, critic_iter: int = 5):
        self.noise_dim = noise_dim
        self.G: Generator = Generator(noise_dim, dim, data_shape)
        self.D: Discriminator = Discriminator(dim, data_shape)
        if env['num_gpus']:
            self.G.cuda()
            self.D.cuda()
        # RMSprop with lr=5e-5, the setting from the original WGAN paper
        self.d_optimizer = optim.RMSprop(self.D.parameters(), lr=5e-5)
        self.g_optimizer = optim.RMSprop(self.G.parameters(), lr=5e-5)
        self.generator_iters = generator_iters  # number of generator updates (default 1000)
        self.critic_iter = critic_iter          # critic updates per generator update
        self.mse_loss = torch.nn.MSELoss()

        self.gan_pgd: PGD_Optimizer = PGD_Optimizer(epsilon=1.0, iteration=500, output=0)
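
For context, RMSprop with lr=5e-5 and 5 critic updates per generator update are the hyper-parameters of the original WGAN paper (Arjovsky et al., 2017). Below is a minimal, self-contained sketch of that standard alternating critic/generator update; toy_gen, toy_critic and the random batches are placeholders invented for illustration, not the Generator/Discriminator or training loop of the class above, and the weight clipping shown is the vanilla-WGAN variant.

import torch
import torch.nn as nn
import torch.optim as optim

noise_dim, data_dim = 100, 32
toy_gen = nn.Sequential(nn.Linear(noise_dim, data_dim))     # placeholder generator
toy_critic = nn.Sequential(nn.Linear(data_dim, 1))          # placeholder critic
g_opt = optim.RMSprop(toy_gen.parameters(), lr=5e-5)
d_opt = optim.RMSprop(toy_critic.parameters(), lr=5e-5)

for _ in range(10):                          # generator iterations (toy count)
    for _ in range(5):                       # critic_iter critic updates per generator update
        real = torch.randn(64, data_dim)     # placeholder "real" batch
        fake = toy_gen(torch.randn(64, noise_dim)).detach()
        d_loss = toy_critic(fake).mean() - toy_critic(real).mean()
        d_opt.zero_grad(); d_loss.backward(); d_opt.step()
        for p in toy_critic.parameters():    # weight clipping as in vanilla WGAN
            p.data.clamp_(-0.01, 0.01)
    fake = toy_gen(torch.randn(64, noise_dim))
    g_loss = -toy_critic(fake).mean()        # generator maximizes the critic score
    g_opt.zero_grad(); g_loss.backward(); g_opt.step()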