def __init__(self, params):
    """Build the attack pipeline: a pretrained target classifier and a
    single fader autoencoder driven by a learnable attribute module.

    Args:
        params: configuration object; this constructor reads
            ``img_sz`` and ``img_fm`` and passes the whole object to
            ``AutoEncoder``.
    """
    super(Attacker, self).__init__()
    self.params = params
    # Target classifier built for (img_sz, img_sz, img_fm) inputs.
    # NOTE(review): weights are presumably loaded later (e.g. by a
    # restore() method) — this only builds the architecture.
    self.target_model = Classifier(
        (params.img_sz, params.img_sz, params.img_fm))
    # Fader-network autoencoder used to generate the adversarial image.
    self.adv_generator = AutoEncoder(params)
    # Learnable attribute vector, initialized at 0.
    self.attrib_gen = AttEncoderModule(0.)
class Attacker(nn.Module):
    """An optimizable attack system built around a single fader network.

    The input passes through a pretrained fader network for attribute
    modification, then through the target model:

        Input -> (Fader network) -> (Target model) -> Classification label

    Since the fader network requires each attribute element ``alpha_i``
    to be expanded to ``(alpha_i, 1 - alpha_i)``, the attribute-encoder
    module (``AttEncoderModule``) handles this expansion while
    preserving gradients.
    """

    def __init__(self, params):
        """Build the target classifier, the fader autoencoder, and the
        learnable attribute module.

        Args:
            params: configuration object; ``img_sz`` and ``img_fm`` are
                read here and the whole object is handed to
                ``AutoEncoder``.
        """
        super(Attacker, self).__init__()
        self.params = params
        # Target classifier built for (img_sz, img_sz, img_fm) inputs;
        # weights are loaded in restore().
        self.target_model = Classifier(
            (params.img_sz, params.img_sz, params.img_fm))
        self.adv_generator = AutoEncoder(params)
        # Learnable attribute vector, initialized at 0.
        self.attrib_gen = AttEncoderModule(0.)

    def restore(self, legacy=False):
        """Load pretrained weights for the target model and the fader.

        Args:
            legacy: if True, patch an old-format fader checkpoint with
                ``_LEGACY_STATE_DICT_PATCH`` before loading.
        """
        self.target_model.load_state_dict(torch.load(self.params.model))
        if legacy:
            old_model_state_dict = torch.load(self.params.fader)
            # Fill in keys missing from old checkpoints.
            old_model_state_dict.update(_LEGACY_STATE_DICT_PATCH)
            model_state_d = old_model_state_dict
        else:
            model_state_d = torch.load(self.params.fader)
        # strict=False: tolerate checkpoint/model key mismatches.
        self.adv_generator.load_state_dict(model_state_d, strict=False)

    def forward(self, x, attrib_vector=None):
        """Run the attack pipeline on a batch of images.

        Args:
            x: input image batch.
            attrib_vector: optional externally supplied attribute
                vector; when None (default), the learnable attribute
                module supplies it.

        Returns:
            Tuple of (reconstructed adversarial image, classifier output).
        """
        # Fix: honor an externally supplied attribute vector instead of
        # silently ignoring it (the multi-fader variant of this class
        # already does so). Default behavior (None) is unchanged.
        if attrib_vector is None:
            self.attrib_vec = self.attrib_gen()
        else:
            self.attrib_vec = attrib_vector
        l_z = self.adv_generator.encode(x)
        # decode() returns intermediate outputs; [-1] is the final image.
        recon = self.adv_generator.decode(l_z, self.attrib_vec)[-1]
        cl_label = self.target_model(recon)
        return recon, cl_label
def __init__(self, params):
    """Build the attack pipeline with an epsilon-constrained attribute
    module (projection variant).

    Args:
        params: configuration object; this constructor reads
            ``img_sz``, ``img_fm``, ``eps``, and ``proj_flag``, and
            passes the whole object to ``AutoEncoder``.
    """
    super(Attacker, self).__init__()
    self.params = params
    # Target classifier built for (img_sz, img_sz, img_fm) inputs.
    self.target_model = Classifier(
        (params.img_sz, params.img_sz, params.img_fm))
    self.adv_generator = AutoEncoder(params)
    # Perturbation budget and projection switch for the attribute
    # module. NOTE(review): presumably eps bounds how far the attribute
    # vector may move and proj_flag enables projection onto that ball —
    # confirm against AttEncoderModule.
    self.eps = params.eps
    self.projection = params.proj_flag
    # Attribute module initialized at (1.0, 1.0, 1.0) with optional
    # eps-projection.
    self.attrib_gen = AttEncoderModule(1.0, 1.0, 1.0,
                                       self.projection, self.eps)
class Attacker(nn.Module):
    """An optimizable attack system chaining THREE fader networks.

    The input is modified sequentially by three pretrained fader
    networks (each with its own attribute module) before reaching the
    target model:

        Input -> Fader1 -> Fader2 -> Fader3 -> (Target model) -> label

    Since each fader requires attribute elements ``alpha_i`` expanded to
    ``(alpha_i, 1 - alpha_i)``, the attribute-encoder modules handle
    this expansion while preserving gradients.
    """

    def __init__(self, params):
        """Build the target classifier, three fader autoencoders, and
        their learnable attribute modules.

        Args:
            params: configuration object; reads ``img_sz``, ``img_fm``
                and the per-fader configs ``f1_params`` / ``f2_params``
                / ``f3_params``.
        """
        super(Attacker, self).__init__()
        self.params = params
        # Target classifier built for (img_sz, img_sz, img_fm) inputs;
        # weights are loaded in restore().
        self.target_model = Classifier(
            (params.img_sz, params.img_sz, params.img_fm))
        # One autoencoder per fader stage, each with its own config.
        self.adv_generator_1 = AutoEncoder(params.f1_params)
        self.adv_generator_2 = AutoEncoder(params.f2_params)
        self.adv_generator_3 = AutoEncoder(params.f3_params)
        # One learnable attribute vector per stage, initialized at 0.
        self.attrib_1 = AttEncoderModule(0.)
        self.attrib_2 = AttEncoderModule(0.)
        self.attrib_3 = AttEncoderModule(0.)

    def restore(self, legacy=False):
        """Load pretrained weights for the target model and all faders.

        Args:
            legacy: accepted for interface parity with the single-fader
                variant, but currently unused here.
        """
        self.target_model.load_state_dict(torch.load(self.params.model))
        model_state_d_1 = torch.load(self.params.fader1)
        model_state_d_2 = torch.load(self.params.fader2)
        model_state_d_3 = torch.load(self.params.fader3)
        # strict=False: tolerate checkpoint/model key mismatches.
        self.adv_generator_1.load_state_dict(model_state_d_1, strict=False)
        self.adv_generator_2.load_state_dict(model_state_d_2, strict=False)
        self.adv_generator_3.load_state_dict(model_state_d_3, strict=False)

    def forward(self, x, attrib_vector=None):
        """Run the three-stage attack pipeline on a batch of images.

        Args:
            x: input image batch.
            attrib_vector: optional externally supplied attribute
                tensor of shape (batch, 6): columns [0:2], [2:4], [4:6]
                feed stages 1, 2, 3 respectively. When None (default),
                the learnable attribute modules supply the vectors.

        Returns:
            Tuple of (final reconstructed adversarial image, classifier
            output).
        """
        if attrib_vector is None:
            self.attrib_v1 = self.attrib_1()
            self.attrib_v2 = self.attrib_2()
            self.attrib_v3 = self.attrib_3()
        else:
            # Split the supplied vector into per-stage 2-column slices.
            self.attrib_v1 = attrib_vector[:, :2]
            self.attrib_v2 = attrib_vector[:, 2:4]
            self.attrib_v3 = attrib_vector[:, 4:]
        # Feed each stage's reconstruction into the next fader.
        recon = x
        for generator, attrib in ((self.adv_generator_1, self.attrib_v1),
                                  (self.adv_generator_2, self.attrib_v2),
                                  (self.adv_generator_3, self.attrib_v3)):
            latent = generator.encode(recon)
            # decode() returns intermediate outputs; [-1] is the image.
            recon = generator.decode(latent, attrib)[-1]
        cl_label = self.target_model(recon)
        return recon, cl_label