class Attacker(nn.Module):
    """
    Defines a attack system which can be optimized.
    Input passes through a pretrained fader network for modification.
    Input -> (Fader network) -> (Target model) -> Classification label

    Since Fader network requires that the attribute vector elements (alpha_i) be converted to (alpha_i, 1-alpha_i),
    we use the Mod alpha class to handle this change while preserving gradients.
    """
    def __init__(self, params):
        super(Attacker, self).__init__()
        self.params = params
        self.target_model = Classifier(
            (params.img_sz, params.img_sz, params.img_fm))
        self.adv_generator = AutoEncoder(params)
        self.attrib_gen = AttEncoderModule(0.)

    def restore(self, legacy=False):
        self.target_model.load_state_dict(torch.load(self.params.model))
        if legacy:
            old_model_state_dict = torch.load(self.params.fader)
            old_model_state_dict.update(_LEGACY_STATE_DICT_PATCH)
            model_state_d = old_model_state_dict
        else:
            model_state_d = torch.load(self.params.fader)
        self.adv_generator.load_state_dict(model_state_d, strict=False)

    def forward(self, x, attrib_vector=None):
        # use the supplied attribute vector if given, otherwise generate one
        if attrib_vector is None:
            self.attrib_vec = self.attrib_gen()
        else:
            self.attrib_vec = attrib_vector
        l_z = self.adv_generator.encode(x)
        recon = self.adv_generator.decode(l_z, self.attrib_vec)[-1]
        cl_label = self.target_model(recon)
        return recon, cl_label
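# A minimal sketch of the "mod alpha" idea described in the docstring above (the real
# AttEncoderModule is defined elsewhere and is not shown in this listing; the class name,
# the single initial logit, and the sigmoid squashing below are assumptions). It keeps one
# learnable logit per attacked attribute and emits interleaved (alpha_i, 1 - alpha_i) pairs,
# so the attribute vector fed to the fader network stays differentiable w.r.t. the logits.
import torch
import torch.nn as nn


class AttEncoderModuleSketch(nn.Module):

    def __init__(self, init_logit=0., n_attrs=1):
        super(AttEncoderModuleSketch, self).__init__()
        # one optimizable logit per attribute; this is the only trainable part of the attack
        self.logits = nn.Parameter(torch.full((1, n_attrs), float(init_logit)))

    def forward(self):
        alpha = torch.sigmoid(self.logits)                 # alpha_i in (0, 1)
        pairs = torch.stack([alpha, 1.0 - alpha], dim=2)   # (1, n_attrs, 2)
        # flatten to (1, 2 * n_attrs): [a_1, 1-a_1, a_2, 1-a_2, ...], matching the
        # per-attribute slicing used by the multi-fader variant further below
        return pairs.view(1, -1)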
    def __init__(self, params):
        super(Attacker, self).__init__()
        self.params = params
        self.target_model = Classifier(
            (params.img_sz, params.img_sz, params.img_fm))
        self.adv_generator = AutoEncoder(params)
        self.eps = params.eps
        self.projection = params.proj_flag
        self.attrib_gen = AttEncoderModule(1.0, 1.0, 1.0, self.projection,
                                           self.eps)
class Attacker(nn.Module):
    """
    Defines a attack system which can be optimized.
    Input passes through a pretrained fader network for modification.
    Input -> (Fader network) -> (Target model) -> Classification label

    Since Fader network requires that the attribute vector elements (alpha_i) be converted to (alpha_i, 1-alpha_i),
    we use the Mod alpha class to handle this change while preserving gradients.
    """

    def __init__(self, params):
        super(Attacker, self).__init__()
        self.params = params
        self.target_model = Classifier(
            (params.img_sz, params.img_sz, params.img_fm))
        self.adv_generator_1 = AutoEncoder(params.f1_params)
        self.adv_generator_2 = AutoEncoder(params.f2_params)
        self.adv_generator_3 = AutoEncoder(params.f3_params)
        self.attrib_1 = AttEncoderModule(0.)
        self.attrib_2 = AttEncoderModule(0.)
        self.attrib_3 = AttEncoderModule(0.)

    def restore(self, legacy=False):
        self.target_model.load_state_dict(torch.load(self.params.model))
        model_state_d_1 = torch.load(self.params.fader1)
        model_state_d_2 = torch.load(self.params.fader2)
        model_state_d_3 = torch.load(self.params.fader3)
        self.adv_generator_1.load_state_dict(model_state_d_1, strict=False)
        self.adv_generator_2.load_state_dict(model_state_d_2, strict=False)
        self.adv_generator_3.load_state_dict(model_state_d_3, strict=False)
    
    def forward(self, x, attrib_vector=None):
        if attrib_vector is None:
            self.attrib_v1 = self.attrib_1()
            self.attrib_v2 = self.attrib_2()
            self.attrib_v3 = self.attrib_3()
        else:
            self.attrib_v1 = attrib_vector[:,:2]
            self.attrib_v2 = attrib_vector[:,2:4]
            self.attrib_v3 = attrib_vector[:,4:]
            # print(attrib_vector.size())
        l_z_1 = self.adv_generator_1.encode(x)
        recon_1 = self.adv_generator_1.decode(l_z_1, self.attrib_v1)[-1]
        l_z_2 = self.adv_generator_2.encode(recon_1)
        recon_2 = self.adv_generator_2.decode(l_z_2, self.attrib_v2)[-1]
        l_z_3 = self.adv_generator_3.encode(recon_2)
        recon_3 = self.adv_generator_3.decode(l_z_3, self.attrib_v3)[-1]
        cl_label = self.target_model(recon_3)
        return recon_3, cl_label
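# Hedged sketch of how this Attacker could be optimized (the actual attack loop is not part
# of this listing): freeze the fader networks and the target classifier and update only the
# attribute-encoder parameters so that the reconstruction flips the classifier's decision.
# It assumes the attribute modules expose their logits as nn.Parameters; the optimizer,
# loss, learning rate, and target-label convention are likewise assumptions.
import torch
import torch.nn.functional as F


def run_attack_sketch(attacker, x, target_label, steps=100, lr=0.1):
    attacker.restore()
    # keep the pretrained weights fixed; only the attribute logits are attacked
    for module in (attacker.target_model, attacker.adv_generator_1,
                   attacker.adv_generator_2, attacker.adv_generator_3):
        for p in module.parameters():
            p.requires_grad_(False)
    attrib_params = (list(attacker.attrib_1.parameters())
                     + list(attacker.attrib_2.parameters())
                     + list(attacker.attrib_3.parameters()))
    optimizer = torch.optim.Adam(attrib_params, lr=lr)
    for _ in range(steps):
        optimizer.zero_grad()
        recon, logits = attacker(x)
        # push the classifier toward the (assumed categorical) target label
        loss = F.cross_entropy(logits, target_label)
        loss.backward()
        optimizer.step()
    return recon.detach()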
    def __init__(self, params, params_gen, input_logits):
        super(Attacker, self).__init__()
        self.params = params
        if self.params.dtype == 'celeba':
            self.sorted_attr = _SORTED_ATTR
        elif self.params.dtype == 'bdd':
            self.sorted_attr = _BDD_ATTR
        self.ctype = params.ctype
        if self.ctype == 'simple':
            self.target_model = Classifier(
                (params.img_sz, params.img_sz, params.img_fm))
        elif self.ctype == 'resnet':
            self.target_model = ResNet()
        else:
            raise Exception('Unknown classifier type: {}'.format(self.ctype))
        self.adv_generator = Generator(params_gen.enc_dim, params_gen.enc_layers, params_gen.enc_norm, params_gen.enc_acti,
                                       params_gen.dec_dim, params_gen.dec_layers, params_gen.dec_norm, params_gen.dec_acti,
                                       params_gen.n_attrs, params_gen.shortcut_layers, params_gen.inject_layers, params_gen.img_size)
        self.eps = params.eps
        self.projection = params.proj_flag
        self.input_logits = torch.tensor(input_logits).requires_grad_(False)
        # print(self.input_logits)
        self.attrib_gen = AttEncoderModule(
            self.input_logits, params.attk_attribs, params_gen.thres_int, self.projection, self.eps, self.sorted_attr)
Example #6
from load_mnist import setup_data_loaders
from simple_classifier import Classifier, train_classifier


def transform(x):
    # flatten a batch of 28x28 MNIST images into 784-dimensional vectors
    x = x.squeeze()
    batch = x.shape[0]
    x = x.reshape(batch, 784)
    return x


train_loader, test_loader = setup_data_loaders()
model = Classifier(784, 200)
train_classifier(model, train_loader, test_loader, transform=transform)
Example #7
def get_data_loader(args, test, shuffle=False):
    # NOTE: the opening of this function is missing from the snippet; the signature above
    # and the transforms.Compose wrapper are reconstructed from the call site and the lines
    # below, and the default for `shuffle` is an assumption.
    custom_transforms = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.Lambda(lambda x:
                          (2.0 * np.asarray(x, dtype=np.float32) / 255.0 - 1)),
        transforms.ToTensor()
    ])
    ds = CelebA_Dataset(args.attrib_path,
                        args.data_dir,
                        test,
                        args.test_attribute,
                        transform=custom_transforms)
    return DataLoader(ds, batch_size=args.batch_size, shuffle=shuffle)


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

cl = Classifier(input_size=(256, 256, 3))

cl.load_state_dict(
    torch.load(
        "/data/work2/AdversarialFaderNetworks/new_class_model/best_model.pth"))

cl.to(device)

cl.eval()

test_loader = get_data_loader(args, 'test')

correct = 0
total = 0

with torch.no_grad():
    # NOTE: the original snippet is truncated here; the accuracy loop below is an assumed
    # completion based on the `correct` / `total` counters defined above, and the label
    # format returned by CelebA_Dataset is an assumption.
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = cl(images)
        predicted = outputs.argmax(dim=1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Test accuracy: {:.4f}'.format(correct / total))
Example #8
# NOTE: imports reconstructed for this snippet; `Classifier` here is assumed to be a local
# Keras-style wrapper exposing fit/evaluate and is not imported explicitly in the original.
from keras.datasets.mnist import load_data
from keras.utils import to_categorical

batch_size = 16
epochs = 10

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = load_data()

x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

clf = Classifier(784, 10)  # 784 is the number of pixels in an image

clf.fit(x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        validation_data=(x_test, y_test))

score = clf.evaluate(x_test, y_test)

print(score)
Example #9
import tensorflow as tf
import numpy as np
from simple_autoencoder import Autoencoder
from simple_classifier import Classifier
import DatasetLoader as loader

print(tf.__version__)
encodeLength = 8

# dataset
x, y, lengths, lengthMax = loader.loadDefault()

# encoding/embedding
encoder = Autoencoder(lengthMax, encodeLength)
# load autoencoder
encoder.restore()
# encoder.fit(tracesRaw, epochs=10)

tracesEnc = encoder.encode(x)
tracesDec = encoder.decode(tracesEnc)

encoder.terminate()
print(tracesEnc[0])
print(tracesDec[0])

print(x, y, lengthMax)

# classifier: trained on the length-8 encoded traces rather than the raw inputs
classifier = Classifier(encodeLength)
classifier.fit(tracesEnc, y)