Example #1
    def adversarialTrainLoader(self, level=0):
        print(level * "   " + "Creating the adversarial data loader (according to the current mixture)...")
        # Build an L-infinity PGD attack against the current model from the configured budget.
        attack = LINFPGD(self,
                         eps=self.config['eps'] / 255,
                         eps_iter=self.config['eps_iter'] / 255,
                         nb_iter=self.config['nb_iter'],
                         rand_init=self.config['rand_init'],
                         clip_min=self.config['clip_min'],
                         clip_max=self.config['clip_max'])

        # Training dataset: rebuild a loader over the clean training data.
        if self.config['dataset'] == "cifar10":
            data_class = CIFAR10
        elif self.config['dataset'] == "cifar100":
            data_class = CIFAR100
        else:
            raise ValueError(f"Unsupported dataset: {self.config['dataset']}")

        train_loader = DataLoader(
            data_class(
                root=self.config['dataroot'],
                train=True,
                download=True,
                transform=transforms.Compose([
                    transforms.ToTensor()
                ])
            ),
            batch_size=self.train_loader.batch_size,
            shuffle=True,
            num_workers=2,
            drop_last=True
        )

        batch_size = self.train_loader.batch_size

        # Overwrite each batch of clean training images in the dataset with adversarial examples.
        for i in range(len(train_loader)):
            train_loader.dataset.data[np.arange(i * batch_size, (i + 1) * batch_size)] = attack.perturb(
                train_loader.dataset.data[np.arange(i * batch_size, (i + 1) * batch_size)].cuda()).cpu()
        # Testing dataset: apply the same perturbation to the held-out split.
        test_loader = DataLoader(
            data_class(
                root=self.config['dataroot'],
                train=False,
                download=True,
                transform=transforms.Compose([
                    transforms.ToTensor()
                ])
            ),
            batch_size=self.test_loader.batch_size,
            shuffle=True,
            num_workers=2,
            drop_last=True
        )

        batch_size = self.test_loader.batch_size
        # Overwrite each batch of clean test images with adversarial examples.
        for i in range(len(test_loader)):
            test_loader.dataset.data[np.arange(i * batch_size, (i + 1) * batch_size)] = attack.perturb(
                test_loader.dataset.data[np.arange(i * batch_size, (i + 1) * batch_size)].cuda()).cpu()
        return train_loader, test_loader
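
# Usage sketch (not from the original source): one way the adversarial loaders built
# above might be consumed to measure robust accuracy. `model` (an instance of the class
# defining adversarialTrainLoader) and `device` are hypothetical names; torch is assumed
# to be imported elsewhere in the file.
adv_train_loader, adv_test_loader = model.adversarialTrainLoader()
model.eval()
correct, total = 0, 0
with torch.no_grad():
    for images, labels in adv_test_loader:
        images, labels = images.to(device), labels.to(device)
        correct += (model(images).argmax(dim=1) == labels).sum().item()
        total += labels.size(0)
print(f"Adversarial test accuracy: {correct / total:.4f}")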
Example #2
        self.fit(X, y, X_valid, y_valid)
        return self.predict(X)

    def predict_proba(self, X: np.ndarray):
        # Placeholder: probability outputs are not implemented; the model instance itself is returned.
        return self

    def predict(self, X):
        self.eval()
        with torch.no_grad():
            y = self(X)
        return y


if __name__ == '__main__':
    writer = SummaryWriter()
    # Project-specific data loader (not torch.utils.data.DataLoader); it provides the
    # standard train/test splits used below.
    loader = DataLoader()

    # Pass the TensorBoard writer created above into the model so training is logged.
    dfmnet = DFMNET(INPUT_DIM,
                    OUTPUT_DIM,
                    batch_size=BATCH_SIZE,
                    n_epochs=N_EPOCHS,
                    writer=writer)

    train_x, train_y = loader.getStandardTrainDataSet()
    dfmnet.fit(train_x, train_y)

    for tag in loader.dataset_tags:
        print("Test: ", tag)
        x_data, y_data = loader.getStandardTestDataSet(tag)
        x_data_torch = torch.from_numpy(x_data).type(torch.float).to(DEVICE)
        pred = dfmnet.predict(x_data_torch)
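        # Illustration (not in the original excerpt): `pred` is otherwise unused, so one
        # plausible check is a mean-squared error against the ground truth, assuming a
        # regression task and that `pred` is a torch tensor on DEVICE.
        mse = torch.mean((pred.cpu() - torch.from_numpy(y_data).float()) ** 2).item()
        print("  MSE:", mse)
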
if config['dataset'] == 'cifar10':
    custom_data_class = CIFAR10
    original_data_class = datasets.CIFAR10
    config['number_of_class'] = 10

elif config['dataset'] == 'cifar100':
    custom_data_class = CIFAR100
    original_data_class = datasets.CIFAR100
    config['number_of_class'] = 100
else:
    raise ValueError(f"Unsupported dataset: {config['dataset']}")

train_loader = DataLoader(custom_data_class(root=config['dataroot'],
                                            train=True,
                                            download=True,
                                            transform=transforms.Compose(
                                                [transforms.ToTensor()])),
                          batch_size=config['batch_size'],
                          shuffle=True,
                          num_workers=0,
                          drop_last=True)

test_loader = tDataLoader(original_data_class(root=config['dataroot'],
                                              train=False,
                                              transform=transforms.Compose(
                                                  [transforms.ToTensor()])),
                          batch_size=config['test_batch_size'],
                          shuffle=False,
                          num_workers=0,
                          drop_last=True)
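
# Assumed imports for the loaders above (they would normally sit at the top of the file).
# The module providing the custom CIFAR10/CIFAR100 wrappers is a guess, and `tDataLoader`
# is assumed to be an alias for torch's standard DataLoader.
from torch.utils.data import DataLoader, DataLoader as tDataLoader
from torchvision import datasets, transforms
from custom_datasets import CIFAR10, CIFAR100  # hypothetical module name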

# This classifier is used as a base for all task-specific classifiers
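
# A minimal sketch (not the original implementation) of what such a base classifier might
# look like: a small convolutional network whose classification head the task-specific
# subclasses would replace or extend. Layer sizes and the class name are assumptions.
import torch.nn as nn


class BaseClassifier(nn.Module):
    def __init__(self, number_of_class):
        super().__init__()
        # Shared feature extractor for 32x32 RGB inputs (CIFAR-sized images).
        self.features = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Task-specific subclasses typically re-initialize or swap this head.
        self.classifier = nn.Linear(64 * 8 * 8, number_of_class)

    def forward(self, x):
        x = self.features(x)
        x = x.flatten(1)
        return self.classifier(x)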