Example #1
def mcdropout_test(model, args, test_loader, stochastic_passes=100):
    """
    Carry out basic tests on the BCNN.

    :param model: A trained BCNN
    :type model: Torch Model
    :param args: Arguments object
    :param test_loader: Testing dataset
    :param stochastic_passes: Number of stochastic passes to make per image
    :type stochastic_passes: int
    """
    with torch.no_grad():
        model.train()
        test_loss = 0
        correct = 0
        for data, target in tqdm(test_loader, desc='Batching Test Data'):
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            output_list = []
            for i in range(stochastic_passes):
                output_list.append(torch.unsqueeze(model(data), 0))
            # Average the stochastic outputs to get the predictive mean
            output_mean = torch.cat(output_list, 0).mean(0)
            test_loss += F.nll_loss(F.log_softmax(output_mean, dim=1), target, reduction="sum").item()  # sum up batch loss
            pred = output_mean.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()

        test_loss /= len(test_loader.dataset)
        uf.box_print('MC Dropout Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
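
Example #1 reports only the predictive mean over the stochastic passes. The same passes can also give an uncertainty estimate; a minimal sketch (following the variance pattern used in Example #8 below) is shown here. The mc_predict helper name is illustrative and not part of the repository.

import torch
import torch.nn.functional as F

def mc_predict(model, data, passes=100):
    """Illustrative MC-dropout helper: predictive mean and variance for one batch."""
    model.train()  # keep dropout layers active at test time
    with torch.no_grad():
        outputs = torch.stack([F.softmax(model(data), dim=1) for _ in range(passes)], 0)
    output_mean = outputs.mean(0)              # predictive mean over the stochastic passes
    output_var = outputs.var(0).mean().item()  # mean per-class variance as a scalar uncertainty
    return output_mean, output_var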
Example #2
def test(model, args, test_loader):
    """
    Test a CNN's performance.

    :param model: A trained CNN
    :type model: Torch Model
    :param args: Arguments object
    :param test_loader: Testing dataset
    """
    with torch.no_grad():
        model.eval()
        test_loss = 0
        correct = 0
        # Data and target are a single pair of images and labels.
        for data, target in tqdm(test_loader, desc='Batching Test Data'):
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            pred, tloss = make_prediction(data, target)
            test_loss += tloss
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()

        test_loss /= len(test_loader.dataset)
        uf.box_print(
            'Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
                test_loss, correct, len(test_loader.dataset),
                100. * correct / len(test_loader.dataset)))
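
Example #2 delegates the forward pass to a make_prediction helper that is not included in this listing. A minimal sketch of what such a helper could look like follows, assuming model is accessible in the enclosing scope and returns raw logits; this is an assumption, not the repository's actual implementation.

def make_prediction(data, target):
    # Hypothetical helper: forward pass, summed NLL loss, and argmax prediction.
    # Assumes `torch.nn.functional as F` and a trained `model` in scope, as in the examples above.
    output = model(data)
    log_probs = F.log_softmax(output, dim=1)        # normalise over the class dimension
    tloss = F.nll_loss(log_probs, target, reduction="sum").item()
    pred = log_probs.data.max(1, keepdim=True)[1]   # index of the max log-probability
    return pred, tloss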
Example #3
def fgsm_test(model, adversary, args, test_loader):
    """
    Evaluate a standard neural network's performance on images that have had adversarial attacks inflicted upon them.

    :param model: A trained CNN
    :type model: Torch Model
    :param adversary: An adversarial object for which attacks can be crafted
    :param args: Arguments object
    :param test_loader: Testing dataset
    """
    model.eval()
    test_loss = 0
    correct = 0
    # Data and target are a single pair of images and labels.
    for data, target in tqdm(test_loader, desc='Batching Test Data'):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data = adversary.fgsm(data, target)
        data, target = Variable(data), Variable(target)
        output = model(data)
        test_loss += F.nll_loss(F.log_softmax(output, dim=1), target, reduction="sum").item()  # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    uf.box_print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
Example #4
    def __init__(self, model, epsilon, limits=(-1, 1)):
        self.net = model
        self.eps = epsilon
        self.lim = limits
        self.cost = nn.CrossEntropyLoss()
        self.counter = 0
        uf.box_print('Creating Adversaries with Epsilon = {}'.format(self.eps))
Example #5
def test(model, args, test_loader):
    """
    Test a CNN's performance.

    :param model: A trained CNN
    :type model: Torch Model
    :param args: Arguments object
    :param test_loader: Testing dataset
    """
    with torch.no_grad():
        model.eval()
        test_loss = 0
        correct = 0
        # Data and target are a single pair of images and labels.
        for data, target in tqdm(test_loader, desc='Batching Test Data'):
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            output = model(data)
            test_loss += F.nll_loss(F.log_softmax(output, dim=1), target, reduction="sum").item()  # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()

        test_loss /= len(test_loader.dataset)
        uf.box_print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
Example #6
    def fit(self, X, y):
        # One-hot encode the labels
        enc = LabelEncoder()
        enc.fit(y)
        enc_out = enc.transform(y)
        out = np_utils.to_categorical(enc_out)
        uf.box_print('Fitting KNN with K={}'.format(self.k))
        self.model.fit(X, out)
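
For reference, a small standalone illustration of the one-hot encoding step performed inside fit above, using toy labels and assuming the usual sklearn LabelEncoder and keras np_utils imports:

import numpy as np
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils

y = np.array(['bird', 'cat', 'cat', 'dog'])
enc = LabelEncoder().fit(y)
one_hot = np_utils.to_categorical(enc.transform(y))
# one_hot is a (4, 3) array; e.g. 'cat' maps to [0., 1., 0.]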
Example #7
    def __init__(self, model, epsilon, limits=(-1, 1)):
        """
        Initial parameters for the adversary.

        :param model: The neural network being attacked
        :type model: PyTorch Model
        :param epsilon: The magnitude for which the image should be perturbed
        :type epsilon: float
        :param limits: The l-infinity bound for perturbations
        :type limits: 2-tuple
        """
        self.net = model
        self.eps = epsilon
        self.lim = limits
        self.cost = nn.CrossEntropyLoss()
        self.counter = 0
        uf.box_print('Creating Adversaries with Epsilon = {}'.format(self.eps))
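
The fgsm method used throughout these examples is not shown in the listing. Below is a minimal sketch of the textbook FGSM step written against the attributes initialised above (self.net, self.eps, self.lim, self.cost); it is an assumed implementation, not necessarily the repository's exact code, which also appears to update self.counter and, at one call site, to accept a batch index.

    def fgsm(self, data, target):
        # Fast Gradient Sign Method: nudge each pixel in the direction that increases the loss.
        data = data.clone().detach().requires_grad_(True)
        loss = self.cost(self.net(data), target)
        loss.backward()
        perturbed = data + self.eps * data.grad.sign()
        # Keep the adversarial image inside the valid input range.
        return perturbed.clamp(self.lim[0], self.lim[1]).detach()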
Example #8
def fgsm_test_mc(model, adversary, args, test_loader, epsilon=1.0):
    uf.box_print('Calculating MC-Dropout Values for Adversarial Images')
    model.train()
    passes = 100
    results = []
    for data, target in tqdm(test_loader, desc='Batching Test Data'):
        adv = 'No Adversary'
        rand = np.random.rand()
        if rand < epsilon:
            data = adversary.fgsm(data, target)
            adv = 'Adversary'
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        output_list = []
        for i in range(passes):
            output_list.append(torch.unsqueeze(F.softmax(model(data), dim=1), 0))
        output_mean = torch.cat(output_list, 0).mean(0)
        output_var = torch.cat(output_list, 0).var(0).mean().item()
        confidence = output_mean.data.cpu().numpy().max()
        predict = output_mean.data.cpu().numpy().argmax()
        results.append([predict, confidence, target.item(), adv])
    results_df = pd.DataFrame(results, columns=['prediction', 'confidence', 'truth', 'adv_status'])
    results_df.to_csv('results/fgsm_{}_bnn.csv'.format(epsilon), index=False)
Example #9
        model_standard.cuda()
        model_dropout.cuda()

    if not os.path.exists('checkpoint'):
        os.makedirs('checkpoint')

    # Train both models
    if args.mode == 0:
        optimizer_standard = optim.SGD(model_standard.parameters(),
                                       lr=args.lr,
                                       momentum=args.momentum)
        optimizer_dropout = optim.SGD(model_dropout.parameters(),
                                      lr=args.lr,
                                      momentum=args.momentum)

        uf.box_print('Train standard LeNet')
        start = time.time()
        for epoch in range(1, args.epochs + 1):
            train(model_standard, optimizer_standard, epoch, args,
                  train_loader)
        end = time.time() - start
        uf.box_print('Training Time for Standard Model: {}'.format(end))
        test(model_standard, args, test_loader)

        uf.box_print('Train LeNet with dropout at all layers')
        start = time.time()
        for epoch in range(1, args.epochs + 1):
            train(model_dropout, optimizer_dropout, epoch, args, train_loader)
        end = time.time() - start
        uf.box_print('BCNN Training Time: {}'.format(end))
        mcdropout_test(model_dropout, args, test_loader)
Example #10
def main():
    args = build_parser()
    kwargs = action_args(args)
    # Setup GPU if necessary
    torch.backends.cudnn.benchmark, dtype = uf.gpu_setup(args.cuda)
    torch.set_default_tensor_type(dtype)

    train_loader, test_loader = load_data(args, kwargs)
    model_standard = LeNetStandard()
    model_dropout = LeNetDropout()
    if args.cuda:
        model_standard.cuda()
        model_dropout.cuda()

    if not os.path.exists('checkpoint'):
        os.makedirs('checkpoint')

    # Train both models
    if args.mode == 0:
        optimizer_standard = optim.SGD(model_standard.parameters(), lr=args.lr, momentum=args.momentum)
        optimizer_dropout = optim.SGD(model_dropout.parameters(), lr=args.lr, momentum=args.momentum)

        uf.box_print('Train standard LeNet')
        start = time.time()
        for epoch in range(1, args.epochs + 1):
            train(model_standard, optimizer_standard, epoch, args, train_loader)
        end = time.time()-start
        uf.box_print('Training Time for Standard Model: {}'.format(end))
        test(model_standard, args, test_loader)

        uf.box_print('Train LeNet with dropout at all layers')
        start = time.time()
        for epoch in range(1, args.epochs + 1):
            train(model_dropout, optimizer_dropout, epoch, args, train_loader)
        end = time.time()-start
        uf.box_print('BCNN Training Time: {}'.format(end))
        mcdropout_test(model_dropout, args, test_loader)

        uf.box_print('Save checkpoint/'+'LeNet_stadard'+str(epoch)+'.pth.tar')
        state = {'state_dict': model_standard.state_dict()}
        filename = 'src/vision/checkpoint/'+'LeNet_stadard'+str(epoch)+'.pth.tar'
        torch.save(state, filename)

        uf.box_print('Save checkpoint/'+'LeNet_dropout'+str(epoch)+'.pth.tar')
        state = {'state_dict': model_dropout.state_dict()}
        filename = 'src/vision/checkpoint/'+'LeNet_dropout'+str(epoch)+'.pth.tar'
        torch.save(state, filename)

    # Test models on clean MNIST dataset
    elif args.mode == 1:
        ckpt_standard = torch.load('src/vision/checkpoint/LeNet_stadard5.pth.tar')
        model_standard.load_state_dict(ckpt_standard['state_dict'])
        test(model_standard, args, test_loader)

        ckpt_dropout = torch.load('src/vision/checkpoint/LeNet_dropout5.pth.tar')
        model_dropout.load_state_dict(ckpt_dropout['state_dict'])
        mcdropout_test(model_dropout, args, test_loader)

    # Test uncertainty on MNIST images rotated through 180 degrees
    elif args.mode == 2:
        ckpt_dropout = torch.load('src/vision/checkpoint/LeNet_dropout5.pth.tar')
        model_dropout.load_state_dict(ckpt_dropout['state_dict'])
        uncertainty_test(model_dropout, args, test_loader)

    # Test models on adversarial images
    elif args.mode == 3:
        ckpt_standard = torch.load('src/vision/checkpoint/LeNet_stadard5.pth.tar')
        model_standard.load_state_dict(ckpt_standard['state_dict'])
        adv = Adversary(model_standard, args.fgsmeps)
        fgsm_test(model_standard, adv, args, test_loader)
        print('Total Fooled: {}'.format(adv.counter))

    elif args.mode == 4:
        ckpt_dropout = torch.load('src/vision/checkpoint/LeNet_dropout5.pth.tar')
        model_dropout.load_state_dict(ckpt_dropout['state_dict'])
        adv = Adversary(model_dropout, args.fgsmeps)
        fgsm_test_mc(model_dropout, adv, args, test_loader, epsilon=0.5)

    elif args.mode == 5:
        X_train, y_train, X_test, y_test = load_numpy()
        knn_model = KNN(10)
        knn_model.fit(X_train, y_train)
        acc = knn_model.predict(X_test, y_test)
        print('KNN Accuracy: {}'.format(acc))

    else:
        print('--mode argument is invalid\nvalid modes: train (0), test (1), uncertainty test (2), FGSM test (3), FGSM MC-dropout test (4), KNN baseline (5)')
Example #11
def fgsm_test_mc(model,
                 adversary,
                 args,
                 test_loader,
                 epsilon=1.0,
                 model_name='bcnn',
                 data_name='MNIST'):
    """
    Test a BCNN against adversaries. The epsilon parameter here (not to be confused with the epsilon used inside FGSM) controls the proportion of images that are perturbed, so uncertainty values computed from original and perturbed images can be compared.

    :param model: A BCNN
    :type model: Torch Model
    :param adversary: Adversary object capable of perturbing images
    :type adversary: Adversary Object
    :param args: User defined arguments to control testing parameters
    :type args: Argparser object
    :param test_loader: Set of test data to be experimented upon
    :type test_loader: Torch DataLoader
    :param epsilon: Value between 0 and 1 to control the proportion of images perturbed. Epsilon = 1 implies that every image is perturbed.
    :type epsilon: float
    """
    uf.box_print('Calculating MC-Dropout Values for Adversarial Images')
    model.train()
    passes = 100
    results = []
    for i, (data,
            target) in enumerate(tqdm(test_loader, desc='Batching Test Data')):
        if epsilon >= 1.0:
            orig_pred, orig_conf = mc_make_prediction(data, target, model,
                                                      passes)

            # Perturb image
            data = adversary.fgsm(data, target, i)

            # Make prediction on perturbed image
            adv_pred, adv_conf = mc_make_prediction(data, target, model,
                                                    passes)
            results.append([
                int(orig_pred), orig_conf,
                int(adv_pred), adv_conf,
                target.item()
            ])
            results_df = pd.DataFrame(results,
                                      columns=[
                                          'Original Prediction',
                                          'Original Confidence',
                                          'Adversary Prediction',
                                          'Adversary Confidence', 'Truth'
                                      ])
            results_df.epsilon = adversary.eps
            results_df.to_csv(
                'results/experiment3/{}/{}_fgsm_{}_{}.csv'.format(
                    model_name, model_name, adversary.eps, data_name))
        else:
            adv = 'No Adversary'
            rand = np.random.rand()
            if rand < epsilon:
                data = adversary.fgsm(data, target)
                adv = 'Adversary'
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            output_list = []
            for _ in range(passes):
                output_list.append(torch.unsqueeze(F.softmax(model(data), dim=1), 0))
            output_mean = torch.cat(output_list, 0).mean(0)
            output_var = torch.cat(output_list, 0).var(0).mean().item()
            confidence = output_mean.data.cpu().numpy().max()
            predict = output_mean.data.cpu().numpy().argmax()
            results.append(
                [predict.item(),
                 confidence.item(),
                 target.item(), adv])
            results_df = pd.DataFrame(
                results,
                columns=['prediction', 'confidence', 'truth', 'adv_status'])
            results_df.to_csv('results/fgsm_{}_bnn.csv'.format(epsilon),
                              index=False)
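
The mc_make_prediction helper called in the epsilon >= 1.0 branch is not part of this listing. A minimal sketch, assuming it mirrors the MC-dropout averaging in the else branch and returns the predicted class together with its mean softmax confidence (the target argument is kept only to match the call site):

def mc_make_prediction(data, target, model, passes):
    # Hypothetical helper: average `passes` stochastic softmax outputs (the caller keeps
    # dropout active via model.train()) and return the argmax class and its mean confidence.
    # Assumes `torch` and `torch.nn.functional as F` as elsewhere in this listing.
    output_list = [torch.unsqueeze(F.softmax(model(data), dim=1), 0) for _ in range(passes)]
    output_mean = torch.cat(output_list, 0).mean(0).data.cpu().numpy()
    return output_mean.argmax(), output_mean.max()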
Example #12
        pred = np.argmax(model.predict(data))
        sto_results.append(pred)
    mean_res = np.round(np.mean(sto_results), 0).astype(int)
    var_res = np.var(sto_results)
    return mean_res, var_res


if __name__ == '__main__':
    args = build_parser()

    # Load Data
    train, test = get_data(small=args.small)
    test_sample, _ = train.next()
    data_min, data_max = np.amin(test_sample), np.amax(test_sample)
    if args.train:
        uf.box_print('Training Model')

        # Compile model
        model = lenet_all()
        model.compile(Adam(lr=0.001),
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])

        # Define Callbacks
        tbCallBack = TensorBoard(log_dir='./Graph',
                                 histogram_freq=0,
                                 write_graph=True,
                                 write_images=True)
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=3),
            ModelCheckpoint(
Example #13

if __name__ == '__main__':
    args = build_parser()

    # Load Data
    xray_data = KDataset()
    xray_data.load_data('data/chest_xray/')

    # Initialise model
    bcnn = KerasBCNN(xray_data)
    bcnn.build_model(0.5)

    # train, test = get_data()
    if args.train:
        uf.box_print('Training Model')

        # Compile model
        bcnn.compile_model(Adam(lr=0.001),
                           loss='categorical_crossentropy',
                           metric=['accuracy'])
        bcnn.train_model(50)

    else:
        uf.box_print('Loading Weights')
        bcnn.load_model('src/vision/checkpoint/best_model.h5')

    if args.adversary:
        uf.box_print('Crafting {} Adversaries with epsilon = {}'.format(
            args.observations, args.epsilon))
        # Retrieve the tensorflow session