Example #1
File: spm.py Project: CS534/SVM
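import numpy as np

# extract_DenseSift_descriptors and build_codebook are imported from
# utils.py (see the comment further down and Example #4); load_cifar10_data
# is assumed to live there as well.
from utils import load_cifar10_data, extract_DenseSift_descriptors, build_codebook

        # Tail of the spatial-pyramid encoding function in spm.py: the
        # per-level codes are concatenated with the standard SPM weights of
        # Lazebnik et al. (2006), i.e. 1/4 for level 0 (1 cell) and level 1
        # (4 cells), and 1/2 for level 2 (16 cells).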
        code_level_0 = 0.25 * np.asarray(code[0]).flatten()
        code_level_1 = 0.25 * np.asarray(code[1:5]).flatten()
        code_level_2 = 0.5 * np.asarray(code[5:]).flatten()
        return np.concatenate((code_level_0, code_level_1, code_level_2))


VOC_SIZE = 100
PYRAMID_LEVEL = 1

DSIFT_STEP_SIZE = 4
# DSIFT_STEP_SIZE is related to the function
# extract_DenseSift_descriptors in utils.py
# and build_spatial_pyramid in spm.py
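# A minimal sketch (an assumption, not the project's utils.py) of how a
# dense-SIFT extractor typically consumes DSIFT_STEP_SIZE: SIFT descriptors
# are computed on a regular grid of keypoints spaced DSIFT_STEP_SIZE pixels
# apart, e.g. with OpenCV:
#
#   import cv2
#   def extract_DenseSift_descriptors(img):
#       gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#       kps = [cv2.KeyPoint(float(x), float(y), float(DSIFT_STEP_SIZE))
#              for y in range(0, gray.shape[0], DSIFT_STEP_SIZE)
#              for x in range(0, gray.shape[1], DSIFT_STEP_SIZE)]
#       return cv2.SIFT_create().compute(gray, kps)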

if __name__ == '__main__':
    x_train, y_train = load_cifar10_data(dataset='train')
    x_test, y_test = load_cifar10_data(dataset='test')

    print("Dense SIFT feature extraction")
    x_train_feature = [extract_DenseSift_descriptors(img) for img in x_train]
    x_test_feature = [extract_DenseSift_descriptors(img) for img in x_test]
    x_train_kp, x_train_des = zip(*x_train_feature)
    x_test_kp, x_test_des = zip(*x_test_feature)

    print("Train/Test split: {:d}/{:d}".format(len(y_train), len(y_test)))
    print("Codebook Size: {:d}".format(VOC_SIZE))
    print("Pyramid level: {:d}".format(PYRAMID_LEVEL))
    print("Building the codebook, it will take some time")
    codebook = build_codebook(x_train_des, VOC_SIZE)
    # import cPickle
    #
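build_codebook is imported from utils.py but not shown in this example. A
minimal sketch, assuming the codebook is a k-means vocabulary fit on the
stacked dense-SIFT descriptors (scikit-learn's MiniBatchKMeans is an
assumption, not necessarily what the project uses):

import numpy as np
from sklearn.cluster import MiniBatchKMeans

def build_codebook(descriptor_list, voc_size):
    """Cluster all local descriptors into voc_size visual words."""
    # Stack the per-image (n_i, 128) descriptor arrays into one matrix.
    all_descriptors = np.vstack([d for d in descriptor_list if d is not None])
    kmeans = MiniBatchKMeans(n_clusters=voc_size).fit(all_descriptors)
    # Each cluster center is one visual word of the vocabulary.
    return kmeans.cluster_centers_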
Example #2
from mlp_hn_clf import HighwayClassifier
import numpy as np
import tensorflow as tf
import utils

if __name__ == '__main__':
    (X_train, y_train), (X_test, y_test) = utils.load_cifar10_data()

    # Scale to [0, 1], average the RGB channels to grayscale, and flatten
    # each 32x32 image into a 1024-dimensional vector.
    X_train = (X_train / 255.0).mean(axis=3).reshape(-1, 32 * 32)
    X_test = (X_test / 255.0).mean(axis=3).reshape(-1, 32 * 32)

    y_train = y_train.ravel()
    y_test = y_test.ravel()

    # 1024 grayscale inputs, 10 classes; the third argument is presumably
    # the number of highway blocks (an assumption about HighwayClassifier).
    clf = HighwayClassifier(32 * 32, 10, 20)
    log = clf.fit(X_train,
                  y_train,
                  n_epoch=30,
                  en_exp_decay=False,
                  val_data=(X_test, y_test))
    pred = clf.predict(X_test)

    final_acc = (pred == y_test).mean()
    print("final testing accuracy: %.4f" % final_acc)
Example #3
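import sys
import pickle

import numpy as np
import torch

# args, device, cnn_model, bayes_opt, and the dataset loaders are defined
# elsewhere in the script this fragment was taken from.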
def attack_bayes():
    """
    Perform BayesOpt attack
    """

    # get dataset and list of indices of images to attack
    if args.dset == 'mnist':
        test_dataset = load_mnist_data()
        samples = np.arange(args.start, args.start + args.num_attacks)

    elif args.dset == 'cifar10':
        test_dataset = load_cifar10_data()
        samples = np.arange(args.start, args.start + args.num_attacks)

    elif args.dset == 'imagenet':
        if args.arch == 'inception_v3':
            test_dataset = load_imagenet_data(299, 299)
        else:
            test_dataset = load_imagenet_data()

        # see readme
        samples = np.load('random_indices_imagenet.npy')
        samples = samples[args.start:args.start + args.num_attacks]

    print("Length of sample_set: ", len(samples))
    results_dict = {}
    # loop over images, attacking each one if it is initially correctly classified
    for idx in samples[:args.num_attacks]:
        image, label = test_dataset[idx]
        image = image.unsqueeze(0).to(device)
        print(f"Image {idx:d}   Original label: {label:d}")
        predicted_label = torch.argmax(cnn_model.predict_scores(image))
        print("Predicted label: ", predicted_label.item())

        # ignore incorrectly classified images
        if label == predicted_label:
            # itr, success = bayes_opt(image, label)
            # Default to failure so itr/success are defined even if every
            # retry raises.
            itr, success = 0, False
            retry = 0
            while retry < 5:
                try:
                    itr, success = bayes_opt(image, label)
                    break
                except Exception:
                    print('Retry_{}'.format(retry))
                    retry += 1
            print(itr, success)
            if success:
                results_dict[idx] = itr
            else:
                results_dict[idx] = 0

        sys.stdout.flush()

    # results saved as dictionary, with entries of the form
    # dataset idx : 0 if unsuccessfully attacked, # of queries if successfully attacked
    print('RESULTS', results_dict)
    if args.save:
        pickle.dump(
            results_dict,
            open(
                f"{args.dset:s}{args.arch:s}_{args.start:d}_{args.iter:d}_{args.dim:d}_{args.eps:.2f}_{args.num_attacks:d}.pkl",
                'wb'))
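The saved pickle maps each dataset index to the number of queries used on
success, or 0 on failure. A small reader sketch (the filename below is
hypothetical, following the f-string pattern above):

import pickle

with open("cifar10resnet50_0_1000_100_0.05_50.pkl", "rb") as f:
    results = pickle.load(f)  # hypothetical filename
successes = {idx: itr for idx, itr in results.items() if itr > 0}
print("success rate: {:.2f}".format(len(successes) / len(results)))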
Example #4
from utils import extract_sift_descriptors
from utils import build_codebook
from utils import input_vector_encoder
from classifier import svm_classifier
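# load_cifar10_data is used below but not imported in the original snippet;
# it is assumed to live in utils.py alongside the other helpers.
from utils import load_cifar10_data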

import numpy as np


VOC_SIZE = 100



if __name__ == '__main__':

    # Training
    x_train, y_train = load_cifar10_data(dataset='train')
    x_test, y_test = load_cifar10_data(dataset='test')

    print "SIFT feature extraction"
    x_train = [extract_sift_descriptors(img) for img in x_train]
    x_test = [extract_sift_descriptors(img) for img in x_test]

    # Remove None in SIFT extraction
    x_train = [each for each in zip(x_train, y_train) if each[0] is not None]
    x_train, y_train = zip(*x_train)
    x_test = [each for each in zip(x_test, y_test) if each[0] is not None]
    x_test, y_test = zip(*x_test)

    print "Train/Test split: {:d}/{:d}".format(len(y_train), len(y_test))
    print "Codebook Size: {:d}".format(VOC_SIZE)