Example #1
# imports are omitted in the original snippet; with TF 1.x, roughly:
import tensorflow as tf
from tensorflow.keras import backend as K

EPOCHS = 10
FGSM_PARAMS = {
    'eps': 0.05,     # FGSM perturbation magnitude
    'clip_min': 0.,  # keep adversarial pixels in the valid [0, 1] range
    'clip_max': 1.,
}

# initialize keras/tf session
sess = tf.Session(graph=tf.get_default_graph())
K.set_session(sess)

# get dataset
(train_images, train_labels), (test_images, test_labels) = get_scaled_fashion_mnist()

# load models
model_1 = get_vanilla_NN()
model_2 = get_QNN(32)

# train models
model_1.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
model_2.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)

# evaluate models on the test set
_, test_acc_1 = model_1.evaluate(test_images, test_labels, verbose=0)
_, test_acc_2 = model_2.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy NN_1: " + str(test_acc_1))
print("Test accuracy NN_2: " + str(test_acc_2))

# filter samples correctly classified by both models
test_images, test_labels = filter_correctly_classified_samples(test_images, test_labels, [model_1, model_2])
print("From now on using " + str(test_images.shape[0]) + " samples that are correctly classified by both networks.")
Example #2
# Carlini-Wagner L2 attack parameters. The opening of this dict (and its name)
# is not shown in the source; CW_PARAMS is an assumed name used here so the
# snippet stays runnable.
CW_PARAMS = {
    'abort_early': True,    # if we stop improving, abort gradient descent early
    'learning_rate': 1e-2,  # larger values converge faster to less accurate results
    'initial_const': 1e-3,  # the initial constant c to pick as a first guess
}

# initialize keras/tf session
sess = tf.Session(graph=tf.get_default_graph())
K.set_session(sess)

# get dataset
(train_images, train_labels), (test_images, test_labels) = get_fashion_mnist()
#(train_images, train_labels), (test_images, test_labels) = get_mnist()

# load models
model_2bits_1 = get_QNN(2)
model_4bits_1 = get_QNN(4)
model_8bits_1 = get_QNN(8)
model_16bits_1 = get_QNN(16)
model_32bits_1 = get_QNN(32)
model_vanilla_nn_1 = get_vanilla_NN()

model_2bits_2 = get_QNN(2)
model_4bits_2 = get_QNN(4)
model_8bits_2 = get_QNN(8)
model_16bits_2 = get_QNN(16)
model_32bits_2 = get_QNN(32)
model_vanilla_nn_2 = get_vanilla_NN()

# train models
print("Training models...")
Example #3
# imports as in Example #1
EPOCHS = 10  # EPOCHS is used below but its definition is not shown; 10 matches Example #1
FGSM_PARAMS = {
    'eps': 0.05,
    'clip_min': 0.,
    'clip_max': 1.,
}

# initialize keras/tf session
sess = tf.Session(graph=tf.get_default_graph())
K.set_session(sess)

# get dataset
(train_images, train_labels), (test_images, test_labels) = get_scaled_fashion_mnist()

# load models
model_2bits = get_QNN(2)

# train models
model_2bits.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)

_, test_acc = model_2bits.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of NN with 2 bits: " + str(test_acc))

# save weights
print("Saving weights")
model_2bits.save_weights("2bits-weights.h5")

# load not quantized model and quantized weights
print("Loading the weights into the same-architecture NN with no quantization involved")
model = get_vanilla_NN()
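The snippet stops right after building the unquantized model. A minimal continuation, assuming get_vanilla_NN() builds layers with the same shapes as get_QNN(2) (as the print above suggests), would be to load the saved 2-bit weights and re-evaluate:

# load the quantized weights into the full-precision architecture and compare accuracy
model.load_weights("2bits-weights.h5")
_, test_acc_fp = model.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy with 2-bit weights in the full-precision NN: " + str(test_acc_fp))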