# NOTE(review): collapsed (single-physical-line) notebook-style cell; kept byte-identical
# because its tail is truncated — the final `Visualize(...)` call stops mid-argument-list
# at `clip_above_percentile=95,`, so reformatting would require guessing the missing args.
# What this cell visibly does:
#   1. Builds `inception_predictions_and_gradients` via `make_predictions_and_gradients(sess, graph)`
#      (helper defined elsewhere; presumably returns a fn mapping images -> (preds, grads) — TODO confirm).
#   2. Defines `load_image(img_path)`: decodes a JPEG with TF ops (`tf.read_file` +
#      `tf.image.decode_jpeg`, channels=3) and evaluates it eagerly via `sess.run`,
#      returning a NumPy HxWx3 uint8 array (dtype presumed from decode_jpeg default — verify).
#   3. Loads one hard-coded sample image from an absolute local path.
#   4. Prints the top predicted label and softmax score via `top_label_id_and_score`.
#   5. Computes raw gradients for the top label, then attributions via
#      `random_baseline_integrated_gradients` (steps=50, num_random_trials=10).
#   6. Begins a `Visualize(attributions, img, clip_above_percentile=95, ...)` call —
#      remaining keyword arguments are outside this view.
# TODO(review): the absolute user-specific path should become a parameter/config value.
# Make the predictions_and_gradients function inception_predictions_and_gradients = make_predictions_and_gradients(sess, graph) def load_image(img_path): # "/Users/pin-jutien/Integrated-Gradients/Images/70bfca4555cca92e.jpg" image = tf.image.decode_jpeg(tf.read_file(img_path), channels=3) img = sess.run(image) return img img = load_image("/Users/pin-jutien/Integrated-Gradients/Images/70bfca4555cca92e.jpg") # Determine top label and score. top_label_id, score = top_label_id_and_score(img, inception_predictions_and_gradients) print("Top label: %s, score: %f" % (labels[top_label_id], score)) # Compute attributions based on just the gradients. _, gradients = inception_predictions_and_gradients([img], top_label_id) attributions = random_baseline_integrated_gradients( img, top_label_id, inception_predictions_and_gradients, steps=50, num_random_trials=10) Visualize( attributions, img, clip_above_percentile=95,
# NOTE(review): collapsed single-physical-line cell; kept byte-identical because the
# chunk boundary appears to cut the inner loop body off right after `counterPert += 1`
# (the original cell likely continues) — reconstructing indentation past that point
# would be guessing. Depends on names defined outside this view: `sameFlag`,
# `image_iterator`, `attr_iterator`, `batch_size`, `counterSame`, `counterPert`,
# `targetLabel`, and the `top_label_id_and_layer` / `top_label_id_and_score` helpers.
# What this cell visibly does:
#   - Allocates 1000-slot stores for predicted label, softmax confidence, and ground-truth
#     label per image (implies 5 batches * batch_size must be <= 1000 — TODO confirm).
#   - Iterates 5 batches from two parallel iterators (images and precomputed attributions);
#     `count = i * batch_size + j` is the flat per-image index.
#   - Records top predicted label + softmax score per image; rescales the image from
#     [-1, 1] back to [0, 255] (`(x + 1) / 2 * 255`).
#   - Counts unperturbed predictions (`topLabel == labels[j]` -> counterSame, marks
#     sameFlag) vs. successful targeted attacks (`topLabel == targetLabel` -> counterPert),
#     per the inline comment about targeted-attack success.
# NOTE(review): `topLabel` is assigned twice (from top_label_id_and_layer, then
# overwritten by top_label_id_and_score); the first call's label result — and `px` —
# appear unused here. Possibly dead code; verify against the missing tail before removing.
softMaxLabelStore = np.zeros(1000) softMaxConfStore = np.zeros(1000) actualLabelStore = np.zeros(1000) print(sameFlag.shape[0]) for i in range(5): labels, images, path = next(image_iterator) _, attrs, path = next(attr_iterator) for j in range(0, batch_size): count = i * batch_size + j topLabel, px = top_label_id_and_layer( images[j], inception_predictions_and_gradients) topLabel, softMaxScore = top_label_id_and_score( images[j], inception_predictions_and_gradients) softMaxLabelStore[count] = topLabel softMaxConfStore[count] = softMaxScore actualLabelStore[count] = labels[j] attributions = attrs[j] img = (images[j] + 1.0) / 2.0 * 255.0 #img = np.uint8((images[j]+1.0)/2.0*255.0) #targeted attack is successfull only when the new label is equal to the target label if topLabel == labels[j]: counterSame += 1 sameFlag[count] = 1 elif topLabel == targetLabel: counterPert += 1
# NOTE(review): collapsed single-physical-line cell; kept byte-identical because it is
# clearly truncated — it ends inside an unterminated commented-out fragment
# (`#mask = np.array(Visualize( #attributions, img,`), so the inner `for k` loop body
# is missing from this view. Depends on outside names: `percent`, `image_iterator`,
# `attr_iterator`, `batch_size`, `top_label_id_and_score`.
# What this cell visibly does:
#   - Resets counters: scalar counterSame/counterPert, plus per-percentile positive/negative
#     counter arrays sized to `len(percent)` (percent presumably a list of attribution
#     clip/mask percentiles — TODO confirm).
#   - `scoreStore` is (3, 1000, len(percent), 2); axis meanings are not visible here —
#     verify against the code that fills it (in the missing tail).
#   - Iterates 20 batches (implies 20 * batch_size <= 1000 for the flat index `count`
#     — TODO confirm), tallying whether the top predicted label matches the ground truth.
#   - Rescales each image from [-1, 1] to [0, 255] as in the other evaluation loop.
#   - Begins a per-percentile loop `for k in range(len(percent))` whose body (apparently
#     a Visualize-based masking step, judging by the commented fragment) lies beyond
#     this chunk.
# NOTE(review): trailing semicolons (`...len(percent));`, `count = i*batch_size+j;`) are
# un-Pythonic leftovers; harmless but worth cleaning once the full cell is recovered.
counterSame = 0 counterPert = 0 counterSamePos = np.zeros(len(percent)); counterPertPos = np.zeros(len(percent)); counterSameNeg = np.zeros(len(percent)); counterPertNeg = np.zeros(len(percent)); scoreStore = np.zeros((3,1000,len(percent),2)) for i in range(20): labels, images , path= next(image_iterator) _, attrs , path= next(attr_iterator) for j in range(0,batch_size): topLabel, score = top_label_id_and_score(images[j], inception_predictions_and_gradients) attributions = attrs[j] img = (images[j]+1.0)/2.0*255.0 #img = np.uint8((images[j]+1.0)/2.0*255.0) if topLabel == labels[j]: counterSame += 1 else: counterPert += 1 count = i*batch_size+j; for k in range(len(percent)): #mask = np.array(Visualize( #attributions, img,