def main(argv=None):
    # Collect every .jpg/.png file in the source directory.
    imgnames = filter(
        lambda x: x.lower().endswith(".jpg") or x.lower().endswith(".png"),
        os.listdir(FLAGS.srcimgs))

    # Load each image, resize it to the model's input size, and normalize it.
    imgs = np.asarray(
        map(lambda x: preprocess_yadav(x),
            map(lambda x: cv2.resize(
                    read_img(os.path.join(FLAGS.srcimgs, x)),
                    (FLAGS.img_cols, FLAGS.img_rows)),
                imgnames)),
        dtype=np.float32)
    print 'Loaded images from %s' % FLAGS.srcimgs
    sys.stdout.flush()

    results = []
    with tf.Session() as sess:
        model = YadavModel(train=False)
        saver = tf.train.Saver()
        saver.restore(sess, FLAGS.weights)
        print 'Loaded model from %s' % FLAGS.weights
        sys.stdout.flush()

        # One forward pass over the whole batch, with dropout disabled.
        output = sess.run(model.labels_pred,
                          feed_dict={model.features: imgs,
                                     model.keep_prob: 1.0})

        for i in range(len(imgs)):
            results.append((imgnames[i], top3_as_string(output, i)))

    for i in range(len(results)):
        print results[i][0], results[i][1]
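# For reference, a minimal sketch of the helpers this script assumes.
# read_img, preprocess_yadav, and top3_as_string are defined elsewhere in
# the repo; the bodies below are assumptions inferred from how they are
# called (and from the [-0.5, 0.5] pixel range used by the scripts further
# down), not the actual implementations:
def read_img_sketch(path):
    return cv2.imread(path)  # assumed: image loaded as a uint8 array

def preprocess_yadav_sketch(img):
    return img / 255.0 - 0.5  # assumed: scale pixels into [-0.5, 0.5]

def top3_as_string_sketch(output, i):
    top3 = np.argsort(output[i])[-3:][::-1]  # three highest-scoring classes
    return ' '.join('%d: %.3f' % (c, output[i][c]) for c in top3)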
def main(argv=None):
    d = {}  # added: filename -> softmax vector, for the CSV export below

    imgnames = filter(
        lambda x: x.lower().endswith(".jpg") or x.lower().endswith(".png"),
        os.listdir(FLAGS.srcimgs))
    imgs = np.asarray(
        map(lambda x: preprocess_yadav(x),
            map(lambda x: cv2.resize(
                    read_img(os.path.join(FLAGS.srcimgs, x)),
                    (FLAGS.img_cols, FLAGS.img_rows)),
                imgnames)),
        dtype=np.float32)
    # imgs[:5000] takes the first 5000 images and imgs[5000:] the rest;
    # adjust this slice to process the directory in manageable chunks.
    imgs = imgs[:, :, :]
    print 'Loaded images from %s' % FLAGS.srcimgs
    sys.stdout.flush()

    results = []
    with tf.Session() as sess:
        model = YadavModel(train=False)
        saver = tf.train.Saver()
        saver.restore(sess, FLAGS.weights)
        print 'Loaded model from %s' % FLAGS.weights
        sys.stdout.flush()

        print("anticipation of softmax production")
        # Too many images causes the line below to fail (most likely the
        # whole batch exceeds available memory); slice imgs above if so.
        output = sess.run(model.labels_pred,
                          feed_dict={model.features: imgs,
                                     model.keep_prob: 1.0})
        print("completion of softmax production")

        for i in range(len(imgs)):
            d.update({imgnames[i]: list(output[i])})  # added
            results.append((imgnames[i], top3_as_string(output, i)))

    for i in range(len(results)):
        print results[i][0], results[i][1]

    df = pd.DataFrame(d).T  # added: one row per image, one column per class
    df.to_csv(os.path.join(FLAGS.srcimgs, "predictions.csv"))  # added
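# The full-directory sess.run above can fail on large batches; a minimal
# batched variant that avoids the manual slicing knob (a sketch; the batch
# size of 500 is an arbitrary assumption, tune it to available memory):
batch_size = 500
outputs = []
for start in range(0, len(imgs), batch_size):
    batch = imgs[start:start + batch_size]
    outputs.append(sess.run(model.labels_pred,
                            feed_dict={model.features: batch,
                                       model.keep_prob: 1.0}))
output = np.concatenate(outputs, axis=0)  # same shape as the one-shot run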
def main(argv=None):
    imgnames = filter(
        lambda x: x.lower().endswith(".jpg") or x.lower().endswith(".png"),
        os.listdir(FLAGS.srcimgs))
    imgs = np.asarray(
        map(lambda x: preprocess_yadav(x),
            map(lambda x: cv2.resize(
                    read_img(os.path.join(FLAGS.srcimgs, x)),
                    (FLAGS.img_cols, FLAGS.img_rows)),
                imgnames)),
        dtype=np.float32)
    print 'Loaded images from %s' % FLAGS.srcimgs
    sys.stdout.flush()

    results = []
    with tf.Session() as sess:
        model = YadavModel(train=False)
        saver = tf.train.Saver()
        saver.restore(sess, FLAGS.weights)
        print 'Loaded model from %s' % FLAGS.weights
        sys.stdout.flush()

        output = sess.run(model.labels_pred,
                          feed_dict={model.features: imgs,
                                     model.keep_prob: 1.0})

        # Class whose activation map we visualize; hardcoded for now.
        # index = np.argmax(output, axis=1)[0]
        index = 5
        print 'index:', index

        # Gradients of the chosen class score w.r.t. the feature maps.
        heatmap = sess.run(model.heatmap,
                           feed_dict={model.features: imgs,
                                      model.keep_prob: 1.0,
                                      model.index: index})
        heatmap = np.array(heatmap)
        print 'heatmap.shape1', heatmap.shape
        heatmap = np.sum(np.maximum(heatmap[0], 0), axis=0)
        print 'heatmap.shape2', heatmap.shape

        # Global-average-pool the gradients to get one weight per channel.
        pool_grad = np.mean(heatmap, axis=(0, 1))
        print 'pool_grad:', pool_grad
        print 'pool_grad.shape', pool_grad.shape

        # Weight each of the 32 feature-map channels by its pooled gradient,
        # then average, rectify, and normalize to [0, 1] (Grad-CAM style).
        for i in range(32):
            # heatmap[:, :, i] *= np.mean(heatmap[:, :, i])
            heatmap[:, :, i] *= pool_grad[i]
        heatmap = np.mean(heatmap, axis=-1)
        heatmap = np.maximum(heatmap, 0)
        heatmap /= np.max(heatmap)

        # Heatmap visualization.
        plt.matshow(heatmap)
        plt.savefig("heatmap.png")

        # Superimpose the colorized heatmap on the original image.
        ori_img = cv2.imread(
            "/home/yebin/work/gtsrb-cnn-attack/heat_map_test/6.png")
        ori_img = cv2.resize(ori_img, (256, 256))
        heatmap = cv2.resize(heatmap, (256, 256))
        heatmap = np.uint8(255 * heatmap)
        cv2.imwrite("gray.png", heatmap)
        heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
        cv2.imwrite("heatmap256.png", heatmap)
        superimposed_img = heatmap * 0.4 + ori_img
        cv2.imwrite("heatmap_and_img.png", superimposed_img)

        for i in range(len(imgs)):
            results.append((imgnames[i], top3_as_string(output, i)))

    for i in range(len(results)):
        print results[i][0], results[i][1]
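# The per-channel weighting loop above can be written as one vectorized
# step, and the hardcoded class can come from the prediction (this mirrors
# the commented-out argmax line). A sketch, not part of the original script;
# it assumes heatmap of shape (H, W, 32) and pool_grad of shape (32,), as
# in the loop above:
index = int(np.argmax(output, axis=1)[0])  # class predicted for image 0
heatmap = np.maximum(np.mean(heatmap * pool_grad, axis=-1), 0)
heatmap /= np.max(heatmap)                 # normalize into [0, 1]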
def main(argv=None):
    with tf.device(FLAGS.device):
        with tf.Session() as sess:
            print "Noise loaded from", FLAGS.model_path
            print "Mask", FLAGS.attack_mask
            print "Source image", FLAGS.src_image

            bimg = cv2.resize(read_img(FLAGS.src_image),
                              (FLAGS.img_rows, FLAGS.img_cols)) / 255.0 - 0.5

            noise = tf.Variable(
                tf.random_uniform(
                    [FLAGS.img_rows, FLAGS.img_cols, FLAGS.nb_channels],
                    -0.5, 0.5),
                name='noiseattack/noise',
                collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'adv_var'])

            saver = tf.train.Saver(var_list=[noise])
            saver.restore(sess, FLAGS.model_path)

            noise_val = sess.run(noise)
            write_img('noise.png', noise_val * 255.0)

            mask = read_img(FLAGS.attack_mask) / 255.0
            noise_val = noise_val * mask

            write_img(FLAGS.output_path, (bimg + noise_val + 0.5) * 255)
            print "Wrote image to", FLAGS.output_path
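# Note: bimg and the masked noise each lie in [-0.5, 0.5], so the composite
# (bimg + noise_val + 0.5) * 255 can leave [0, 255] wherever the two
# saturate in the same direction. A clipped variant (an optional tweak,
# not in the original script):
out = np.clip((bimg + noise_val + 0.5) * 255.0, 0.0, 255.0)
write_img(FLAGS.output_path, out)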
def main(argv=None):
    with tf.device(FLAGS.device):
        with tf.Session() as sess:
            print "Noise loaded from", FLAGS.model_path
            print "Mask", FLAGS.attack_mask
            print "Source image", FLAGS.big_image

            if FLAGS.resize_method == "area":
                resize_met = tf.image.ResizeMethod.AREA
            elif FLAGS.resize_method == "bicubic":
                resize_met = tf.image.ResizeMethod.BICUBIC
            elif FLAGS.resize_method == "bilinear":
                resize_met = tf.image.ResizeMethod.BILINEAR
            elif FLAGS.resize_method == "nearestneighbor":
                resize_met = tf.image.ResizeMethod.NEAREST_NEIGHBOR
            else:
                raise Exception(
                    "resize method needs to be one of: "
                    "area, bicubic, bilinear, nearestneighbor")

            bimg = cv2.resize(
                read_img(FLAGS.big_image),
                (FLAGS.resize_rows, FLAGS.resize_cols)) / 255.0 - 0.5
            print 'bimg shape', bimg.shape

            noise = tf.Variable(
                tf.random_uniform(
                    [FLAGS.img_rows, FLAGS.img_cols, FLAGS.nb_channels],
                    -0.5, 0.5),
                name='noiseattack/noise',
                collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'adv_var'])
            mask = tf.placeholder(
                tf.float32,
                shape=(FLAGS.img_rows, FLAGS.img_cols, FLAGS.nb_channels),
                name="noiseattack/noise_mask")

            saver = tf.train.Saver(var_list=[noise])
            saver.restore(sess, FLAGS.model_path)

            if not FLAGS.resize_noise_only:
                if FLAGS.downsize_first:
                    noise_val = sess.run(
                        tf.image.resize_images(
                            tf.image.resize_images(noise * mask, (32, 32)),
                            size=(bimg.shape[0], bimg.shape[1]),
                            method=resize_met),
                        feed_dict={mask: read_img(FLAGS.attack_mask) / 255.0})
                else:
                    noise_val = sess.run(
                        tf.image.resize_images(
                            noise * mask,
                            size=(bimg.shape[0], bimg.shape[1]),
                            method=resize_met),
                        feed_dict={mask: read_img(FLAGS.attack_mask) / 255.0})
            else:
                noise_val = sess.run(
                    tf.image.resize_images(
                        noise,
                        size=(bimg.shape[0], bimg.shape[1]),
                        method=resize_met))
            print 'noise shape', noise_val.shape

            mask = read_img(FLAGS.attack_mask) / 255.0
            print 'mask shape', mask.shape
            noise_val = noise_val * mask

            write_img(FLAGS.output_path, (bimg + noise_val + 0.5) * 255)
            print "Wrote image to", FLAGS.output_path
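# For comparison, the resize_noise_only branch without going through a TF
# op: cv2 can upscale the restored numpy array directly (a sketch; the cv2
# interpolation flags stand in for the tf.image.ResizeMethod values, and
# note cv2.resize takes (width, height) rather than (rows, cols)):
interp = {"area": cv2.INTER_AREA,
          "bicubic": cv2.INTER_CUBIC,
          "bilinear": cv2.INTER_LINEAR,
          "nearestneighbor": cv2.INTER_NEAREST}[FLAGS.resize_method]
noise_val = cv2.resize(sess.run(noise), (bimg.shape[1], bimg.shape[0]),
                       interpolation=interp)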
def main(argv=None):
    with tf.device(FLAGS.device):
        print "Parameters"
        for k in sorted(FLAGS.__dict__["__flags"].keys()):
            print k, FLAGS.__dict__["__flags"][k]

        op, model_obj, sess, pholders, varops = setup_attack_graph()
        model = varops['adv_pred']

        # Load and preprocess every .png in the attack source directory.
        data = map(
            lambda z: preprocess_yadav(z),
            map(lambda y: read_img(os.path.join(FLAGS.attack_srcdir, y)),
                filter(lambda x: x.endswith(".png"),
                       os.listdir(FLAGS.attack_srcdir))))
        num_images = len(data)

        feed_dict = {
            pholders['image_in']: data,
            pholders['attack_target']: get_adv_target(nb_inputs=num_images),
            pholders['noise_mask']: read_img(FLAGS.attack_mask) / 255.0,
            model_obj.keep_prob: 1.0
        }
        if FLAGS.printability_optimization:
            feed_dict[pholders['printable_colors']] = get_print_triplets()

        # Used to save checkpoints after each epoch.
        saver = tf.train.Saver(max_to_keep=5)

        # Loss of the clean (noise-free) images against the attack target,
        # for comparison with the adversarial loss each epoch.
        clean_model_loss = model_loss(pholders['attack_target'],
                                      varops['adv_pred'], mean=True)

        latest_misrate = FLAGS.min_rate_to_save
        latest_loss = 10000

        for i in xrange(FLAGS.attack_epochs):
            print 'Epoch %d' % i,
            sys.stdout.flush()

            # One optimization step; also fetch the current losses, the
            # noise-perturbed inputs, and their predicted classes.
            _, train_loss, mod_loss, noisy_in, noisy_classes = sess.run(
                (op,
                 varops['adv_loss'],
                 varops['loss'],
                 varops['noisy_inputs'],
                 varops['adv_pred']),
                feed_dict=feed_dict)

            if FLAGS.regloss != "none":
                reg_loss = sess.run(varops['reg_loss'], feed_dict=feed_dict)
            else:
                reg_loss = 0

            # Evaluate the same batch with an all-zero mask, i.e. no noise.
            clean_loss, clean_classes = sess.run(
                (clean_model_loss, model),
                feed_dict={
                    pholders['image_in']: data,
                    pholders['attack_target']:
                        get_adv_target(nb_inputs=num_images),
                    pholders['noise_mask']: np.zeros(
                        [FLAGS.input_rows, FLAGS.input_cols,
                         FLAGS.nb_channels]),
                    model_obj.keep_prob: 1.0
                })

            print "adversarial loss %.5f reg loss %.5f model loss %.5f " \
                  "model loss on clean img: %.5f" % (
                      train_loss, reg_loss, mod_loss, clean_loss),
            sys.stdout.flush()

            if FLAGS.printability_optimization:
                print "noise NPS %.5f" % sess.run(varops['printer_error'],
                                                  feed_dict=feed_dict),

            # An attack succeeds on an image when the noise flips its class
            # to the target while the clean image is classified differently.
            num_misclassified = 0
            for j in xrange(num_images):
                clean_classification = np.argmax(clean_classes[j])
                noise_classification = np.argmax(noisy_classes[j])
                if clean_classification != noise_classification \
                        and noise_classification == FLAGS.target_class:
                    num_misclassified += 1
            proportion_misclassified = \
                float(num_misclassified) / float(num_images)
            print 'percent misclassified images %.1f' % (
                proportion_misclassified * 100.0)

            # Checkpoint when the misclassification rate improves, or ties
            # with a lower loss (octagon masks checkpoint on loss alone).
            if proportion_misclassified > latest_misrate or \
                    (proportion_misclassified == latest_misrate and
                     train_loss < latest_loss) or \
                    ("octagon" in FLAGS.attack_mask and
                     train_loss < latest_loss):
                latest_misrate = proportion_misclassified
                latest_loss = train_loss
                saver.save(sess,
                           os.path.join('optimization_output',
                                        FLAGS.checkpoint, 'model',
                                        FLAGS.checkpoint),
                           global_step=i)

            if FLAGS.save_all_noisy_images:
                write_img(
                    os.path.join('optimization_output', FLAGS.checkpoint,
                                 "noisy_images",
                                 "noisyimg_%s_epoch_%d.png" %
                                 (FLAGS.checkpoint, i)),
                    ((noisy_in[0] + 0.5) * 255).astype(int))
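# For reference, a minimal sketch of what model_loss is assumed to compute
# here: mean softmax cross-entropy between the attack target distribution
# and the model's output. The real helper lives elsewhere (cleverhans-style
# utils); this body is an assumption inferred from the call site above,
# not its source:
def model_loss_sketch(y, pred, mean=True):
    loss = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)
    return tf.reduce_mean(loss) if mean else loss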