def __call__(self, x_input):
    """Constructs the model and returns probabilities for the given input."""
    reuse = True if self.built else None
    x_input = image_normalize(x_input, normalization_method[3])
    with slim.arg_scope(inception.inception_v4_arg_scope()):
        _, end_points = inception.inception_v4(
            x_input, num_classes=self.num_classes, is_training=False,
            reuse=reuse)
    self.built = True
    output = end_points['Predictions']
    # Strip off the extra reshape op at the output.
    probs = output.op.inputs[0]
    return probs
def __call__(self, x_input):
    """Constructs the model and returns probabilities for the given input."""
    reuse = True if self.built else None
    x_input = image_normalize(x_input, normalization_method[10])
    x_input = tf.image.resize_images(x_input, [224, 224])
    with slim.arg_scope(vgg.vgg_arg_scope()):
        _, end_points = vgg.vgg_19(x_input, num_classes=1000,
                                   is_training=False)
    # VGG-19 has no background class; prepend a zero column so the
    # probabilities align with the 1001-class label space.
    end_points['predictions'] = tf.nn.softmax(end_points['vgg_19/fc8'])
    end_points['predictions'] = tf.concat(
        [tf.zeros([tf.shape(x_input)[0], 1]),
         tf.reshape(end_points['predictions'], [-1, 1000])], axis=1)
    self.built = True
    output = end_points['predictions']
    return output
def __call__(self, x_input):
    """Constructs the model and returns probabilities for the given input."""
    reuse = True if self.built else None
    x_input = image_normalize(x_input, normalization_method[6])
    x_input = tf.image.resize_images(x_input, [224, 224])
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        _, end_points = resnet_v1.resnet_v1_152(
            x_input, num_classes=self.num_classes - 1, is_training=False,
            reuse=reuse)
    self.built = True
    # ResNet-v1 has no background class; prepend a zero column so the
    # probabilities align with the 1001-class label space.
    end_points['predictions'] = tf.concat(
        [tf.zeros([tf.shape(x_input)[0], 1]),
         tf.reshape(end_points['predictions'], [-1, 1000])], axis=1)
    output = end_points['predictions']
    return output
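# The wrappers above all call an image_normalize() helper that is not shown in
# this section. Below is a minimal sketch of what such a helper could look
# like, assuming the same `tf` import used throughout this file; the constants
# and the meaning of the method names ('default', 'global', 'caffe_rgb') are
# assumptions inferred from how the methods are paired with models here, not
# the definitive implementation.
def _image_normalize_sketch(x_input, method):
    """Hypothetical per-model input normalization (illustrative only)."""
    if method == 'default':
        # Inception-style: map [0, 255] pixels to [-1, 1].
        return x_input / 127.5 - 1.0
    elif method == 'caffe_rgb':
        # VGG / ResNet-v1-style: subtract a per-channel ImageNet mean,
        # keeping the RGB channel order.
        mean = tf.constant([123.68, 116.78, 103.94], dtype=tf.float32)
        return x_input - tf.reshape(mean, [1, 1, 1, 3])
    elif method == 'global':
        # Per-image standardization (zero mean, unit variance).
        return tf.map_fn(tf.image.per_image_standardization, x_input)
    return x_input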
def main(_):
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001
    ensemble_type = FLAGS.ensemble_type

    tf.logging.set_verbosity(tf.logging.INFO)

    checkpoint_path_list = [
        FLAGS.checkpoint_path_inception_v1,
        FLAGS.checkpoint_path_inception_v2,
        FLAGS.checkpoint_path_inception_v3,
        FLAGS.checkpoint_path_inception_v4,
        FLAGS.checkpoint_path_inception_resnet_v2,
        FLAGS.checkpoint_path_resnet_v1_101,
        FLAGS.checkpoint_path_resnet_v1_152,
        FLAGS.checkpoint_path_resnet_v2_101,
        FLAGS.checkpoint_path_resnet_v2_152,
        FLAGS.checkpoint_path_vgg_16,
        FLAGS.checkpoint_path_vgg_19
    ]
    normalization_method = [
        'default', 'default', 'default', 'default', 'global',
        'caffe_rgb', 'caffe_rgb', 'default', 'default',
        'caffe_rgb', 'caffe_rgb'
    ]
    pred_list = []
    for idx, checkpoint_path in enumerate(checkpoint_path_list, 1):
        with tf.Graph().as_default():
            if int(FLAGS.test_idx) == 20 and idx in [3]:
                continue
            if int(FLAGS.test_idx) in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] \
                    and int(FLAGS.test_idx) != idx:
                continue

            # Prepare graph
            if idx in [1, 2, 6, 7, 10, 11]:
                # These models expect 224x224 inputs.
                _x_input = tf.placeholder(tf.float32, shape=batch_shape)
                x_input = tf.image.resize_images(_x_input, [224, 224])
            else:
                _x_input = tf.placeholder(tf.float32, shape=batch_shape)
                x_input = _x_input

            x_input = image_normalize(x_input, normalization_method[idx - 1])

            if idx == 1:
                with slim.arg_scope(inception.inception_v1_arg_scope()):
                    _, end_points = inception.inception_v1(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 2:
                with slim.arg_scope(inception.inception_v2_arg_scope()):
                    _, end_points = inception.inception_v2(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 3:
                with slim.arg_scope(inception.inception_v3_arg_scope()):
                    _, end_points = inception.inception_v3(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 4:
                with slim.arg_scope(inception.inception_v4_arg_scope()):
                    _, end_points = inception.inception_v4(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 5:
                with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
                    _, end_points = inception.inception_resnet_v2(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 6:
                with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                    _, end_points = resnet_v1.resnet_v1_101(
                        x_input, num_classes=1000, is_training=False)
            elif idx == 7:
                with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                    _, end_points = resnet_v1.resnet_v1_152(
                        x_input, num_classes=1000, is_training=False)
            elif idx == 8:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    _, end_points = resnet_v2.resnet_v2_101(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 9:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    _, end_points = resnet_v2.resnet_v2_152(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 10:
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, end_points = vgg.vgg_16(
                        x_input, num_classes=1000, is_training=False)
                    end_points['predictions'] = tf.nn.softmax(
                        end_points['vgg_16/fc8'])
            elif idx == 11:
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, end_points = vgg.vgg_19(
                        x_input, num_classes=1000, is_training=False)
                    end_points['predictions'] = tf.nn.softmax(
                        end_points['vgg_19/fc8'])

            # Run computation
            saver = tf.train.Saver(slim.get_model_variables())
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=tf.train.Scaffold(saver=saver),
                checkpoint_filename_with_path=checkpoint_path,
                master=FLAGS.master)

            pred_in = []
            filenames_list = []
            with tf.train.MonitoredSession(
                    session_creator=session_creator) as sess:
                for filenames, images in load_images(FLAGS.input_dir,
                                                     batch_shape):
                    filenames_list.extend(filenames)
                    end_points_dict = sess.run(end_points,
                                               feed_dict={_x_input: images})
                    if idx in [6, 7, 10, 11]:
                        # 1000-class models: prepend a zero "background" column
                        # so predictions align with the 1001-class label space.
                        end_points_dict['predictions'] = np.concatenate(
                            [np.zeros([FLAGS.batch_size, 1]),
                             np.array(end_points_dict['predictions']
                                      .reshape(-1, 1000))],
                            axis=1)
                    try:
                        pred_in.extend(
                            end_points_dict['Predictions'].reshape(
                                -1, num_classes))
                    except KeyError:
                        pred_in.extend(
                            end_points_dict['predictions'].reshape(
                                -1, num_classes))
            pred_list.append(pred_in)

    if ensemble_type == 'mean':
        # model_num x batch x class_num ==(mean over models)==>
        # batch x class_num ==(argmax over classes)==> batch
        pred = np.mean(pred_list, axis=0)
        labels = np.argmax(pred, axis=1)
    elif ensemble_type == 'vote':
        # model_num x batch x class_num ==(argmax over classes)==>
        # model_num x batch ==(median over models)==> batch
        pred = np.argmax(pred_list, axis=2)
        labels = np.median(pred, axis=0)

    with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filename, label in zip(filenames_list, labels):
            out_file.write('{0},{1}\n'.format(filename, label))
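# A small self-contained demo (toy numbers, not outputs of the actual models)
# of how the two ensemble modes above differ: 'mean' averages the probability
# vectors across models before taking the argmax, while 'vote' takes each
# model's argmax first and then the median label.
def _ensemble_aggregation_demo():
    """Illustrative only; mirrors the 'mean' and 'vote' branches in main()."""
    pred_list = np.array([  # model_num x batch x class_num
        [[0.1, 0.7, 0.2]],
        [[0.6, 0.3, 0.1]],
        [[0.2, 0.5, 0.3]],
    ])
    mean_labels = np.argmax(np.mean(pred_list, axis=0), axis=1)    # [1]
    vote_labels = np.median(np.argmax(pred_list, axis=2), axis=0)  # [1.0]
    return mean_labels, vote_labels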
def main(_):
    # Images for the inception-style classifiers are normalized to the
    # [-1, 1] interval. eps is a difference between pixels, so for those
    # models it lies in [0, 2]; epsilon is renormalized from [0, 255]
    # to [0, 2] below via the per-model `scale`.
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001

    total_labels = load_total_labels('images.csv')

    checkpoint_path_list = [
        FLAGS.checkpoint_path_inception_v1,
        FLAGS.checkpoint_path_inception_v2,
        FLAGS.checkpoint_path_inception_v3,
        FLAGS.checkpoint_path_inception_v4,
        FLAGS.checkpoint_path_inception_resnet_v2,
        FLAGS.checkpoint_path_resnet_v1_101,
        FLAGS.checkpoint_path_resnet_v1_152,
        FLAGS.checkpoint_path_resnet_v2_101,
        FLAGS.checkpoint_path_resnet_v2_152,
        FLAGS.checkpoint_path_vgg_16,
        FLAGS.checkpoint_path_vgg_19
    ]

    tf.logging.set_verbosity(tf.logging.INFO)

    graph_list = []
    sess_list = []
    x_input_list = []
    y_list = []
    x_adv_list = []
    type_list = []

    # Build one graph per model, each with its own FGSM attack op.
    for i in range(len(checkpoint_path_list)):
        graph = tf.Graph()
        with graph.as_default():
            x_input_list.append(tf.placeholder(tf.float32, shape=batch_shape))
            type_list.append(tf.placeholder(tf.string, shape=[None]))
            y_list.append(tf.placeholder(
                tf.float32, shape=[FLAGS.batch_size, num_classes]))

            shift = 0
            if i == 0:
                model = InceptionV1(num_classes)
                scale = 2.0 * FLAGS.max_epsilon / 255.0
            if i == 1:
                model = InceptionV2(num_classes)
                scale = 2.0 * FLAGS.max_epsilon / 255.0
            if i == 2:
                model = InceptionV3(num_classes)
                scale = 2.0 * FLAGS.max_epsilon / 255.0
            if i == 3:
                model = InceptionV4(num_classes)
                scale = 2.0 * FLAGS.max_epsilon / 255.0
            if i == 4:
                model = InceptionResnetV2(num_classes)
                scale = 4.3 * FLAGS.max_epsilon / 255.0
            if i == 5:
                model = ResnetV1_101(num_classes)
                scale = FLAGS.max_epsilon
            if i == 6:
                model = ResnetV1_152(num_classes)
                scale = FLAGS.max_epsilon
            if i == 7:
                model = ResnetV2_101(num_classes)
                scale = 2.0 * FLAGS.max_epsilon / 255.0
            if i == 8:
                model = ResnetV2_152(num_classes)
                scale = 2.0 * FLAGS.max_epsilon / 255.0
            if i == 9:
                model = Vgg_16(num_classes)
                scale = FLAGS.max_epsilon
            if i == 10:
                model = Vgg_19(num_classes)
                scale = FLAGS.max_epsilon

            # Attack in each model's own input space, then map the adversarial
            # result back to [0, 255] pixel space.
            input_image = image_normalize(x_input_list[i],
                                          normalization_method[i])
            fgsm = FastGradientMethod(model)
            x_adv = fgsm.generate(input_image, y=y_list[i], scale=scale,
                                  shift=shift, clip_min=None, clip_max=None)
            x_adv = image_invert(x_adv, normalization_method[i])
            x_adv_list.append(x_adv)

        graph_list.append(graph)

    for i in range(len(checkpoint_path_list)):
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        graph = graph_list[i]
        sess_list.append(tf.Session(graph=graph, config=config))

    for i in range(len(checkpoint_path_list)):
        graph = graph_list[i]
        sess = sess_list[i]
        with sess.as_default():
            with graph.as_default():
                model_saver = tf.train.Saver(tf.global_variables())
                model_saver.restore(sess, checkpoint_path_list[i])

    for filenames, images in load_images(FLAGS.input_dir, batch_shape):
        print("make adversarial images [%s]" % filenames[0])
        x_fgsm_list = []
        for i in range(len(checkpoint_path_list)):
            graph = graph_list[i]
            sess = sess_list[i]
            with sess.as_default():
                y_labels = load_labels(filenames, total_labels,
                                       FLAGS.batch_size)
                x_fgsm = sess.run(x_adv_list[i],
                                  feed_dict={x_input_list[i]: images,
                                             y_list[i]: y_labels})
                x_fgsm_list.append(x_fgsm)
        # Average the per-model adversarial images into a single ensemble
        # image and save it.
        x_fgsm_ens = np.mean(x_fgsm_list, axis=0)
        save_images(x_fgsm_ens, filenames, FLAGS.output_dir)
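# The attack graphs above normalize inputs with image_normalize() and map the
# adversarial result back with image_invert(), which is also not shown in this
# section. A minimal sketch follows, assuming image_invert() simply undoes the
# normalization sketched earlier; the constants and method handling are
# assumptions, not the definitive implementation.
def _image_invert_sketch(x_adv, method):
    """Hypothetical inverse of the per-model normalization (illustrative only)."""
    if method == 'default':
        # Undo the [-1, 1] scaling back to [0, 255] pixels.
        return (x_adv + 1.0) * 127.5
    elif method == 'caffe_rgb':
        # Add the per-channel ImageNet mean back.
        mean = tf.constant([123.68, 116.78, 103.94], dtype=tf.float32)
        return x_adv + tf.reshape(mean, [1, 1, 1, 3])
    # 'global' (per-image standardization) is not exactly invertible without
    # the original per-image statistics, so it is left untouched here.
    return x_adv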