def build_masking_graph(model_name, var_init=None):
    with tf.Graph().as_default() as graph:
        with tf.name_scope('input'):
            input = tf.placeholder(tf.float32, [1, img_size, img_size, 3], name='m_input')
        # Mask logits: initialised near +4 so sigmoid(mask_var) starts close to 1
        # (image fully visible), or from a caller-supplied initial value.
        if var_init is None:
            mask_var = tf.Variable(tf.truncated_normal((1, img_size, img_size, 1), 4, 0.1),
                                   name='mask_var')
        else:
            mask_var = tf.Variable(tf.constant(var_init, dtype=tf.float32,
                                               shape=(1, img_size, img_size, 1)),
                                   name='mask_var')
        noise_background = tf.random_uniform([1, img_size, img_size, 3], 0, 255)
        sig_mask_op = tf.sigmoid(mask_var, name='mask_sigmoid')
        # Blend the input with the noise background according to the soft mask.
        masked_input = tf.multiply(input, sig_mask_op) + tf.multiply(noise_background, 1 - sig_mask_op)
        cfg = tf.ConfigProto()
        cfg.gpu_options.allow_growth = True
        sess = tf.InteractiveSession(graph=graph, config=cfg)
        load_model(model_name, masked_input)
    return sess, graph, mask_var, sig_mask_op, masked_input
def build_masking_graph(model_name, var_init=None):
    with tf.Graph().as_default() as graph:
        with tf.name_scope('input'):
            input = tf.placeholder(tf.float32, [1, img_size, img_size, 3], name='m_input')
        # Mask logits: initialised near +4 so sigmoid(mask_var) starts close to 1
        # (image fully visible), or from a caller-supplied initial value.
        if var_init is None:
            mask_var = tf.Variable(tf.truncated_normal((1, img_size, img_size, 1), 4, 0.1),
                                   name='mask_var')
        else:
            mask_var = tf.Variable(tf.constant(var_init, dtype=tf.float32,
                                               shape=(1, img_size, img_size, 1)),
                                   name='mask_var')

        # Alternative background: per-channel truncated normals with dataset colour statistics.
        # noise_r = tf.truncated_normal([1, img_size, img_size, 1], 122.46, 70.63)
        # noise_g = tf.truncated_normal([1, img_size, img_size, 1], 114.26, 68.61)
        # noise_b = tf.truncated_normal([1, img_size, img_size, 1], 101.37, 71.93)
        # noise_background = tf.clip_by_value(tf.concat([noise_r, noise_g, noise_b], axis=3), 0, 255)
        noise_background = tf.random_uniform([1, img_size, img_size, 3], 0, 255)

        sig_mask_op = tf.sigmoid(mask_var, name='mask_sigmoid')

        pool_octave = tf.placeholder(tf.int32, shape=[], name='pool_octave')
        # Optional multi-scale mask: pool and re-expand the sigmoid mask at several octaves.
        # mask_pooled_ops = tf.stack([make_shrink_and_expand(2 ** s, sig_mask_op, img_size) for s in range(5, -1, -1)])
        # sig_mask_op = mask_pooled_ops[pool_octave]

        # Blend the input with the noise background according to the soft mask.
        masked_input = tf.multiply(input, sig_mask_op) + tf.multiply(noise_background, 1 - sig_mask_op)

        cfg = tf.ConfigProto()
        cfg.gpu_options.allow_growth = True
        sess = tf.InteractiveSession(graph=graph, config=cfg)
        load_model(model_name, masked_input)
    return sess, graph, mask_var, sig_mask_op, masked_input, noise_background
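# --- A minimal sketch (an assumption, not code from the original notebook) of how the
# masking graph above could be used: optimise the mask logits so the masked image keeps
# the model's score for a chosen class while the visible area shrinks.
# Assumptions: presoftmax() returns the pre-softmax logits of the model loaded into the
# current default graph, `target_class` is the class index being explained, and `img` is
# a single image of shape [1, img_size, img_size, 3].
sess, graph, mask_var, sig_mask_op, masked_input, noise_background = \
    build_masking_graph('inception_v3')
with graph.as_default():
    logits = presoftmax()                        # assumed helper: pre-softmax output tensor
    target_score = logits[0, target_class]       # score of the class being explained
    mask_area = tf.reduce_mean(sig_mask_op)      # fraction of the image kept visible
    loss = 10.0 * mask_area - target_score       # shrink the mask, keep the score
    grad = tf.gradients(loss, mask_var)[0]
    update_op = mask_var.assign_sub(0.5 * grad)  # plain gradient step on the mask logits
    input_ph = graph.get_tensor_by_name('input/m_input:0')
    sess.run(mask_var.initializer)
    for step in range(300):
        _, loss_val = sess.run([update_op, loss], feed_dict={input_ph: img})
        if step % 50 == 0:
            print('step %d, loss %.4f' % (step, loss_val))
    final_mask = sess.run(sig_mask_op)           # soft saliency mask in [0, 1]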
dir = 'test_data_3'
# imgs, img_filenames = prepare_imgs('test_data_final_2_large', dir)
imgs, img_filenames = prepare_imgs(dir, dir)
# s = 50
# imgs = imgs[2 * s:3 * s]
# img_filenames = img_filenames[2 * s:3 * s]
imgs = (2 * imgs / 255) - 1

tf.reset_default_graph()
cfg = tf.ConfigProto()
cfg.gpu_options.allow_growth = True
sess = tf.Session(config=cfg)

with DeepExplain(session=sess, graph=sess.graph) as de:
    load_model(model_name, tf.placeholder(tf.float32, [None, 224, 224, 3], 'input'), True)
    labels, ids = top_labels_and_ids(sess, imgs)
    # print(labels, ids)

    input_n = input_node()
    output_n = presoftmax()
    target_n = tf.reduce_max(output_n, 1)

    attributions = {}
    BATCH_SIZE = 10
    for i in range(0, len(imgs), BATCH_SIZE):
        print('iter %s' % i)
        batch = imgs[i:i + BATCH_SIZE]
        batch_filenames = img_filenames[i:i + BATCH_SIZE]
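        # --- The original cell ends above. What follows is a hedged sketch (not the
        # original code) of how each batch's attributions might be computed with
        # DeepExplain's de.explain(method, T, X, xs) call; the method list and the
        # filename-keyed storage are assumptions.
        for method in ['saliency', 'grad*input', 'intgrad', 'elrp']:
            batch_attr = de.explain(method, target_n, input_n, batch)
            for fname, attr in zip(batch_filenames, batch_attr):
                attributions.setdefault(method, {})[fname] = attr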
for method_name, curve in curve_records.items():
    aopc = np.mean(curve)
    print('%s: %s' % (method_name, aopc))

root_dir = 'result/methods_final_2_large'
input_img_dir = 'test_data_final_2_large'
input_imgs, _ = prepare_imgs(input_img_dir, 'lerf')

cfg = tf.ConfigProto()
cfg.gpu_options.allow_growth = True
sess = tf.Session(config=cfg)
input_ph = tf.placeholder(tf.float32, [None, 299, 299, 3], name='m_input')
load_model('inception_v3', input_ph)

# Predict class ids in two chunks of 100 images, then concatenate them.
_, ids = top_labels_and_ids(sess, input_imgs[:100])
_, ids2 = top_labels_and_ids(sess, input_imgs[100:])
ids = np.append(ids, ids2)

average_score_drop_of_methods = {}
for sub_dir in glob.glob(root_dir + '/*'):
    method_name = os.path.basename(sub_dir)
    # Only the 'occlusion' method is evaluated here; drop this check to evaluate all methods.
    if method_name != 'occlusion':
        continue
    maps = load_saliency_maps(sub_dir)
    record_morfs(method_name, input_imgs, ids, maps, average_score_drop_of_methods)
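# --- A hedged sketch of the MoRF ("most relevant first") perturbation that record_morfs
# is assumed to perform; this illustrates the technique rather than reproducing the
# original implementation. It assumes `logits` is the pre-softmax output tensor of the
# model fed by input_ph (e.g. the tensor returned by the presoftmax() helper), and that
# images and saliency maps are HxWx3 / HxW numpy arrays.
def morf_score_drops(sess, logits, input_ph, img, target_id, saliency_map,
                     patch=9, steps=50):
    h, w = img.shape[:2]
    # Rank non-overlapping patches by the total saliency they contain.
    cells = [(r, c) for r in range(0, h - patch + 1, patch)
                    for c in range(0, w - patch + 1, patch)]
    cells.sort(key=lambda rc: -saliency_map[rc[0]:rc[0] + patch, rc[1]:rc[1] + patch].sum())
    perturbed = img.astype(np.float32)  # work on a float copy
    base = sess.run(logits, {input_ph: perturbed[None]})[0, target_id]
    drops = []
    for r, c in cells[:steps]:
        # Replace the most relevant remaining patch with uniform noise in the image's range.
        perturbed[r:r + patch, c:c + patch, :] = np.random.uniform(
            img.min(), img.max(), (patch, patch, img.shape[2]))
        score = sess.run(logits, {input_ph: perturbed[None]})[0, target_id]
        drops.append(base - score)
    # The AOPC for one image is np.mean(drops); averaging over images gives the
    # per-method scores printed from curve_records above.
    return np.array(drops)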