import tensorflow as tf


def masking_graph_cost(sig_mask_op):
    with tf.name_scope('loss'):
        # Mean activation of the sigmoid mask, used as a sparsity penalty.
        mask_mean = tf.reduce_mean(sig_mask_op, name='mask_loss')

        # Pre-softmax output of the network for the masked input.
        feat_map = presoftmax()
        # Reference feature map to match, plus a per-element weight, both fed at run time.
        truth_feat_map = tf.placeholder(dtype=tf.float32,
                                        name='target_feat_map')
        weight = tf.placeholder(dtype=tf.float32, name='loss_weight')

        alpha = tf.placeholder(dtype=tf.float32, name='alpha')
        beta = tf.placeholder(dtype=tf.float32, name='beta')

        # Weighted mean-squared error between the reference and current feature maps.
        feat_map_diff = (truth_feat_map - feat_map)
        feat_map_loss = tf.reduce_mean(tf.square(feat_map_diff) * weight,
                                       name='feat_loss')

        # target_softmax = output_label_tensor(label)
        feat_map_loss = alpha * feat_map_loss
        # total_loss = tf.add(feat_map_loss, total_var(sig_mask_op)*0.5 + beta * mask_mean, name='total_loss')
        # Total objective: alpha-scaled feature loss plus beta-scaled mask sparsity.
        total_loss = tf.add(feat_map_loss, beta * mask_mean, name='total_loss')
    return total_loss, feat_map, (feat_map_loss, mask_mean)
# Variant of the loss above without the per-element weight placeholder.
def masking_graph_cost(sig_mask_op):
    with tf.name_scope('loss'):
        mask_mean = tf.reduce_mean(sig_mask_op, name='mask_loss')

        # feat_map = mid_volume()
        # feat_map = last_volume()
        feat_map = presoftmax()
        truth_feat_map = tf.placeholder(dtype=tf.float32,
                                        name='target_feat_map')

        alpha = tf.placeholder(dtype=tf.float32, name='alpha')
        beta = tf.placeholder(dtype=tf.float32, name='beta')

        feat_map_diff = (truth_feat_map - feat_map)

        feat_map_loss = tf.reduce_mean(tf.square(feat_map_diff),
                                       name='feat_loss')

        feat_map_loss = alpha * feat_map_loss

        total_loss = tf.add(feat_map_loss, beta * mask_mean, name='total_loss')
    return total_loss, feat_map, (feat_map_loss, mask_mean)
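A minimal usage sketch, not part of the original repository: it assumes sig_mask_op is a sigmoid mask built from a trainable variable that already gates the network input, that presoftmax() is wired to the loaded model, that an active tf.Session named sess exists, and that target_feats holds a precomputed reference feature map; the placeholder names follow the 'loss/' scope used above.

# Hypothetical driver for the masking loss.
total_loss, feat_map, (feat_loss, mask_mean) = masking_graph_cost(sig_mask_op)
# In practice var_list would be restricted to the mask variable(s) so the model weights stay fixed.
train_op = tf.train.AdamOptimizer(1e-2).minimize(total_loss)

sess.run(tf.global_variables_initializer())
_, loss_val = sess.run(
    [train_op, total_loss],
    feed_dict={'loss/target_feat_map:0': target_feats,  # assumed precomputed reference features
               'loss/alpha:0': 1.0,
               'loss/beta:0': 0.1})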
from deepexplain.tensorflow import DeepExplain

# Rescale images from [0, 255] to [-1, 1] and start a fresh session with GPU memory growth.
imgs = (2 * imgs / 255) - 1
tf.reset_default_graph()
cfg = tf.ConfigProto()
cfg.gpu_options.allow_growth = True
sess = tf.Session(config=cfg)

with DeepExplain(session=sess, graph=sess.graph) as de:
    load_model(model_name,
               tf.placeholder(tf.float32, [None, 224, 224, 3], 'input'), True)

    labels, ids = top_labels_and_ids(sess, imgs)

    # print(labels, ids)

    input_n = input_node()
    output_n = presoftmax()
    # Attribution target: the maximum pre-softmax score per image.
    target_n = tf.reduce_max(output_n, 1)

    attributions = {}
    BATCH_SIZE = 10
    for i in range(0, len(imgs), BATCH_SIZE):
        print('iter %s' % i)
        batch = imgs[i:i + BATCH_SIZE]
        batch_filenames = img_filenames[i:i + BATCH_SIZE]
        # attributions['saliency'] = de.explain('saliency', target_n, input_n, batch)
        # attributions['intgrad'] = de.explain('intgrad', target_n, input_n, batch)
        # attributions['elrp'] = de.explain('elrp', target_n, input_n, batch)
        # attributions['deeplift'] = de.explain('deeplift', target_n, input_n, batch)
        attributions['occlusion'] = de.explain('occlusion',
                                               target_n,
                                               input_n,
                                               batch)
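        # Hypothetical continuation (the original snippet is truncated after the explain call):
        # collapse each per-image occlusion map over channels and write it next to its source
        # file, so that batch_filenames is actually consumed; the file naming is an assumption.
        for fname, attr in zip(batch_filenames, attributions['occlusion']):
            abs(attr).sum(axis=-1).astype('float32').tofile(fname + '.occlusion.raw')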
Example #4
def eval_img(sess, img, target_id):
    # Pre-softmax score of a single image for the given class id.
    return run_network(sess, presoftmax(), [img])[0][target_id]
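A hypothetical usage note: reusing sess, imgs, and ids as set up in the DeepExplain example above, eval_img returns the pre-softmax score of one image for its predicted class.

score = eval_img(sess, imgs[0], ids[0])
print('pre-softmax score for class %d: %.4f' % (ids[0], score))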