Example #1
def main():

    for sal in sal_type:

        tf.reset_default_graph()
        sess = tf.Session()
        vgg = prepare_vgg(sal, None, 'trained', sess)

        batch_img, fns = list_load("./../data_imagenet", images)

        # TF graph: gradient of the max logit w.r.t. the input images
        saliency = tf.gradients(vgg.maxlogit, vgg.images)[0]

        saliency_vals, prob_vals = sess.run([saliency, vgg.probs],
                                            feed_dict={vgg.images: batch_img})

        for idx, sal_map in enumerate(saliency_vals):

            # normalize each saliency map to [0, 255] (in place)
            sal_map -= np.min(sal_map)
            sal_map /= np.max(sal_map)
            sal_map *= 255

            print(class_names[np.argmax(prob_vals[idx])])

        # feed the normalized saliency maps back through the network
        prob_vals2 = sess.run(vgg.probs, feed_dict={vgg.images: saliency_vals})

        for prob in prob_vals2:

            print(class_names[np.argmax(prob)])

        sess.close()
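The in-loop normalization above shifts and scales each saliency map into the [0, 255] pixel range before the maps are fed back through vgg.probs. A minimal standalone sketch of that step, assuming only NumPy (the helper name normalize_to_uint8_range is illustrative and not part of the original code):

import numpy as np

def normalize_to_uint8_range(sal_map):
    # Illustrative helper (not in the original code): shift and scale one
    # saliency map into [0, 255] so it can be fed back as an image.
    sal_map = sal_map.astype(np.float64)
    sal_map -= sal_map.min()
    peak = sal_map.max()
    if peak > 0:  # guard against an all-zero map
        sal_map /= peak
    return sal_map * 255.0

With such a helper, the loop body above reduces to saliency_vals[idx] = normalize_to_uint8_range(sal_map).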
Example #2
def main():

    for init in model_type:
        tf.reset_default_graph()
        sess = tf.Session()
        vgg = prepare_vgg('PlainSaliency', None, init, sess)
        for layer in layers:
            result = sparse_ratio(vgg, sess, layer, 'tabby.png')
            print('The sparse ratio of layer {} with {} weights is {}'.format(
                layer, init, result))
        sess.close()
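sparse_ratio is not defined in this snippet; judging only from its name and the call sparse_ratio(vgg, sess, layer, 'tabby.png'), it presumably reports how sparse the activations of a given layer are for one input image. A hypothetical reconstruction under that assumption (the vgg.layers attribute and the reuse of list_load are assumptions, not the repository's actual implementation):

import numpy as np

def sparse_ratio(vgg, sess, layer, image_name):
    # Hypothetical sketch: fraction of exactly-zero activations in `layer`
    # for a single image. Assumes the model exposes per-layer tensors in a
    # dict-like attribute vgg.layers and that list_load (used in the other
    # examples) can load one named file.
    batch_img, _ = list_load("./../data_imagenet", [image_name])
    acts = sess.run(vgg.layers[layer], feed_dict={vgg.images: batch_img})
    return float(np.mean(acts == 0))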
Example #3
def main():

    for sal in sal_type:
        for init in model_type:
            tf.reset_default_graph()
            sess = tf.Session()
            vgg = prepare_vgg(sal, None, init, sess)

            batch_img, fns = list_load("./../data_imagenet", images)
            job(vgg, sal, sess, init, batch_img, fns)

            sess.close()
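list_load is used by every example but not shown. From the way its return values are used elsewhere (a stack of 224x224x3 images plus the matching file names), a plausible sketch is the following; the scikit-image loading and resizing, and the stripping of file extensions, are assumptions rather than the repository's actual loader:

import os
import numpy as np
from skimage.io import imread
from skimage.transform import resize

def list_load(data_dir, image_names):
    # Plausible stand-in (assumption, not the original): read each named
    # file, resize to the 224x224x3 VGG input size, and stack into a batch.
    imgs, fns = [], []
    for name in image_names:
        img = imread(os.path.join(data_dir, name))
        img = resize(img, (224, 224, 3), preserve_range=True)
        imgs.append(img.astype(np.float32))
        fns.append(os.path.splitext(name)[0])
    return np.stack(imgs), fns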
Example #4
def main():

    for sal in sal_type:
        for init in model_type:
            tf.reset_default_graph()
            sess = tf.Session()
            vgg = prepare_vgg(sal, None, init, sess)

            batch_img, fns = list_load("./../data_imagenet", images)
            for idx, image in enumerate(batch_img):
                job(vgg, sal, sess, init, np.expand_dims(image, axis=0), [fns[idx]])

            sess.close()
Example #5
def main():

    for sal in sal_type:
        for init in model_type:
            tf.reset_default_graph()
            sess = tf.Session()
            vgg = prepare_vgg(sal, None, init, sess)

            batch_img, fns = list_load("./../data_imagenet", images)
            for idx, image in enumerate(batch_img):
                # add zero-mean Gaussian noise (sigma = 10) to the raw pixels
                noise_image = image + np.random.normal(
                    loc=0.0, scale=10.0, size=[224, 224, 3])
                job(vgg, sal, sess, init, np.expand_dims(noise_image, axis=0),
                    [fns[idx] + '_noisy'])

            sess.close()
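The perturbation above adds zero-mean Gaussian noise (sigma = 10) directly to the raw pixel values and does not clip the result, so a few pixels may leave the usual [0, 255] range. If clipping is desired, a small variant of the same step might look like this (the clip is an addition, not part of the original example):

import numpy as np

def add_gaussian_noise(image, sigma=10.0, clip=True):
    # Same perturbation as in the example above, with an optional clip
    # back to the [0, 255] pixel range.
    noisy = image + np.random.normal(loc=0.0, scale=sigma, size=image.shape)
    return np.clip(noisy, 0.0, 255.0) if clip else noisy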
Example #6
def main():

    for sal in sal_type:
        for idx, layer in enumerate(layers):

            tf.reset_default_graph()
            sess = tf.Session()
            vgg = prepare_vgg(sal, idx, 'only', sess)

            batch_img, fns = list_load("./../data_imagenet", images)

            # TF graph: gradient of the max logit w.r.t. the input images
            saliency = tf.gradients(vgg.maxlogit, vgg.images)[0]
            saliency_vals = sess.run(saliency,
                                     feed_dict={vgg.images: batch_img})

            for index, name in enumerate(fns):
                save_dir = 'value_only/{}/{}'.format(name, layer)
                simple_plot(saliency_vals[index], name + '_only_' + layer,
                            save_dir)

            sess.close()
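simple_plot is not shown either; it evidently writes one saliency map to disk under save_dir using the given name. A minimal matplotlib-based stand-in under that assumption (collapsing the HxWx3 gradient with a channel-wise absolute maximum is an illustrative choice, not necessarily what the original helper does):

import os
import numpy as np
import matplotlib.pyplot as plt

def simple_plot(sal_map, name, save_dir):
    # Illustrative stand-in for the plotting helper used above: collapse
    # the HxWx3 gradient to one channel, normalize, and save as a PNG.
    os.makedirs(save_dir, exist_ok=True)
    gray = np.max(np.abs(sal_map), axis=-1)
    gray = (gray - gray.min()) / (gray.max() - gray.min() + 1e-12)
    plt.imsave(os.path.join(save_dir, name + '.png'), gray, cmap='gray')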
Example #7
def main():

    num_iterations = 100
    step_size = 1e-1
    image_name = 'Dog_1.JPEG'

    # how we would like the "saliency map" to be different
    diff_type = 'plain'  # 'centermass', 'plain'

    # define the special gradient for the "saliency map" calculation if necessary
    gradient_type = 'PlainSaliency'  # 'PlainSaliency', 'GuidedBackprop'

    # how we would like to visualize the resulting gradient
    viz_type = 'gradcam'  # 'abs', 'plain', 'gradcam'

    # for gradcam only
    target_layer = 'pool5'

    # load the image
    batch_img, fns = list_load("./../data_imagenet", [image_name])

    sess = tf.Session()

    # prepare the networks
    vgg_attack = prepare_vgg(gradient_type, 'softplus', 'maxpool', None,
                             'trained', sess)  # used for attack
    vgg = prepare_vgg(gradient_type, 'relu', 'maxpool', None, 'trained',
                      sess)  # used for probing

    print('Two Networks Prepared ... ')

    sal = sal_maxlogit(vgg_attack, viz_type, target_layer)
    D = sal_diff(diff_type, vgg_attack, batch_img, sal, sess)

    # gradient of the dissimilarity D w.r.t. the input images
    Dx = tf.gradients(D, vgg_attack.images)[0]

    # the signed gradient used for the iterative update below
    Dx_sign = tf.sign(Dx)

    # record the results for each iteration
    dict_step_to_image = {}
    dict_step_to_dissimilarity = {}
    dict_step_to_salmap = {}
    dict_step_to_prediction = {}
    dict_step_to_perturbation = {}

    for step in range(num_iterations):

        print('Step {}'.format(step))

        if step == 0:
            dict_step_to_image[0] = batch_img
            dict_step_to_dissimilarity[0] = 0
            dict_step_to_salmap[0] = sess.run(
                sal_maxlogit(vgg, viz_type, target_layer),
                feed_dict={vgg.images: batch_img})
            dict_step_to_prediction[0] = np.argmax(
                sess.run(vgg.probs, feed_dict={vgg.images: batch_img}))
            dict_step_to_perturbation[0] = np.zeros(batch_img.shape)
            continue

        Dx_sign_val, D_val = sess.run(
            [Dx_sign, D],
            feed_dict={vgg_attack.images: dict_step_to_image[step - 1]})

        sal_map_val, probs_val = sess.run(
            [sal_maxlogit(vgg, viz_type, target_layer), vgg.probs],
            feed_dict={vgg.images: dict_step_to_image[step - 1]})

        dict_step_to_image[step] = dict_step_to_image[
            step - 1] + step_size * Dx_sign_val
        dict_step_to_perturbation[step] = step_size * Dx_sign_val
        dict_step_to_salmap[step] = sal_map_val
        dict_step_to_dissimilarity[step] = D_val
        dict_step_to_prediction[step] = np.argmax(probs_val)

    evaluate(image_name, diff_type, viz_type, dict_step_to_image,
             dict_step_to_dissimilarity, dict_step_to_salmap,
             dict_step_to_prediction, dict_step_to_perturbation,
             num_iterations)

    sess.close()
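Stripped of the per-step bookkeeping dictionaries, the loop above is an iterative signed-gradient attack: at every step the image moves a fixed distance step_size in the direction that most increases the dissimilarity D returned by sal_diff. A sketch of just that core update, reusing the tensors defined in the example (not a drop-in replacement, since it drops the logging and evaluation):

# Core of the attack loop above, without the per-step bookkeeping.
x = batch_img.copy()
for step in range(1, num_iterations):
    # sign of the gradient of the dissimilarity D w.r.t. the input image
    dx_sign = sess.run(Dx_sign, feed_dict={vgg_attack.images: x})
    # nudge the image in the direction that increases D the most
    x = x + step_size * dx_sign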