Example #1
0
def main():
    """Compute and save `num_to_viz` saliency maps per input image for every
    (gradient type, initialization) combination of a Keras VGG16.

    For each pair, a fresh graph/session is built, saliencies are evaluated
    for the whole batch, and one plot per (image, visualization index) is
    written under ``vgg_keras/<image>/<init>/<sal>/``.
    """
    batch_img, fns = list_load("./../data_imagenet", images)

    for sal in sal_type:  # for each gradient type
        for init in model:  # random or trained

            print(sal)
            print(init)

            tf.reset_default_graph()  # erase the graph
            sess = tf.Session()  # start a new session
            vgg16 = prepare_keras_vgg16(sal, init, sess)

            graph = tf.get_default_graph()

            # renamed from `input` to avoid shadowing the builtin
            input_tensor = graph.get_tensor_by_name('input_1:0')
            logits = graph.get_tensor_by_name('predictions/BiasAdd:0')

            num_to_viz = 50

            # shape = (num_to_viz, num_input_images, 224, 224, 3)
            # TF Graph
            saliencies = super_saliency(logits, input_tensor, num_to_viz)

            # transpose to (num_input_images, num_to_viz, 224, 224, 3)
            saliencies_val = sess.run(saliencies,
                                      feed_dict={input_tensor: batch_img})
            saliencies_val_trans = np.transpose(saliencies_val, (1, 0, 2, 3, 4))

            for idx, name in enumerate(fns):
                save_dir = "vgg_keras/{}/{}/{}/".format(name, init, sal)
                for index, image in enumerate(saliencies_val_trans[idx]):
                    simple_plot(image, name + str(index), save_dir)

            sess.close()
Example #2
0
def main():
    """For each gradient type: compute plain saliency maps for a batch,
    normalize them in place to [0, 255], print the predicted class per
    image, then feed the normalized saliency maps back through the network
    and print those predictions as well.
    """

    for sal in sal_type:

        tf.reset_default_graph()
        sess = tf.Session()
        vgg = prepare_vgg(sal, None, 'trained', sess)

        batch_img, fns = list_load("./../data_imagenet", images)

        # TF Graph: gradient of the max logit w.r.t. the input
        # NOTE(review): the gradient is taken w.r.t. vgg.imgs while the feed
        # dict keys on vgg.images — presumably aliases of the same
        # placeholder; confirm against the vgg class.
        saliency = tf.gradients(vgg.maxlogit, vgg.imgs)[0]

        saliency_vals, prob_vals = sess.run([saliency, vgg.probs],
                                            feed_dict={vgg.images: batch_img})

        # `smap` is a view into saliency_vals, so the in-place normalization
        # below also affects the second sess.run further down.
        # (Renamed from `sal`, which shadowed the outer loop variable; the
        # local `min`/`max` names shadowed builtins; `*= 225` was a typo for
        # the 255 pixel range.)
        for idx, smap in enumerate(saliency_vals):

            # normalize in place to [0, 255]
            smap -= np.min(smap)
            smap /= np.max(smap)
            smap *= 255

            print(class_names[np.argmax(prob_vals[idx])])

        prob_vals2 = sess.run(vgg.probs, feed_dict={vgg.images: saliency_vals})

        for prob in prob_vals2:

            print(class_names[np.argmax(prob)])

        sess.close()
Example #3
0
def main():
    """Compute GBP reconstructions for a fixed set of SVHN plot images,
    pickle the result, and save one visualization per image.
    """

    # the length of the logits vector
    # the only requirement is a positive integer
    # can be randomized
    output_dim = 100

    # reset graph & start a session that grows GPU memory on demand
    tf.reset_default_graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # prepare a randomly initialized shallow CNN with the gradient overwritten
    # to "GBP"; in terms of the GBP reconstruction this model can be anything
    # as long as it's a ConvNet.
    model = prepare_GBP_shallow_CNN(sess=sess, output_dim=output_dim)

    # tf operation for GBP reconstruction
    tfOp_gbp_reconstruction = GBP_Reconstruction(model, output_dim)

    # [num_examples, 32, 32, 3]
    data_dir = './Plots/SVHN'
    names = [
        "TEST_1_DeepFool.png",
        "TEST_1_FGSM.png",
        "TEST_1_IterGS.png",
        "TEST_1_IterG.png",
        "TEST_0_DeepFool.png",
        "TEST_0_FGSM.png",
        "TEST_0_IterGS.png",
        "TEST_0_IterG.png",
        "TEST_2_DeepFool.png",
        "TEST_2_FGSM.png",
        "TEST_2_IterGS.png",
        "TEST_2_IterG.png",
    ]
    X_test_ori, _ = list_load(data_dir, names, size=(32, 32))

    # map each example to its corresponding GBP reconstruction
    X_test_gbp = Map(tfOp_gbp_reconstruction, model.layers_dic['images'],
                     X_test_ori, sess)

    # save to pickle; `with` guarantees the file is closed even on error
    with open('./{}.pkl'.format('SVHN-Plot'), 'wb') as f:
        pkl.dump(X_test_gbp, f, -1)

    # visualization, one plot per reconstructed image
    for index, image in enumerate(X_test_gbp):
        simple_plot(image, names[index] + 'GBP', './Plots/SVHN/')

    sess.close()
def main():
    """Run `job` once per (saliency type, model initialization) pair,
    each in a fresh TensorFlow graph and session.
    """
    for grad_kind in sal_type:
        for weights in model_type:
            tf.reset_default_graph()
            session = tf.Session()
            net = prepare_vgg(grad_kind, None, weights, session)

            imgs, names = list_load("./../data_imagenet", images)
            job(net, grad_kind, session, weights, imgs, names)

            session.close()
Example #5
0
def main():
    """Run `job` on every image individually (batch size 1) for each
    (saliency type, model initialization) combination.
    """
    for grad_kind in sal_type:
        for weights in model_type:
            tf.reset_default_graph()
            session = tf.Session()
            net = prepare_vgg(grad_kind, None, weights, session)

            imgs, names = list_load("./../data_imagenet", images)
            for k, img in enumerate(imgs):
                # lift the single image to a 1-element batch
                job(net, grad_kind, session, weights,
                    np.expand_dims(img, axis=0), [names[k]])

            session.close()
Example #6
0
def main():
    """Run `job` on a ResNet for every (saliency type, depth, init)
    combination, each in a fresh graph and session.
    """
    for grad_kind in sal_type:
        for depth in N:
            for weights in model_type:
                tf.reset_default_graph()
                session = tf.Session()
                net = prepare_resnet(grad_kind, weights, session, depth)

                imgs, names = list_load("./../data_imagenet", images)
                job(net, grad_kind, session, weights, imgs, names, depth)

                session.close()
Example #7
0
def main():
    """Per-image `job` runs like the plain variant, but each image is first
    perturbed with Gaussian pixel noise (sigma = 10) and saved under a
    '_noisy' suffix.
    """
    for grad_kind in sal_type:
        for weights in model_type:
            tf.reset_default_graph()
            session = tf.Session()
            net = prepare_vgg(grad_kind, None, weights, session)

            imgs, names = list_load("./../data_imagenet", images)
            for k, img in enumerate(imgs):
                perturbed = img + np.random.normal(
                    loc=0.0, scale=10.0, size=[224, 224, 3])
                job(net, grad_kind, session, weights,
                    np.expand_dims(perturbed, axis=0),
                    [names[k] + '_noisy'])

            session.close()
Example #8
0
def main():
    """Run `job` per image on a 3-layer CNN, both with and without pooling,
    for each saliency type.
    """
    for grad_kind in sal_type:
        # with and without pooling
        for use_pool in (True, False):
            tf.reset_default_graph()
            session = tf.Session()
            net = prepare_cnn3(grad_kind, session, pool=use_pool)

            imgs, names = list_load("./../data_imagenet", images)
            for k, img in enumerate(imgs):
                # single-image batch per job call
                job(net, grad_kind, session, use_pool,
                    np.expand_dims(img, axis=0), [names[k]])

            session.close()
Example #9
0
def main():
    """For each saliency type and each layer (network prepared in 'only'
    mode per layer), compute the plain-gradient saliency of the batch and
    save one plot per image under ``value_only/<image>/<layer>``.
    """
    for grad_kind in sal_type:
        for layer_idx, layer in enumerate(layers):

            tf.reset_default_graph()
            session = tf.Session()
            net = prepare_vgg(grad_kind, layer_idx, 'only', session)

            imgs, names = list_load("./../data_imagenet", images)

            # TF Graph: gradient of the max logit w.r.t. the input
            # NOTE(review): gradient is w.r.t. net.imgs while the feed keys
            # on net.images — presumably the same placeholder; confirm.
            grad_map = tf.gradients(net.maxlogit, net.imgs)[0]
            grad_vals = session.run(grad_map,
                                    feed_dict={net.images: imgs})

            for k, name in enumerate(names):
                save_dir = 'value_only/{}/{}'.format(name, layer)
                simple_plot(grad_vals[k], name + '_only_' + layer,
                            save_dir)

            session.close()
Example #10
0
def sparse_ratio(vgg, sess, layer_name, image_name, h_idx=None, v_idx=None):
    """Fraction of strictly-positive activations in a layer for one image.

    Notice that the sparse ratio will be calculated w.r.t the entire batch!

    Args:
        vgg: network object exposing `layers_dic` and an `images` placeholder.
        sess: active TF session.
        layer_name: key into `vgg.layers_dic`.
        image_name: single image filename loaded from ./../data_imagenet.
        h_idx, v_idx: optional spatial position; give both or neither.

    Returns:
        float in [0, 1] — share of activations > 0 in the selected tensor.

    Raises:
        Exception: if exactly one of h_idx / v_idx is provided.
    """

    # get the target layer tensor
    # (identity comparison with None per PEP 8, was `== None`)
    if h_idx is None and v_idx is None:
        target_tensor = vgg.layers_dic[layer_name]

    # get the target "depth row" from the layer tensor
    # corresponding to one image patch filtered by all the filters
    elif h_idx is not None and v_idx is not None:
        target_tensor = vgg.layers_dic[layer_name][:, h_idx, v_idx]

    else:
        raise Exception("Error in sparse_ratio !")

    batch_img, fns = list_load("./../data_imagenet", [image_name])

    target_tensor_val = sess.run(target_tensor,
                                 feed_dict={vgg.images: batch_img})
    # binarize: every positive activation counts as 1, the rest stay as-is
    # (non-positive values are <= 0 and don't affect the positive count only
    # if they are zero; negatives would — ReLU-style layers are assumed here)
    # NOTE(review): assumes non-positive entries are 0 (post-ReLU) — confirm.
    target_tensor_val[target_tensor_val > 0] = 1.0
    return np.sum(target_tensor_val) / np.size(target_tensor_val)
Example #11
0
def main():
    """Iterative sign-gradient attack on a saliency map.

    Each step nudges the input image by `step_size` along the sign of the
    gradient of a saliency-dissimilarity term D computed on `vgg_attack`
    (softplus variant), while a second network `vgg` (relu variant) is
    probed for its saliency map and prediction. Per-step artifacts are
    collected in dicts keyed by step index and handed to `evaluate`.
    """

    num_iterations = 100
    step_size = 1e-1
    image_name = 'Dog_1.JPEG'

    # how we would like the "saliency map" be different
    diff_type = 'plain'  # 'centermass', 'plain'

    # define the special gradient for the "saliency map" calculation if necessary
    gradient_type = 'PlainSaliency'  # 'PlainSaliency', 'GuidedBackprop'

    # how we would like to visualize the result gradient
    viz_type = 'gradcam'  # 'abs', 'plain', 'gradcam'

    # for gradcam only
    target_layer = 'pool5'

    # load the image
    batch_img, fns = list_load("./../data_imagenet", [image_name])

    sess = tf.Session()

    # prepare the networks
    vgg_attack = prepare_vgg(gradient_type, 'softplus', 'maxpool', None,
                             'trained', sess)  # used for attack
    vgg = prepare_vgg(gradient_type, 'relu', 'maxpool', None, 'trained',
                      sess)  # used for probing

    print('Two Networks Prepared ... ')

    sal = sal_maxlogit(vgg_attack, viz_type, target_layer)
    D = sal_diff(diff_type, vgg_attack, batch_img, sal, sess)

    # gradient of the dissimilarity w.r.t. the attacked network's input
    Dx = tf.gradients(D, vgg_attack.images)[0]

    # the signed gradient (FGSM-style step direction)
    Dx_sign = tf.sign(Dx)

    # record the results for each iteration, keyed by step index
    dict_step_to_image = {}
    dict_step_to_dissimilarity = {}
    dict_step_to_salmap = {}
    dict_step_to_prediction = {}
    dict_step_to_perturbation = {}

    for step in range(num_iterations):

        print('Step {}'.format(step))

        # step 0 records the clean baseline; no perturbation is applied
        if step == 0:
            dict_step_to_image[0] = batch_img
            dict_step_to_dissimilarity[0] = 0
            dict_step_to_salmap[0] = sess.run(
                sal_maxlogit(vgg, viz_type, target_layer),
                feed_dict={vgg.images: batch_img})
            dict_step_to_prediction[0] = np.argmax(
                sess.run(vgg.probs, feed_dict={vgg.images: batch_img}))
            dict_step_to_perturbation[0] = np.zeros(batch_img.shape)
            continue

        # attack direction and current dissimilarity, evaluated at the
        # previous step's image
        Dx_sign_val, D_val = sess.run(
            [Dx_sign, D],
            feed_dict={vgg_attack.images: dict_step_to_image[step - 1]})

        # NOTE(review): the probe below also runs on the step-1 image, so
        # the salmap/prediction stored under key `step` describe the image
        # at `step - 1` — confirm this off-by-one is intended by `evaluate`.
        sal_map_val, probs_val = sess.run(
            [sal_maxlogit(vgg, viz_type, target_layer), vgg.probs],
            feed_dict={vgg.images: dict_step_to_image[step - 1]})

        # apply one signed-gradient step and record all artifacts
        dict_step_to_image[step] = dict_step_to_image[
            step - 1] + step_size * Dx_sign_val
        dict_step_to_perturbation[step] = step_size * Dx_sign_val
        dict_step_to_salmap[step] = sal_map_val
        dict_step_to_dissimilarity[step] = D_val
        dict_step_to_prediction[step] = np.argmax(probs_val)

    evaluate(image_name, diff_type, viz_type, dict_step_to_image,
             dict_step_to_dissimilarity, dict_step_to_salmap,
             dict_step_to_prediction, dict_step_to_perturbation,
             num_iterations)

    sess.close()