コード例 #1
0
ファイル: keras_vgg.py プロジェクト: coolclear/Research
def main():
    """For every saliency type and every model init (random vs. trained),
    compute super-saliency maps of the Keras VGG16 logits w.r.t. the input
    images and save one plot per visualized neuron per image.

    Relies on module-level globals: ``sal_type``, ``model``, ``images``,
    ``list_load``, ``prepare_keras_vgg16``, ``super_saliency``,
    ``simple_plot``, ``tf``, ``np``.
    """

    batch_img, fns = list_load("./../data_imagenet", images)

    for sal in sal_type:  # for each gradient type
        for init in model:  # random or trained

            print(sal)
            print(init)

            tf.reset_default_graph()  # erase the graph
            sess = tf.Session()  # start a new session
            try:
                # builds the VGG16 graph in the current default graph;
                # the returned model object itself is not needed here
                prepare_keras_vgg16(sal, init, sess)

                graph = tf.get_default_graph()

                # renamed from `input` to avoid shadowing the builtin
                input_tensor = graph.get_tensor_by_name('input_1:0')
                logits = graph.get_tensor_by_name('predictions/BiasAdd:0')

                num_to_viz = 50

                # shape = (num_to_viz, num_input_images, 224, 224, 3)
                # TF Graph
                saliencies = super_saliency(logits, input_tensor, num_to_viz)

                # shape = (num_input_images, num_to_viz, 224, 224, 3)
                saliencies_val = sess.run(saliencies,
                                          feed_dict={input_tensor: batch_img})
                saliencies_val_trans = np.transpose(saliencies_val, (1, 0, 2, 3, 4))

                for idx, name in enumerate(fns):
                    save_dir = "vgg_keras/{}/{}/{}/".format(name, init, sal)
                    for index, image in enumerate(saliencies_val_trans[idx]):
                        simple_plot(image, name + str(index), save_dir)
            finally:
                # release the session even if graph construction or
                # plotting raises, so later iterations start clean
                sess.close()
コード例 #2
0
ファイル: prepare_data.py プロジェクト: coolclear/Research
def main():
    """Map a fixed list of adversarial SVHN example images to their GBP
    (Guided Backpropagation) reconstructions, pickle the reconstructions,
    and save a plot of each.

    Relies on module-level globals: ``tf``, ``pkl``, ``list_load``, ``Map``,
    ``prepare_GBP_shallow_CNN``, ``GBP_Reconstruction``, ``simple_plot``.
    """

    # the length of the logits vector
    # the only requirement is a positive integer
    # can be randomized
    output_dim = 100

    # reset graph & start session
    tf.reset_default_graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # prepare a randomly initialized shallow CNN with the gradient has been overwritten to "GBP"
    # in terms of the GBP reconstruction, this model can be any as long as it's a ConvNet.
    model = prepare_GBP_shallow_CNN(sess=sess, output_dim=output_dim)

    # tf operation for GBP reconstruction
    tfOp_gbp_reconstruction = GBP_Reconstruction(model, output_dim)

    # [num_examples, 32, 32, 3]
    data_dir = './Plots/SVHN'
    names = [
        "TEST_1_DeepFool.png",
        "TEST_1_FGSM.png",
        "TEST_1_IterGS.png",
        "TEST_1_IterG.png",
        "TEST_0_DeepFool.png",
        "TEST_0_FGSM.png",
        "TEST_0_IterGS.png",
        "TEST_0_IterG.png",
        "TEST_2_DeepFool.png",
        "TEST_2_FGSM.png",
        "TEST_2_IterGS.png",
        "TEST_2_IterG.png",
    ]
    X_test_ori, _ = list_load(data_dir, names, size=(32, 32))

    # map each training example to its corresponding GBP reconstruction
    # X_train_gbp = Map(tfOp_gbp_reconstruction, model.layers_dic['images'], X_train_ori, sess)
    X_test_gbp = Map(tfOp_gbp_reconstruction, model.layers_dic['images'],
                     X_test_ori, sess)

    # save to pickle; `with` guarantees the handle is closed even on error
    with open('./{}.pkl'.format('SVHN-Plot'), 'wb') as f:
        # pkl.dump((X_train_gbp, y_train), f, -1)
        pkl.dump(X_test_gbp, f, -1)

    for index, image in enumerate(X_test_gbp):
        simple_plot(image, names[index] + 'GBP', './Plots/SVHN/')
コード例 #3
0
def job(vgg, sal_type, sess, init, batch_img, fns):
    """Save each input image together with the saliency map of its max logit,
    tagging each saliency plot with the model's top-1 predicted class name.

    NOTE(review): the gradient is taken w.r.t. ``vgg.imgs`` but the feed
    uses ``vgg.images`` — presumably aliases on the model object; confirm.
    ``sal_type`` and ``init`` are accepted but unused here.
    """
    # gradient of the maximum logit w.r.t. the input tensor (TF graph op)
    grad_op = tf.gradients(vgg.maxlogit, vgg.imgs)[0]

    # shape = (num_to_viz, 224, 224, 3)
    grad_vals = sess.run(grad_op, feed_dict={vgg.images: batch_img})

    # top-1 class name for every input image
    prob_vals = sess.run(vgg.probs, feed_dict={vgg.images: batch_img})
    top1 = [class_names[np.argmax(row)] for row in prob_vals]

    save_dir = './adv_results/'
    for i, fname in enumerate(fns):
        simple_plot(batch_img[i], fname, save_dir)
        simple_plot(grad_vals[i], fname + '_{}'.format(top1[i]), save_dir)
コード例 #4
0
ファイル: deconv.py プロジェクト: coolclear/Research
def job(vgg, sal_type, sess, init, batch_img, fns):
    """For every layer in the module-level ``layers`` list, visualize
    saliency maps of ``num_to_viz`` neurons w.r.t. the VGG input images.

    Procedure per layer:
      1. pick ``num_to_viz`` neurons from the layer,
      2. build the saliency graph w.r.t. ``vgg.imgs`` for each neuron,
      3. save one plot per (image, neuron) pair.

    ``init`` is accepted but unused here.
    """
    num_to_viz = 100

    for layer_name in layers:
        # TF graph op; result shape = (num_to_viz, num_input_images, 224, 224, 3)
        sal_op = super_saliency(vgg.layers_dic[layer_name], vgg.imgs, num_to_viz)

        # evaluate, then reorder to (num_input_images, num_to_viz, 224, 224, 3)
        sal_vals = sess.run(sal_op, feed_dict={vgg.images: batch_img})
        per_image = np.transpose(sal_vals, (1, 0, 2, 3, 4))

        for img_idx, fname in enumerate(fns):
            save_dir = "results/{}/{}/{}".format(fname, sal_type, layer_name)
            for viz_idx, sal_map in enumerate(per_image[img_idx]):
                simple_plot(sal_map,
                            fname + '_' + layer_name + '_' + str(viz_idx),
                            save_dir)
コード例 #5
0
ファイル: deconv_maxpool.py プロジェクト: coolclear/Research
def job(cnn3, sal_type, sess, if_pool, batch_img, fns):
    """Visualize saliency maps of ``num_to_viz`` neurons of the 'FC' layer
    of a small CNN w.r.t. its 'Input' layer, one plot per (image, neuron).

    Steps: pick the layer, pick ``num_to_viz`` neurons from it, then compute
    each neuron's saliency map w.r.t. the input.
    """
    num_to_viz = 10
    layer_name = 'FC'

    # TF graph op; result shape = (num_to_viz, num_input_images, 224, 224, 3)
    sal_op = super_saliency(cnn3.layers_dic[layer_name],
                            cnn3.layers_dic['Input'],
                            num_to_viz)

    # evaluate, then reorder to (num_input_images, num_to_viz, 224, 224, 3)
    sal_vals = sess.run(sal_op, feed_dict={cnn3.images: batch_img})
    per_image = np.transpose(sal_vals, (1, 0, 2, 3, 4))

    for img_idx, fname in enumerate(fns):
        save_dir = "01272018/{}/{}/Pooling_{}/".format(fname, sal_type, if_pool)
        for viz_idx, sal_map in enumerate(per_image[img_idx]):
            simple_plot(sal_map,
                        fname + '_' + sal_type + '_' + str(if_pool) + '_' + str(viz_idx),
                        save_dir)
コード例 #6
0
ファイル: value_of_layer.py プロジェクト: coolclear/Research
def main():
    """For each saliency type and each layer, build a VGG variant ('only'
    mode), compute the gradient of the max logit w.r.t. the inputs, and save
    one saliency plot per image.

    Relies on module-level globals: ``sal_type``, ``layers``, ``images``,
    ``list_load``, ``prepare_vgg``, ``simple_plot``, ``tf``.
    """

    # loop-invariant: the same files are loaded every iteration, so do the
    # disk I/O once instead of inside the double loop
    batch_img, fns = list_load("./../data_imagenet", images)

    for sal in sal_type:
        for idx, layer in enumerate(layers):

            tf.reset_default_graph()
            sess = tf.Session()
            try:
                vgg = prepare_vgg(sal, idx, 'only', sess)

                # TF Graph
                saliency = tf.gradients(vgg.maxlogit, vgg.imgs)[0]
                saliency_vals = sess.run(saliency,
                                         feed_dict={vgg.images: batch_img})

                for index, name in enumerate(fns):
                    save_dir = 'value_only/{}/{}'.format(name, layer)
                    simple_plot(saliency_vals[index], name + '_only_' + layer,
                                save_dir)
            finally:
                # close the session even if graph construction or plotting
                # raises, so later iterations start clean
                sess.close()