コード例 #1
0
ファイル: visualize.py プロジェクト: shiyxg/tremor
def train_cnn():
    """Restore a trained CNN from a checkpoint and run tf_cnnvis
    deconvolution visualization on one batch of test tremor data.

    Returns:
        The open tf.Session holding the restored graph, so callers can
        keep fetching from it.

    NOTE(review): the original version created the session in a ``with``
    block and returned it after the block exited, handing back an
    already-closed session; the session is now created without a context
    manager and deliberately left open.
    """
    g = cnn_graph()
    # cnn_graph() returns a dict of graph endpoints:
    #   images, labels, learning_rate, logits, loss, train, summary, is_training
    images = g['images']
    labels = g['labels']
    learning_rate = g['learning_rate']
    train = g['train']
    loss = g['loss']
    logits = g['logits']
    is_training = g['is_training']

    sess = tf.Session()
    pwd = '/home/pkushi/CNNlog/CNN/BN_test'  # NOTE(review): unused, kept for reference
    sess.run(tf.global_variables_initializer())

    # Overwrite the freshly-initialized variables with the trained checkpoint.
    saver = tf.train.Saver()
    saver.restore(
        sess,
        '/home/pkushi/CNNlog/CNN/tremor_with_norm_BN_test5/para_6000/')

    # Intermediate tensors collected by the graph-building code (batch-norm
    # statistics and layer outputs), useful when inspecting activations.
    mean = tf.get_collection('mean')[-1]
    var = tf.get_collection('var')[-1]
    BN_result = tf.get_collection('BN_result')[-1]  # fixed typo: was BN_reuslt
    wx_result = tf.get_collection('wx_result')[-1]
    conv_result = tf.get_collection('conv_result')
    # NOTE(review): fetchVariables is assembled but never passed to sess.run
    # in this function; kept in case it is used interactively.
    fetchVariables = [
        loss, logits, mean, var, BN_result, wx_result, conv_result
    ]

    for _ in range(1):  # single visualization pass
        trainBatch = data_test.get_tremors(NUM)
        feedData = {
            images: trainBatch[0],
            labels: trainBatch[1],
            learning_rate: 1e-3,
            is_training: False  # inference mode: use stored batch-norm stats
        }
        tf_cnnvis.deconv_visualization(
            sess,
            feedData,
            input_tensor=None,
            layers='p',
            path_logdir='/home/pkushi/CNNlog/vis/log',
            path_outdir='/home/pkushi/CNNlog/vis/output')
        print(1)

    return sess
コード例 #2
0
def visualize_features(graph, session, example, input_images_path,
                       output_images_path, log_dir):
    """Run deconvolution visualization for one example image.

    Looks up the network's input placeholder by its well-known name
    ('vgg/images:0') and feeds the loaded example through
    tf_cnnvis.deconv_visualization over the 'r' (ReLU, per tf_cnnvis
    convention) layers, writing logs and images to the given paths.
    """
    input_placeholder = graph.get_tensor_by_name('vgg/images:0')
    example_image = load_example(input_images_path, example)

    tf_cnnvis.deconv_visualization(session,
                                   {input_placeholder: example_image},
                                   input_tensor=None,
                                   layers='r',
                                   path_logdir=log_dir,
                                   path_outdir=output_images_path)
コード例 #3
0
def visualize_by_deconvolution(session, input_tensor, feed_dict,
                               output_dir_path):
    """Deconvolution visualization over the 'r', 'p' and 'c' layer groups
    (tf_cnnvis shorthand), writing logs to <output_dir_path>/vis_log_deconv
    and images to <output_dir_path>/vis.  Returns tf_cnnvis's result.
    """
    import tf_cnnvis

    log_dir = os.path.join(output_dir_path, 'vis_log_deconv')
    image_dir = os.path.join(output_dir_path, 'vis')
    return tf_cnnvis.deconv_visualization(sess_graph_path=session,
                                          value_feed_dict=feed_dict,
                                          input_tensor=input_tensor,
                                          layers=['r', 'p', 'c'],
                                          path_logdir=log_dir,
                                          path_outdir=image_dir)
コード例 #4
0
def visualize_saliency_of_output(FLAGS, model, input_images=None):
    """Extract saliency (deconvolution) maps of the output control node(s)
    for a set of visualization images.

    The combined figure (originals on the first row, one row per
    layer/channel below) is saved to
    FLAGS.summary_dir + FLAGS.log_tag + '/saliency_maps.jpg'.

    FLAGS: the settings defined in main.py
    model: the loaded pilot-model object
    input_images: list of absolute paths to images used to extract the
      maps; when None or empty, a predefined directory is used.
    """
    # Avoid the mutable-default-argument pitfall (was ``input_images=[]``):
    # use None as the default and normalize here; behavior is unchanged.
    if not input_images:
        img_dir = '/esat/opal/kkelchte/docker_home/pilot_data/visualization_images'
        input_images = sorted([img_dir + '/' + f for f in os.listdir(img_dir)])

    print("[tools.py]: extracting saliency maps of {0} in {1}".format(
        [os.path.basename(i) for i in input_images],
        os.path.dirname(input_images[0])))

    inputs = load_images(input_images, model.input_size[1:])

    # Python-3 compatible print (was a Python-2 print statement).
    print('shape: ', inputs.shape)

    if 'nfc' in FLAGS.network:
        # n-frame networks expect the frames stacked along the channel axis.
        inputs = np.concatenate([inputs] * FLAGS.n_frames, axis=-1)

    # extract deconvolution
    import tf_cnnvis

    # Cut the trailing ':0' to turn the tensor name into an operation name.
    layers = [model.endpoints['eval']['outputs'].name[:-2]]

    results = tf_cnnvis.deconv_visualization(
        sess_graph_path=model.sess,
        value_feed_dict={model.inputs: inputs},
        layers=layers)

    # Normalize each deconvolution map into the 0:1 range.
    num_rows = 0
    clean_results = {}
    for k in results:  # loop over layers
        clean_results[k] = []
        for c in range(len(results[k])):  # loop over channels
            num_rows += 1
            clean_results[k].append(np.zeros(results[k][c].shape[0:3]))
            for i in range(results[k][c].shape[0]):  # loop over images
                clean_results[k][c][i] = deprocess_image(results[k][c][i],
                                                         one_channel=True)
    if num_rows > 6:
        print(
            "[tools.py]: There are too many columns to create a proper image.")
        return

    # One combined image: each input image on its own column (max 5),
    # the originals on the first row and the saliency overlays below.
    fig, axes = plt.subplots(num_rows + 1,
                             min(len(input_images), 5),
                             figsize=(23, 4 * (2 * len(results.keys()) + 1)))
    for i in range(axes.shape[1]):
        axes[0, i].set_title(os.path.basename(input_images[i]).split('.')[0])
        axes[0, i].imshow(matplotlibprove(inputs[i]), cmap='inferno')
        axes[0, i].axis('off')

    # Add the deconvolutions row by row: one row per (layer, channel).
    row_index = 1
    for k in results:  # go over layers
        for c in range(len(results[k])):  # one row per channel
            for i in range(axes.shape[1]):  # fill row going over input images
                axes[row_index, i].set_title(k + '_' + str(c))
                # Overlay: the cleaned saliency map appended to the input
                # image as an extra channel.
                axes[row_index, i].imshow(
                    np.concatenate(
                        (inputs[i],
                         np.expand_dims(clean_results[k][c][i], axis=2)),
                        axis=2))
                axes[row_index, i].axis('off')
            row_index += 1
    plt.savefig(FLAGS.summary_dir + FLAGS.log_tag + '/saliency_maps.jpg',
                bbox_inches='tight')
コード例 #5
0
    # NOTE(review): this excerpt starts mid-function; ``sess``, ``vars``,
    # ``stuff``, ``t_input`` and ``input_img`` are defined above this view.
    TESTING = False
    if TESTING:
        # Debug aid: dump the collected variables/tensors.
        for x in vars:
            print(x)
        for x in stuff:
            print(x)
    # Activation maps of the 'r' layers, logged under actLog/, images under Output/.
    tf_cnnvis.activation_visualization(sess,
                                       value_feed_dict={t_input: input_img},
                                       layers='r',
                                       path_logdir='actLog',
                                       path_outdir='Output')

    # Deconvolution of the same 'r' layers, logged under deconLog/.
    tf_cnnvis.deconv_visualization(sess,
                                   value_feed_dict={t_input: input_img},
                                   layers='r',
                                   path_logdir='deconLog',
                                   path_outdir='Output')

    # Deep-dream visualization for each of the seven conv layers; the first
    # conv layer's ReLU op carries no numeric suffix in the graph.
    for i in range(1, 8):
        if i == 1:
            layer = 'conv2d/Relu'
        else:
            layer = 'conv2d_%d/Relu' % (i)
        tf_cnnvis.deepdream_visualization(sess,
                                          value_feed_dict={t_input: input_img},
                                          layer=layer,
                                          classes=[0, 1, 2, 3, 4],
                                          path_logdir='Log',
                                          path_outdir='Output')
コード例 #6
0
# Rebuild the graph structure from the checkpoint's meta file; the returned
# saver can then restore the trained variable values into a session.
saver = tf.train.import_meta_graph("{}.meta".format(modelpath))

with tf.Session() as sess:
    g = sess.graph

    # Look up the tensors we need by their names in the imported graph.
    max_pool = g.get_tensor_by_name('CNN_MODEL/MaxPool:0')
    X = g.get_tensor_by_name('CNN_MODEL/input:0')
    y = g.get_tensor_by_name("CNN_OP/y_true:0")
    X_in = g.get_tensor_by_name('CNN_MODEL/truediv:0')

    # Initialize once, then restore once.  The original code restored,
    # re-initialized (clobbering the restored weights), and restored again;
    # the end state is identical with a single init + restore.
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, modelpath)

    # Single-example batch: one frame and its label.
    feed_dict = {X: [frame], y: [label]}

    # Visualize the three max-pool outputs of the CNN.
    layers = [
        "CNN_MODEL/MaxPool", "CNN_MODEL/MaxPool_1", "CNN_MODEL/MaxPool_2"
    ]

    deconv_visualization(sess_graph_path=sess,
                         value_feed_dict=feed_dict,
                         input_tensor=X_in,
                         layers=layers,
                         path_logdir=os.path.join("Log", "UCF4"),
                         path_outdir=os.path.join("Output", "UCF4"))
コード例 #7
0
def visualize(sess, model, dataset):
    """Save png of deconvolution image from first image in test set"""
    first_test_image = dataset.x_test[0:1, :, :, :]
    deconv_visualization(sess, {model.input: first_test_image})