Example #1
        if config["plot_reduce"]:
            vector_plot = vector_plot[...,highlight]
            highlight = None
    except KeyError:
        pass

    if do_plot:
        # undo model pre-processing of load_data: min-max rescale back to [0, 1] for display
        images = data - np.min(data)
        images = images / np.max(images)
        directory = os.path.join("models/saved", config['config_name'])

        # animation
        # TODO: figure out why .mp4 output does not work (using .gif instead)
        plot_cnn_output(vector_plot, directory, config["config_name"] + "_" + resp_type + ".gif",
                        title=config["config_name"],
                        image=orig_data,
                        video=True, highlight=highlight)

    return vector_plot, highlight


if __name__ == "__main__":
    # load config
    # t0001: human_anger, t0002: human_fear, t0003: monkey_anger, t0004: monkey_fear  --> plot cnn_output
    # t0005: human_anger, t0006: human_fear, t0007: monkey_anger, t0008: monkey_fear  --> plot difference, stride3, highlight max
    # t0009: human_anger, t0010: human_fear, t0011: monkey_anger, t0012: monkey_fear  --> plot difference, first, highlight max
    # t0013: human_anger, t0014: human_fear, t0015: monkey_anger, t0016: monkey_fear  --> plot difference, first, reduce max
    # t0017: human_anger  --> plot difference, stride3, reduce max
    # t0100: human_anger  --> plot maximum
    # t0104: human_anger  --> plot weighted average
    # t0108: human_anger  --> plot 10 biggest values (maximum10)
Example #2
# color_seq = np.arange(len(dyn_eyebrow_preds))
# color_seq[:40] = 0
# color_seq[40:80] = 1
# color_seq[80:] = 0

# for c2 and c3
color_seq = np.arange(len(dyn_eyebrow_preds[1:]))
color_seq[:40] = 0
color_seq[40:80] = 1
color_seq[80:190] = 0
color_seq[190:230] = 2
color_seq[230:] = 0
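
# A hypothetical helper (not part of the original code) that builds such phase
# color sequences from (start, end, label) triples, so the frame ranges above
# don't have to be hand-edited for every condition:
def make_phase_color_seq(n_frames, phases):
    seq = np.zeros(n_frames, dtype=int)  # phase label 0 everywhere by default
    for start, end, label in phases:
        seq[start:end] = label
    return seq

# equivalent to the c2/c3 sequence above:
# color_seq = make_phase_color_seq(len(dyn_eyebrow_preds[1:]), [(40, 80, 1), (190, 230, 2)])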

plot_cnn_output(dyn_eyebrow_preds,
                os.path.join("models/saved", config["config_name"]),
                "00_human_train_dyn_eyebrow_feature_maps_output.gif",
                verbose=True,
                video=True)
plot_ft_map_pos(calculate_position(dyn_eyebrow_preds[1:],
                                   mode="weighted average",
                                   return_mode="xy float"),
                fig_name="00_human_train_dyn_eyebrow_pos.png",
                path=os.path.join("models/saved", config["config_name"]),
                color_seq=color_seq)

plot_cnn_output(test_dyn_eyebrow_preds,
                os.path.join("models/saved", config["config_name"]),
                "00_monkey_test_dyn_eyebrow_feature_maps_output.gif",
                verbose=True,
                video=True)
plot_ft_map_pos(calculate_position(test_dyn_eyebrow_preds[1:],
Example #3
    # plot_cnn_output(preds, os.path.join("models/saved", config["config_name"]),
    #                 config['v4_layer'] + "_eye_brow.gif",
    #                 image=raw_data,
    #                 video=True,
    #                 highlight=feature_map_of_eyebrow)
    #
    # plot_cnn_output(preds, os.path.join("models/saved", config["config_name"]),
    #                 config['v4_layer'] + "_eye_lips.gif",
    #                 image=raw_data,
    #                 video=True,
    #                 highlight=feature_map_of_lips)
    #
    # plot selection of feature maps
    plot_cnn_output(preds_eyebrow,
                    os.path.join("models/saved", config["config_name"]),
                    config['v4_layer'] + "_eye_brow_selection.gif",
                    image=raw_data,
                    video=True,
                    verbose=False)
    print("[TEST] Finished plotting eyebrow selection")
    #
    # plot_cnn_output(preds_lips, os.path.join("models/saved", config["config_name"]),
    #                 config['v4_layer'] + "_lips_selection.gif",
    #                 image=raw_data,
    #                 video=True)

    # plot dynamic selection
    plot_cnn_output(dyn_preds_eyebrow,
                    os.path.join("models/saved", config["config_name"]),
                    config['v4_layer'] + "_dyn_eye_brow_selection.gif",
                    image=raw_data,
                    video=True,
Example #4
def predict_expression(expression, ft_idx, test_num=None):
    # set config
    config["train_expression"] = [expression]

    # -----------------------------------------------------------------------------------------------------------------
    # human avatar
    config["train_avatar"] = "human_orig"

    # load data
    data = load_data(config)[0]

    # predict and filter responses
    print("[PE] Human {} loaded".format(expression))
    print("[PE] shape data", np.shape(data))
    print("[PE] Start predictions")
    preds_human = model.predict(data)
    preds_human = get_feat_map_filt_preds(preds_human,
                                          ft_idx,
                                          ref_type="self0",
                                          norm=1000,
                                          activation='ReLu',
                                          filter='spatial_mean',
                                          verbose=True)
    print("[PE] shape preds Human", np.shape(preds_human))
    print("[PE] min max preds Human", np.amin(preds_human),
          np.amax(preds_human))

    # get xy positions
    preds_human_pos = calculate_position(preds_human,
                                         mode="weighted average",
                                         return_mode="xy float")
    print("[PE] shape preds Human positions", np.shape(preds_human_pos))

    # # filter feat maps
    # preds_human_cleaned = filter_feature_maps(preds_human)
    # preds_human_pos_cleaned = calculate_position(preds_human_cleaned, mode="weighted average", return_mode="xy float")

    # -----------------------------------------------------------------------------------------------------------------
    # Monkey Avatar
    config["train_avatar"] = "monkey_orig"

    # load data
    data = load_data(config)[0]
    print("[PE] Monkey {} loaded".format(expression))
    print("[PE] shape data", np.shape(data))
    print("[PE] Start predictions")
    preds_monkey = model.predict(data)
    preds_monkey = get_feat_map_filt_preds(preds_monkey,
                                           ft_idx,
                                           ref_type="self0",
                                           norm=1000,
                                           activation='ReLu',
                                           filter='spatial_mean',
                                           verbose=True)
    print("[PE] shape preds Human", np.shape(preds_monkey))
    print("[PE] min max preds Human", np.amin(preds_monkey),
          np.amax(preds_monkey))

    # get xy positions
    preds_monkey_pos = calculate_position(preds_monkey,
                                          mode="weighted average",
                                          return_mode="xy float")
    print("[PE] shape preds Monkey positions", np.shape(preds_monkey_pos))

    # # filter feat maps
    # preds_monkey_cleaned = filter_feature_maps(preds_monkey)
    # preds_monkey_pos_cleaned = calculate_position(preds_monkey_cleaned, mode="weighted average", return_mode="xy float")

    # -----------------------------------------------------------------------------------------------------------------
    # plot raw feature maps
    print("[PE] Create plot")

    # plot human
    cnn_output_name = 'test_human_cnn_output.gif'
    if test_num is not None:
        cnn_output_name = 'test{}_human_cnn_output.gif'.format(test_num)

    plot_cnn_output(preds_human, save_path, cnn_output_name, video=True)

    # # plot human filtered feature maps
    # cnn_output_name = 'test_human_filtered_cnn_output.gif'
    # if test_num is not None:
    #     cnn_output_name = 'test{}_human_filtered_cnn_output.gif'.format(test_num)
    #
    # plot_cnn_output(preds_human_cleaned, save_path, cnn_output_name, video=True)

    # plot monkey
    cnn_output_name = 'test_monkey_cnn_output.gif'
    if test_num is not None:
        cnn_output_name = 'test{}_monkey_cnn_output.gif'.format(test_num)

    plot_cnn_output(preds_monkey, save_path, cnn_output_name, video=True)

    # # plot monkey cleaned
    # cnn_output_name = 'test_monkey_filtered_cnn_output.gif'
    # if test_num is not None:
    #     cnn_output_name = 'test{}_monkey_filtered_cnn_output.gif'.format(test_num)
    #
    # plot_cnn_output(preds_monkey_cleaned, save_path, cnn_output_name, video=True)

    # plot slice predictions
    # plot raw responses of the dynamic
    max_pred = np.amax([preds_human, preds_monkey])
    plt.figure()
    plt.subplot(2, 2, 1)
    plt.plot(preds_human[:, :, slice_pos_eyebrow, 0])  # slice along column slice_pos_eyebrow to capture the eyebrow
    plt.ylim(0, max_pred)
    plt.title("Human Avatar Eyebrow")
    plt.subplot(2, 2, 2)
    plt.plot(preds_monkey[:, :, slice_pos_eyebrow, 0])  # slice along column slice_pos_eyebrow to capture the eyebrow
    plt.ylim(0, max_pred)
    plt.title("Monkey Avatar Eyebrow")
    plt.subplot(2, 2, 3)
    plt.plot(preds_human[:, :, slice_pos_lips, 1])  # slice along column slice_pos_lips to capture the lips
    plt.ylim(0, max_pred)
    plt.title("Human Avatar Lips")
    plt.subplot(2, 2, 4)
    plt.plot(preds_monkey[:, :, slice_pos_lips, 1])  # slice along column slice_pos_lips to capture the lips
    plt.ylim(0, max_pred)
    plt.title("Monkey Avatar Lips")
    plt.suptitle(expression)

    if test_num is not None:
        plt.savefig(
            os.path.join(
                save_path,
                "test{}_Hum_vs_Monk_{}_expression_slice_eb{}_lips_eb{}".format(
                    test_num, expression, slice_pos_eyebrow, slice_pos_lips)))
    else:
        plt.savefig(
            os.path.join(
                save_path,
                "test_Hum_vs_Monk_{}_expression_slice_eb{}_lips_eb{}".format(
                    expression, slice_pos_eyebrow, slice_pos_lips)))

    # plot positions
    # set color to represent time
    color_seq = np.arange(len(preds_human_pos))

    plt.figure()
    plt.subplot(2, 2, 1)
    plt.scatter(preds_human_pos[:, 1, 0],
                preds_human_pos[:, 0, 0],
                c=color_seq)
    plt.xlim(12, 15)
    plt.colorbar()
    plt.title("Human Avatar Eyebrow")
    plt.subplot(2, 2, 2)
    plt.scatter(preds_monkey_pos[:, 1, 0],
                preds_monkey_pos[:, 0, 0],
                c=color_seq)
    plt.xlim(12, 15)
    plt.colorbar()
    plt.title("Monkey Avatar Eyebrow")
    plt.subplot(2, 2, 3)
    plt.scatter(preds_human_pos[:, 1, 1],
                preds_human_pos[:, 0, 1],
                c=color_seq)
    plt.xlim(12, 15)
    plt.colorbar()
    plt.title("Human Avatar Lips")
    plt.subplot(2, 2, 4)
    plt.scatter(preds_monkey_pos[:, 1, 1],
                preds_monkey_pos[:, 0, 1],
                c=color_seq)
    plt.xlim(12, 15)
    plt.colorbar()
    plt.title("Monkey Avatar Lips")

    plt.suptitle(expression + " xy-pos")

    if test_num is not None:
        plt.savefig(
            os.path.join(
                save_path,
                "test{}_Hum_vs_Monk_{}_expression_pos_eb{}_lips_eb{}".format(
                    test_num, expression, slice_pos_eyebrow, slice_pos_lips)))
    else:
        plt.savefig(
            os.path.join(
                save_path,
                "test_Hum_vs_Monk_{}_expression_pos_eb{}_lips_eb{}".format(
                    expression, slice_pos_eyebrow, slice_pos_lips)))
Example #5
# get IT responses of the model
it_test = nb_model._get_it_resp(dyn_test_pos)

# test by training new ref
nb_model._fit_reference([dyn_test_pos, data[1]], config['batch_size'])
it_ref_test = nb_model._get_it_resp(dyn_test_pos)

# --------------------------------------------------------------------------------------------------------------------
# plots
# ***********************       test 00 raw output      ******************

# raw activity
plot_cnn_output(preds,
                os.path.join("models/saved", config["config_name"]),
                "00_feature_maps_output.gif",
                verbose=True,
                video=True)
plot_cnn_output(dyn_preds,
                os.path.join("models/saved", config["config_name"]),
                "00_dyn_feature_maps_output.gif",
                verbose=True,
                video=True)
plot_cnn_output(test_preds,
                os.path.join("models/saved", config["config_name"]),
                "00_test_feature_maps_output.gif",
                verbose=True,
                video=True)
plot_cnn_output(test_dyn_preds,
                os.path.join("models/saved", config["config_name"]),
                "00_test_dyn_feature_maps_output.gif",
Example #6
    #
    # template = np.concatenate((eyebrow_template, lips_template), axis=3)
    # template = eyebrow_template
    # print("[TRAIN] shape template", np.shape(template))
    # template[template < 0.1] = 0

    # compute positions
    pos = calculate_position(template,
                             mode="weighted average",
                             return_mode="xy float flat")
    print("[TRAIN] shape pos", np.shape(pos))

    if plot_intermediate:
        plot_cnn_output(template,
                        os.path.join("models/saved", config["config_name"]),
                        "00_template.gif",
                        verbose=True,
                        video=True)

        # reshape the flat xy positions back to (frames, n_feature_maps, 2) for plotting
        test_pos_2d = np.reshape(pos, (len(pos), -1, 2))
        plot_ft_map_pos(test_pos_2d,
                        fig_name="00b_human_pos.png",
                        path=os.path.join("models/saved",
                                          config["config_name"]))

        # test_max_preds = np.expand_dims(np.amax(test_preds, axis=3), axis=3)
        test_max_preds = np.expand_dims(np.amax(eyebrow_preds, axis=3), axis=3)
        # test_max_preds = np.expand_dims(np.amax(lips_preds, axis=3), axis=3)
        preds_plot = test_max_preds / np.amax(test_max_preds) * 255
        print("shape preds_plot", np.shape(preds_plot))
        plot_ft_pos_on_sequence(pos,
Example #7
print("[TEST 4] shape preds_pos", np.shape(preds_pos))
dyn_preds_pos = calculate_position(dyn_eye_brow_mean_pred,
                                   mode="weighted average",
                                   return_mode="array")
print("[TEST 4] shape dyn_preds_pos", np.shape(dyn_preds_pos))

# concatenate prediction and position for plotting
results = np.concatenate(
    (eye_brow_mean_pred, preds_pos, dyn_eye_brow_mean_pred, dyn_preds_pos),
    axis=3)
print("[TEST 4] shape results", np.shape(results))

# plot means feature maps and positions
plot_cnn_output(results,
                os.path.join("models/saved", config["config_name"]),
                "test4_" + config['v4_layer'] + "_mean_feature_map.gif",
                image=raw_seq,
                video=True,
                verbose=False)
print("[Test 4] Finish plotting results")
print()

# # ----------------------------------------------------------------------------------------------------------------------
# # test 5 - test mean feature maps on lips
# print("[TEST 5] Test on lips units")
# lips_ft_idx = [79, 120, 125, 0, 174, 201, 193, 247, 77, 249, 210, 149, 89, 197, 9, 251, 237, 165, 101, 90, 27, 158, 154,
#                10, 168, 156, 44, 23, 34, 85, 207]
#
# # load c3 expression to test mouth movements
# config["train_expression"] = ["c3"]
# data = load_data(config)
# raw_seq = load_data(config, get_raw=True)[0]
Example #8
# create alternative format with tf.keras.applications.vgg19.preprocess_input
# in its default 'caffe' mode this converts the image from RGB to BGR and
# zero-centers each channel with the ImageNet means (no rescaling to [0..1])
image_pre = tf.keras.applications.vgg19.preprocess_input(np.copy(image))
print("new shape:", image_pre.shape)
print("new format:", image_pre.dtype)
print("new value range:", [np.min(image_pre), np.max(image_pre)])
# new shape: (224, 224, 3)
# new format: float64
# new value range: [-123.68, 116.061]


### CHECK OUTPUT ###
# create model
model = tf.keras.applications.VGG19(include_top=False, weights=config['weights'], input_shape=(224,224,3))
model = tf.keras.Model(inputs=model.input, outputs=model.get_layer(config['v4_layer']).output)

# check output original format
response = model.predict(np.array([image]))[0]
plot_cnn_output(response, path, "original.png", title="original image", image=image)

# check output preprocessed format
response_pre = model.predict(np.array([image_pre]))[0]
plot_cnn_output(response_pre, path, "preprocessed.png", title="preprocessed image", image=image_pre)

# compare response
diff = response - response_pre
print("norm of difference:", np.linalg.norm(diff))
print("range of difference:", [np.min(diff), np.max(diff)])
plot_cnn_output(diff, path, "difference_response.png")
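
# For reference, a minimal sketch (not from the original code) of what
# vgg19.preprocess_input does in its default 'caffe' mode, per the Keras docs:
# flip RGB to BGR and subtract the ImageNet channel means, with no rescaling.
def vgg19_preprocess_manual(img):
    img = img[..., ::-1].astype("float64")  # RGB -> BGR
    img -= [103.939, 116.779, 123.68]       # ImageNet channel means, BGR order
    return img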
Example #9
cat1_monkey = ref_monkey + tun1_monkey
cat2_monkey = ref_monkey + tun2_monkey

# reshape each response vector in place to (28, 28, 256); assigning to .shape mutates the array
for vector in [
        ref_human, tun1_human, tun2_human, cat1_human, cat2_human, ref_monkey,
        tun1_monkey, tun2_monkey, cat1_monkey, cat2_monkey
]:
    vector.shape = (28, 28, 256)

# plot vectors
path = os.path.join("../../models/saved", configs[0]["save_name"])
if not os.path.exists(path):
    os.mkdir(path)

plot_cnn_output(ref_human,
                path,
                "ref_human.png",
                title="Average response to neutral expression, human avatar")
plot_cnn_output(tun1_human,
                path,
                "tun1_human.png",
                title="Tuning vector to expression 1, human avatar")
plot_cnn_output(cat1_human,
                path,
                "cat1_human.png",
                title="Average response to expression 1, human avatar")
plot_cnn_output(tun2_human,
                path,
                "tun2_human.png",
                title="Tuning vector to expression 2, human avatar")
plot_cnn_output(cat2_human,
                path,
Example #10
# --------------------------------------------------------------------------------------------------------------------
# plot common feature maps over monkey avatar

config['train_avatar'] = 'monkey_orig'
# load data
data = load_data(config)

# predict feature maps input
preds = model.predict(data[0], verbose=True)
print("[FIT] shape preds", np.shape(preds))
preds_common = preds[..., com]
print("[FIT] shape preds_common", np.shape(preds_common))

plot_cnn_output(preds_common,
                os.path.join("models/saved", config["config_name"]),
                config['train_avatar'] + "_common_PCA" + "_selection.gif",
                video=True,
                verbose=True)

# compute dynamic changes
preds_ref = preds_common[0]
preds_dyn_com = preds_common - np.repeat(
    np.expand_dims(preds_ref, axis=0), len(preds_common), axis=0)
preds_dyn_com[preds_dyn_com < 0] = 0
preds_dyn_com /= np.amax(preds_dyn_com)
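
# The three steps above are equivalent to this more compact form (a sketch
# using broadcasting and np.clip):
# preds_dyn_com = np.clip(preds_common - preds_common[0], 0, None)
# preds_dyn_com /= np.amax(preds_dyn_com)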

plot_cnn_output(preds_dyn_com,
                os.path.join("models/saved", config["config_name"]),
                config['train_avatar'] + "_common_PCA_dyn" + "_selection.gif",
                video=True,
                verbose=True)
Example #11
# get IT responses of the model
it_test = nb_model._get_it_resp(dyn_test_pos)

# test by training new ref
nb_model._fit_reference([dyn_test_pos, test_data[1]], config['batch_size'])
it_ref_test = nb_model._get_it_resp(dyn_test_pos)

# --------------------------------------------------------------------------------------------------------------------
# plots
# ***********************       test 00 raw output      ******************

# raw activity
plot_cnn_output(max_lips_preds,
                os.path.join("models/saved", config["config_name"]),
                "00_max_feature_maps_lips_output.gif",
                verbose=True,
                video=True)
# plot_cnn_output(preds, os.path.join("models/saved", config["config_name"]),
#                 "00_max_feature_maps_output.gif", verbose=True, video=True)
# plot_cnn_output(dyn_preds, os.path.join("models/saved", config["config_name"]),
#                 "00_dyn_max_feature_maps_output.gif", verbose=True, video=True)
# plot_cnn_output(test_preds, os.path.join("models/saved", config["config_name"]),
#                 "00_test_max_maps_output.gif", verbose=True, video=True)
# plot_cnn_output(test_dyn_preds, os.path.join("models/saved", config["config_name"]),
#                 "00_test_dyn_max_feature_maps_output.gif", verbose=True, video=True)

# # for only c2
# color_seq = np.arange(len(dyn_eyebrow_preds))
# color_seq[:40] = 0
# color_seq[40:80] = 1
Example #12
cat_feature_map_indexes = get_IoU_per_category(sem_idx_list, cat_ids)
print("[Index] Computed category index for {}".format(cat_ids))
print()
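# (the returned dictionary appears to be keyed per category and per layer, e.g.
#  {"category_<id>": {"<layer>": {"indexes": [...]}}}, as inferred from the
#  lookup in the loop below)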

layer_of_interest = config["v4_layer"]
for i, cat_id in enumerate(cat_ids):
    print("idx for:", config['semantic_units'][i])
    # get layer idx
    try:
        # get indexes from dictionary
        feature_map = cat_feature_map_indexes["category_{}".format(
            cat_id)][layer_of_interest]["indexes"]
        print("[Index] feature map indexes for category {} at layer {}: {}".
              format(cat_id, layer_of_interest, feature_map))
        print("[Index] num of selected feature map: {}".format(
            len(feature_map)))

    except KeyError:
        print("no index exists for this concept on this layer!")
print()
# --------------------------------------------------------------------------------------------------------------------
# plot

# plot selection of feature maps
plot_cnn_output(preds,
                os.path.join("models/saved", config["config_name"]),
                config['v4_layer'] + "_selection.gif",
                video=True,
                verbose=True)
print("[TEST] Finished plotting ft maps")
print()
Example #13
import os

import numpy as np
import tensorflow as tf

from plots_utils.plot_cnn_output import plot_cnn_output
from utils.load_config import load_config
from utils.load_data import load_data

# load config
config = load_config("norm_base_basic_shape_t0002.json")

# make folder
folder = os.path.join("../../models/saved", config["save_name"])
if not os.path.exists(folder):
    os.mkdir(folder)

# cnn
model = tf.keras.applications.VGG19(include_top=False,
                                    weights="imagenet",
                                    input_shape=(224, 224, 3))
model = tf.keras.Model(inputs=model.input,
                       outputs=model.get_layer("block3_pool").output)

# calculate and plot response
images = load_data(config, train=config["subset"])
response = model.predict(np.array(images))
for i, image in enumerate(images):
    plot_cnn_output(response[i],
                    os.path.join(folder, config["sub_folder"]),
                    f"plot{i}.png",
                    title="block3_pool response",
                    image=image)
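
# For 224x224 inputs, VGG19's block3_pool output has shape (28, 28, 256), so
# each response[i] passed to plot_cnn_output here is a (28, 28, 256) tensor.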