Example No. 1
# load config
# t0001: human_anger, t0002: human_fear, t0003: monkey_anger, t0004: monkey_fear  --> plot cnn_output
# t0005: human_anger, t0006: human_fear, t0007: monkey_anger, t0008: monkey_fear  --> plot difference, stride3, highlight max
# t0009: human_anger, t0010: human_fear, t0011: monkey_anger, t0012: monkey_fear  --> plot difference, first, highlight max
# t0013: human_anger, t0014: human_fear, t0015: monkey_anger, t0016: monkey_fear  --> plot difference, first, reduce max
# t0017: human_anger  --> plot difference, stride3, reduce max
# t0100: human_anger  --> plot maximum
# t0104: human_anger  --> plot weighted average
# t0108: human_anger  --> plot 10 biggest values (maximum10)
config = load_config("norm_base_animate_cnn_response_t0001.json", path="configs/norm_base_config")

# load images
images, _ = load_data(config, train=config["dataset"])

# load model
normbase = NormBase(config, (224, 224, 3))

# calculate vector and options for plot
if config["plot_option"]=='cnn_output':
    # plot cnn_response
    vector_plot = normbase.evaluate_v4(images, flatten=False)
elif config["plot_option"]=='cnn_output_difference':
    # take difference between response and reference, reference has different options
    response = normbase.evaluate_v4(images, flatten=False)
    if config["difference_option"]=='first':
        reference = response[0,...]
    elif 'stride' in config["difference_option"]:
        stride_length=int(config["difference_option"][6:])
        reference = np.roll(response, shift=stride_length, axis=0)
        reference[:stride_length,...] = response[:stride_length,...]
    elif config["difference_option"]=="reference":
Example No. 2
config_path = '../../configs/norm_base_config'
# config_name = 'norm_base_monkey_test.json'
config_name = 'norm_base_affectNet_sub8_4000.json'
config_file_path = os.path.join(config_path, config_name)
print("config_file_path", config_file_path)

# load norm_base_config file
with open(config_file_path) as json_file:
    config = json.load(json_file)

# load data
data = load_data(config, train=False, sort_by=['image'])
print("[Data] -- Data loaded --")

# create model
norm_base = NormBase(config, input_shape=(224, 224, 3))

# "load" model
load_folder = os.path.join("../../models/saved", config['save_name'])
r = np.load(os.path.join(os.path.join(load_folder, "ref_vector.npy")))
t = np.load(os.path.join(os.path.join(load_folder, "tuning_vector.npy")))
norm_base.set_ref_vector(r)
norm_base.set_tuning_vector(t)
print("[MODEL] Set ref vector", np.shape(r))
print("[MODEL] Set tuning vector", np.shape(t))

# evaluate the model to get the IT responses
# it_resp = norm_base.predict(data)
_, it_resp, _ = norm_base.evaluate(data)
print("shape it_resp", np.shape(it_resp))
plt.plot(it_resp)
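A minimal way to finish the figure, assuming matplotlib.pyplot is imported as plt (the original snippet stops after plt.plot):

plt.title("IT response")
plt.xlabel("frame")
plt.ylabel("response")
plt.show()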
Example No. 3
    Example of use
    
    2021/01/04
    - model is trained in separate steps (specified in config)
    """

    config = load_config("norm_base_plotDirections_t0012.json")
    save_name = config["sub_folder"]
    retrain = False

    # model
    try:
        if retrain:
            raise IOError("retrain = True")
        norm_base = NormBase(config,
                             input_shape=(224, 224, 3),
                             save_name=save_name)
    except IOError:
        norm_base = NormBase(config, input_shape=(224, 224, 3))

        norm_base.fit(load_data(config,
                                train=config["train_dim_ref_tun_ref"][0]),
                      fit_dim_red=True,
                      fit_ref=False,
                      fit_tun=False)
        norm_base.fit(load_data(config,
                                train=config["train_dim_ref_tun_ref"][1]),
                      fit_dim_red=False,
                      fit_ref=True,
                      fit_tun=False)
        norm_base.fit(load_data(config,
                                train=config["train_dim_ref_tun_ref"][2]),
                      fit_dim_red=False,
                      fit_ref=False,
                      fit_tun=True)
# load configuration
config_name = "norm_base_investigate_PCA_m0001.json"
config = load_config(config_name)

# fit one model per avatar condition
avatars = ["human_orig", "monkey_orig", "all_orig"]
# avatars = ["human_orig"]
indexes = []
# pca_threshold = [300, 300, 1500]
pca_threshold = [600, 600, 2000]
for i, avatar in enumerate(avatars):
    # modify condition according to loop
    config['train_avatar'] = avatar

    # define and train norm base model
    norm_base = NormBase(config, input_shape=(224, 224, 3))
    norm_base.pca.var_threshold = pca_threshold[i]
    norm_base.fit(load_data(config, train=True),
                  fit_dim_red=True,
                  fit_ref=False,
                  fit_tun=False)

    # get the indices of the features with the highest variance
    predict_v4 = norm_base.v4_predict
    var_predict = np.std(predict_v4, axis=0)
    index = np.flip(np.argsort(var_predict))[:config['PCA']]

    # save index
    indexes.append(np.array(index))
indexes = np.array(indexes)
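One possible use of the collected indices: checking how strongly the selected features overlap between avatars. A sketch, assuming the avatar order human_orig, monkey_orig, all_orig from the loop above:

# overlap between the features selected for the human and monkey avatars
common = np.intersect1d(indexes[0], indexes[1])
print("human/monkey shared features: {}/{}".format(len(common), len(indexes[0])))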
from models.NormBase import NormBase

# t0001: 2-norm     t0002: 1-norm   t0003: simplified   t0004: direction-only   t0005: expressivity-direction
# t0006: 2-norm-monkey_morph

do_reverse = False
do_normalize = False

config = load_config("norm_base_reproduce_ICANN_t0015.json",
                     path="configs/norm_base_config")
save_name = config["sub_folder"]
save_folder = os.path.join("models/saved", config['save_name'], save_name)
accuracy = np.load(os.path.join(save_folder, "accuracy.npy"))
it_resp = np.load(os.path.join(save_folder, "it_resp.npy"))
labels = np.load(os.path.join(save_folder, "labels.npy"))
norm_base = NormBase(config, input_shape=(224, 224, 3))

print("accuracy", accuracy)
print("it_resp.shape", it_resp.shape)
print("labels.shape", labels.shape)

# colors 0=Neutral=Black, 1=Threat=Yellow, 2=Fear=Blue, 3=LipSmacking=Red
colors = config['colors']
titles = config['condition']
seq_length = config['seq_length']
n_cat = config['n_category']
labels = config['labels']  # note: overwrites the labels array loaded from file above

if config.get('use_ggplot') is not None:
    use_ggplot = config['use_ggplot']
    if use_ggplot:
config['tau_y'] = 2  # 15
config['tau_d'] = 2

# --------------------------------------------------------------------------------------------------------------------
# train model
# load training data
train_data = load_data(config)
train_data[0] = segment_sequence(train_data[0], config['train_seq_start_idx'],
                                 config['seq_length'])
train_data[1] = segment_sequence(train_data[1], config['train_seq_start_idx'],
                                 config['seq_length'])
print("[LOAD] Shape train_data segmented", np.shape(train_data[0]))
print("[LOAD] Shape train_label segmented", np.shape(train_data[1]))

# fit and save model
norm_base = NormBase(config, input_shape=(224, 224, 3))
expr_neurons, face_neurons, differentiators = norm_base.fit(
    train_data, get_differentiator=True, get_it_resp=True)
norm_base.save()

# --------------------------------------------------------------------------------------------------------------------
# plot
norm_base.plot_it_neurons_per_sequence(face_neurons,
                                       title="01_train",
                                       save_folder=os.path.join(
                                           "models/saved",
                                           config['config_name']),
                                       normalize=True)
# re-arrange the conditions from (fear, lip_smack, threat) to (threat, fear, lip_smack)
norm_base.plot_differentiators(differentiators,
                               title="02_train",
Example No. 7
load_model = True
if load_model:
    load_NB_model = True
    fit_dim_red = False
else:
    load_NB_model = False
    fit_dim_red = True

# set read-out layer
config["v4_layer"] = "block4_conv3"

# declare model
model = NormBase(
    config,
    input_shape=tuple(config['input_shape']),
    load_NB_model=load_NB_model
)  # when load_NB_model is True, the saved semantic dictionary is restored as well

if not load_model:
    # fit model
    data = load_data(config, train=True)
    model.fit(
        data,
        fit_dim_red=fit_dim_red,  # set to True the first time this script runs (time-consuming)
        fit_ref=True,
        fit_tun=True)
    model.save()

# show semantic index
save_path = os.path.join("models/saved", config["config_name"])
if not os.path.exists(save_path):
    os.mkdir(save_path)

# load and define model
v4_model = load_extraction_model(config,
                                 input_shape=tuple(config["input_shape"]))
v4_model = tf.keras.Model(inputs=v4_model.input,
                          outputs=v4_model.get_layer(
                              config['v4_layer']).output)
size_ft = tuple(np.shape(v4_model.output)[1:3])
print("[LOAD] size_ft", size_ft)
print("[LOAD] Model loaded")
print()

nb_model = NormBase(config, tuple(config['input_shape']))
# -------------------------------------------------------------------------------------------------------------------
# train

# load data
data = load_data(config)

# predict
preds = v4_model.predict(data[0], verbose=1)
print("[TRAIN] shape prediction", np.shape(preds))

# get feature maps that mimic a semantic selection pipeline
# keep only highest IoU semantic score
eyebrow_preds = preds[..., best_eyebrow_IoU_ft]
print("shape eyebrow semantic feature selection", np.shape(eyebrow_preds))
lips_preds = preds[..., best_lips_IoU_ft]
print("shape lips semantic feature selection", np.shape(lips_preds))
# ----------------------------------------------------------------------------------------------------------------------
# 4: test model with monkey avatar

# ----------------------------------------------------------------------------------------------------------------------
# 5: test transfer learning, predict on monkey avatar using the human avatar training

# ----------------------------------------------------------------------------------------------------------------------
# 6: show that the vectors are orthogonal

# ----------------------------------------------------------------------------------------------------------------------
# 7: train PCA using both avatars
config_name = 'NB_PCA_human_monkey_c2c3_m0001.json'
config = load_config(config_name, path='configs/norm_base_config')

# declare model
model = NormBase(config, input_shape=tuple(config['input_shape']))

# train model
model.fit(load_data(config, train=True),
          fit_dim_red=True,
          fit_ref=True,
          fit_tun=True)
model.save_NB_model(config)
# ----------------------------------------------------------------------------------------------------------------------
# 8: train training vector on human avatar with PCA index from both avatars

# ----------------------------------------------------------------------------------------------------------------------
# 9: predict on monkey avatar using the PCA trained on both avatars and tuning vector on human avatar

# ----------------------------------------------------------------------------------------------------------------------
# 10: show that vectors are at exactly 45°
]

# load config
configs = []
for config_name in config_names:
    configs.append(load_config(config_name))

# train models/load model
# fit reference and tuning vector
norm_base_list = []
for config in configs:
    try:
        if retrain:
            raise IOError("retrain = True")
        norm_base = NormBase(config,
                             input_shape=(224, 224, 3),
                             save_name=config["sub_folder"])
    except IOError:
        norm_base = NormBase(config, input_shape=(224, 224, 3))
        dataset = load_data(config, train=config["dataset"])
        norm_base.fit(dataset)  # no feature selection
        norm_base.save_model(config, config["sub_folder"])
    norm_base_list.append(norm_base)

# extract vectors
ref_human = norm_base_list[0].r
tun1_human, tun2_human = norm_base_list[0].t_mean[[1, 2]]
cat1_human = ref_human + tun1_human
cat2_human = ref_human + tun2_human

ref_monkey = norm_base_list[1].r
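To relate this to the orthogonality / 45° checks mentioned in the section comments, the angle between two tuning directions can be computed directly. A sketch, assuming the tuning vectors are flattened to 1-D:

v1, v2 = tun1_human.ravel(), tun2_human.ravel()
cos_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
print("angle between human tuning vectors: {:.1f} deg".format(
    np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))))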
Example No. 11
save_path = os.path.join("models/saved", config["config_name"])
if not os.path.exists(save_path):
    os.mkdir(save_path)

# load and define model
v4_model = load_extraction_model(config,
                                 input_shape=tuple(config["input_shape"]))
v4_model = tf.keras.Model(inputs=v4_model.input,
                          outputs=v4_model.get_layer(
                              config['v4_layer']).output)
size_ft = tuple(np.shape(v4_model.output)[1:3])
print("[LOAD] size_ft", size_ft)
print("[LOAD] Model loaded")
print()

nb_model = NormBase(config, tuple(config['input_shape']))

# --------------------------------------------------------------------------------------------------------------------
# build test case
labels = np.arange(5)  # build one prediction per category
preds = np.zeros((5, 5, 5, 2))
preds[0, 2, 2, :] = 1
preds[1, 1, 1, 0] = 1
preds[1, 4, 2, 1] = 1
preds[2, 0, 0, 0] = 0
preds[2, 3, 2, 1] = 1
print("[TRAIN] preds", np.shape(preds))
print("preds[1]")
print(preds[1, ..., 0])
print("preds[1]")
print(preds[1, ..., 1])
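Given this synthetic test case, the position readout can be checked directly. A sketch: calculate_position is used elsewhere in these examples with this signature, but the mode name here is an assumption:

from utils.calculate_position import calculate_position

# hypothetical check: the position for preds[0, ..., 0] should be (2, 2)
positions = calculate_position(preds, mode="maximum", return_mode="xy float")
print("positions", positions)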
Example No. 12
test the naive PCA selection on the original morphing space dataset from the eLife paper

run: python -m projects.facial_shape_expression_recognition_transfer.01b_NB_morphing_space
"""

train_model = False
predict_it_resp = False

# load config
config_name = 'NB_morphing_space_m0001.json'
config = load_config(config_name, path='configs/norm_base_config')

# --------------------------------------------------------------------------------------------------------------------
# declare model
model = NormBase(config,
                 input_shape=tuple(config['input_shape']),
                 load_NB_model=True)

# train model
if train_model:
    print("[FIT] Train model]")
    data = load_data(config, train=True)
    model.fit(data, fit_dim_red=True, fit_ref=True, fit_tun=True)
    model.save()

# --------------------------------------------------------------------------------------------------------------------
# predict model
if predict_it_resp:
    data = load_data(config, train=False)
    print("[Test] Predict model")
    accuracy, it_resp, labels = model.evaluate(data)
selection_human_expression = [
    frame_neutral_human_expression, frame_expression_human_expression,
    150 + frame_neutral_human_expression,
    150 + frame_expression_human_expression
]
selection_monkey_expression = [
    frame_neutral_monkey_expression, frame_expression_monkey_expression,
    150 + frame_neutral_monkey_expression,
    150 + frame_expression_monkey_expression
]
images = np.concatenate([
    images_human_fear[selection_human_expression],
    images_monkey_threat[selection_monkey_expression]
])  # (8, 224, 224, 3)
# calculate response
norm_base = NormBase(config, (224, 224, 3))
response = norm_base.evaluate_v4(images, flatten=False)  # (8, 28, 28, 256)
# reduce response to selected feature map
response = response[..., n_feature_map]  # (8, 28, 28)
response = np.expand_dims(response, axis=-1)  # (8,28,28,1)

# calculate arrays based on position for different modes
position_array_list = []
position_xy_list = []
for i, mode in enumerate(mode_list):
    position_array_list.append(
        calculate_position(response, mode=mode, return_mode="array"))
    position_xy_list.append(
        calculate_position(response, mode=mode, return_mode="xy float"))
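For reference, a weighted-average position over a feature map is essentially a soft-argmax. A minimal numpy sketch of what such a mode could compute (an assumption, not the project's actual calculate_position):

def weighted_average_position(ft_map):
    # ft_map: (height, width) activation map; returns (x, y) as floats
    h, w = ft_map.shape
    total = np.sum(ft_map) + 1e-8  # guard against all-zero maps
    ys, xs = np.mgrid[0:h, 0:w]
    return np.sum(xs * ft_map) / total, np.sum(ys * ft_map) / total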

# create plot
test a face transfer using the norm-base mechanism

run: python -m projects.facial_shape_expression_recognition_transfer.03_face_part_semantic_feature_map_selection
"""

# load config
config_name = 'NB_morph_space_semantic_pattern_m0002.json'
config = load_config(config_name, path='configs/norm_base_config')

full_train = False

# --------------------------------------------------------------------------------------------------------------------
# train model
if full_train:
    # declare model
    model = NormBase(config, input_shape=tuple(config['input_shape']))

    # load data
    data = load_data(config)

    # fit model
    face_neurons = model.fit(data)

    # save model
    model.save()
else:
    model = NormBase(config, input_shape=tuple(config['input_shape']), load_NB_model=True)

    # load data
    data = load_data(config)
    # plot_sequence(np.array(data[0]).astype(np.uint8), video_name='01_train_sequence.mp4',
Example No. 15
import os

import numpy as np
from utils.load_config import load_config
from utils.load_data import load_data
from models.NormBase import NormBase
from utils.plot_cnn_output import plot_cnn_output
from utils.calculate_position import calculate_position

# load config
config = load_config("norm_base_config_plot_frequency_time_t0001.json")

# load data --> 150 x 224 x 224 x 3
images, _ = load_data(config, train=1)

# load NormBase
norm_base = NormBase(config, (224, 224, 3))

# (training the NormBase model may be needed here, but not at the moment)

# get_preds --> 150 x 512, x and y concatenated
positions = norm_base.get_preds(images)

# calculate fft over time and leave out constant term
rfft = np.fft.rfft(positions, axis=0)
rfft_abs = np.abs(rfft)
power_spectrum = np.square(rfft_abs)[1:]
# calculate the frequencies (30 fps sampling) and leave out the constant term
frequencies = np.fft.rfftfreq(positions.shape[0], d=1/30)[1:]
print("frequencies", frequencies)

# plot fft discarding constant offset (at zero frequency)
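The plotting code is truncated here; a minimal sketch, assuming matplotlib.pyplot as plt:

import matplotlib.pyplot as plt

plt.plot(frequencies, power_spectrum)
plt.xlabel("frequency [Hz]")
plt.ylabel("power")
plt.title("power spectrum of the position signals")
plt.show()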
Example No. 16
save_path = os.path.join("models/saved", config["config_name"])
if not os.path.exists(save_path):
    os.mkdir(save_path)

# load and define model
v4_model = load_extraction_model(config,
                                 input_shape=tuple(config["input_shape"]))
v4_model = tf.keras.Model(inputs=v4_model.input,
                          outputs=v4_model.get_layer(
                              config['v4_layer']).output)
size_ft = tuple(np.shape(v4_model.output)[1:3])
print("[LOAD] size_ft", size_ft)
print("[LOAD] Model loaded")
print()

nb_model_eyebrow = NormBase(config, tuple(config['input_shape']))
nb_model_lips = NormBase(config, tuple(config['input_shape']))
# -------------------------------------------------------------------------------------------------------------------
# train

# load data
data = load_data(config)

# predict
preds = v4_model.predict(data[0], verbose=1)
print("[TRAIN] shape prediction", np.shape(preds))

# get feature maps that mimic a semantic selection pipeline
# keep only highest IoU semantic score
eyebrow_preds = preds[..., best_eyebrow_IoU_ft]
print("shape eyebrow semantic feature selection", np.shape(eyebrow_preds))
Example No. 17
def evaluate_model(config, save_name, legacy=False):
    """
    Run and evaluate the model with the given config.
    The model is saved to models/saved/config['save_name']/save_name.
    If the results were already computed, they are loaded instead of recomputed.
    :param config: configuration dictionary
    :param save_name: sub-folder name used for saving and loading
    :param legacy: set to True to use the old version
    :return: accuracy, it_resp, labels (and ref_vector, tun_vector in legacy mode)
    """
    # folder for saving and loading results
    save_folder = os.path.join("models/saved", config['save_name'], save_name)
    os.makedirs(save_folder, exist_ok=True)

    if not legacy:
        try:
            # load results if available
            accuracy = np.load(os.path.join(save_folder, "accuracy.npy"))
            it_resp = np.load(os.path.join(save_folder, "it_resp.npy"))
            labels = np.load(os.path.join(save_folder, "labels.npy"))
        except IOError:
            try:
                norm_base = NormBase(config,
                                     input_shape=(224, 224, 3),
                                     save_name=save_name)
            except IOError:
                norm_base = NormBase(config, input_shape=(224, 224, 3))
                data_train = load_data(config)
                norm_base.fit(data_train)
                norm_base.save_model(config, save_name)
            data_test = load_data(config, train=False)
            accuracy, it_resp, labels = norm_base.evaluate(data_test)

        return accuracy, it_resp, labels
    else:
        try:
            # load results if available
            accuracy = np.load(os.path.join(save_folder, "accuracy.npy"))
            it_resp = np.load(os.path.join(save_folder, "it_resp.npy"))
            labels = np.load(os.path.join(save_folder, "labels.npy"))
            print("[MODEL] it_resp is available and is loaded from {}".format(
                save_folder))
            # load vectors if available
            ref_vector = np.load(os.path.join(save_folder, "ref_vector.npy"))
            tun_vector = np.load(os.path.join(save_folder,
                                              "tuning_vector.npy"))
        except IOError:
            # calculate results if not available
            print("[LOOP] start training")
            # create model
            norm_base = NormBase(config, input_shape=(224, 224, 3))
            try:
                # load vectors if available
                ref_vector = np.load(
                    os.path.join(save_folder, "ref_vector.npy"))
                tun_vector = np.load(
                    os.path.join(save_folder, "tuning_vector.npy"))
                print(
                    "[MODEL] ref_vector and tun_vector are available and loaded from {}"
                    .format(save_folder))

                norm_base.set_ref_vector(ref_vector)
                norm_base.set_tuning_vector(tun_vector)
                print("[MODEL] Set ref vector", np.shape(ref_vector))
                print("[MODEL] Set tuning vector", np.shape(tun_vector))
            except IOError:
                # calculate vectors if not available
                # load train data
                data_train = load_data(config)
                print("[Data] -- Data loaded --")

                # train model
                norm_base.fit(data_train, batch_size=config['batch_size'])
                ref_vector = norm_base.r
                tun_vector = norm_base.t

                # save model
                np.save(os.path.join(save_folder, "ref_vector"), ref_vector)
                np.save(os.path.join(save_folder, "tuning_vector"), tun_vector)

            print("[LOOP] start prediction")
            # load test data
            data_test = load_data(config, train=False, sort_by=['image'])
            print("[Data] -- Data loaded --")

            # evaluate
            accuracy, it_resp, labels = norm_base.evaluate(data_test)
            np.save(os.path.join(save_folder, "accuracy"), accuracy)
            np.save(os.path.join(save_folder, "it_resp"), it_resp)
            np.save(os.path.join(save_folder, "labels"), labels)

        return accuracy, it_resp, labels, ref_vector, tun_vector
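A usage sketch for evaluate_model, following the config-loading pattern of the other examples (the save_name value here is illustrative):

# hypothetical call; the config name is taken from Example No. 3 above
config = load_config("norm_base_plotDirections_t0012.json")
accuracy, it_resp, labels = evaluate_model(config, save_name="t0012")
print("accuracy", accuracy)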
    "norm_base_investigate_layer_t0003.json"
]

# load config
configs = []
for config_name in config_names:
    configs.append(load_config(config_name))

# train models/load model
norm_base_list = []
for config in configs:
    try:
        if retrain:
            raise IOError("retrain = True")
        norm_base = NormBase(config,
                             input_shape=(224, 224, 3),
                             save_name=config["sub_folder"])
    except IOError:
        norm_base = NormBase(config, input_shape=(224, 224, 3))
        norm_base.fit(load_data(config,
                                train=config["train_dim_ref_tun_ref"][0]),
                      fit_dim_red=True,
                      fit_ref=False,
                      fit_tun=False)
        norm_base.save_model(config, config["sub_folder"])
    norm_base_list.append(norm_base)

# extract PCA
pca1_human = norm_base_list[0].pca.components_[0]
pca1_monkey = norm_base_list[1].pca.components_[0]
config_path = '../../configs/norm_base_config'
# config_name = 'norm_base_monkey_test.json'
config_name = 'norm_base_affectNet_sub8_4000.json'
config_file_path = os.path.join(config_path, config_name)
print("config_file_path", config_file_path)

# load norm_base_config file
with open(config_file_path) as json_file:
    config = json.load(json_file)

# load data
data = load_data(config)
print("[Data] -- Data loaded --")

# create model
norm_base = NormBase(config, input_shape=(224, 224, 3))
# norm_base.print_v4_summary()

# train the model; fit returns the reference vector (m) and the tuning vector (n)
m, n = norm_base.fit(data, batch_size=config['batch_size'])

print("shape m", np.shape(m))
print("shape n", np.shape(n))

# save model
save_folder = os.path.join("../../models/saved", config['save_name'])
if not os.path.exists(save_folder):
    os.mkdir(save_folder)
np.save(os.path.join(save_folder, "ref_vector"), m)
np.save(os.path.join(save_folder, "tuning_vector"), n)