validation_steps=64,
                    callbacks=[tb_callback, cp_callback])

print('TRAINING COMPLETE')
model.save(MODEL_STRUCT_PATH)

for fp in glob(os.path.join(TEST_DATA_PATH, '*', '7000.png')):
    print(fp)
    (fn, _) = os.path.splitext(fp)
    arr = numpy.array(
        load_img(fp,
                 target_size=(CHAR_IMG_HEIGHT, CHAR_IMG_WIDTH),
                 color_mode='rgb',
                 interpolation='nearest'))
    a = get_activations(model, [[arr]], auto_compile=True)
    rp = os.path.join(KERACT_PATH, relative_path(fn, TEST_DATA_PATH))
    display_activations(a, directory=rp, save=True)
    print(f'VISUALIZATION SAVED: {rp}')

print('DONE')

yp = model.predict_generator(test_generator)
yp = numpy.argmax(yp, axis=1)

print('CONFUSION MATRIX:')
print(confusion_matrix(test_generator.classes, yp))
print('CLASSIFICATION REPORT:')
print(
    classification_report(test_generator.classes,
                          yp))
Example #2
    def test_nodes_and_layer_name(self):
        model, x = dummy_model_and_inputs()

        self.assertRaises(
            ValueError, lambda: get_activations(
                model, x, nodes_to_evaluate=[], layer_names='unknown'))
    def fun_to_test(auto_compile):
        get_activations(model, x, auto_compile=auto_compile)
Example #4
lb = LabelEncoder()
pred = np.round(model.predict(x_test, verbose=1))
pred = pred.squeeze().argmax(axis=1)
new_y_test = y_test.astype(int)

mtx = confusion_matrix(new_y_test, pred)
labels = [
    'Angry', 'Disgusted', 'Fearful', 'Happy', 'Neutral', 'Sad', 'Surprised'
]
# labels = ['Guilt', 'Disgust', 'Happy', 'Fear', 'Anger', 'Surprise', 'Sad']
h = plt.figure()
sb.heatmap(mtx,
           annot=True,
           fmt='d',
           yticklabels=labels,
           xticklabels=labels,
           cbar=False)
plt.title('Confusion matrix')
h.savefig("Network/Confusion.pdf", bbox_inches='tight')

# %%
x = np.expand_dims(x_test[2, :, :], 0)
x.shape

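# Fetch the activations of the layer named 'activation' for this single sample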
l_weights = keract.get_activations(model, x, layer_names='activation')

plt.figure()
plt.plot(np.squeeze(l_weights['activation']))

# %%
Example #5
import keras
from keras.layers import Dense
from keras.layers.recurrent import LSTM
from keras.models import Sequential

import keract
import utils
from data import get_mnist_data, num_classes

if __name__ == '__main__':
    x_train, y_train, _, _ = get_mnist_data()

    # (60000, 28, 28, 1) to (60000, 28, 28)
    # LSTM expects (batch, time_steps, input_dim)
    x_train = x_train.squeeze()

    model = Sequential()
    model.add(LSTM(16, input_shape=(28, 28)))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    utils.print_names_and_shapes(keract.get_activations(model, x_train[:128]))
    utils.print_names_and_shapes(
        keract.get_gradients_of_trainable_weights(model, x_train[:128],
                                                  y_train[:128]))
    utils.print_names_and_shapes(
        keract.get_gradients_of_activations(model, x_train[:128],
                                            y_train[:128]))
Example #6
def extract_features_to_pd():

    # 1. Load Data and CNN Model
    DTL = np.load(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_dtl.npy'))
    bisp_df = pd.read_pickle(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_dtl_uids.pkl'))
    model = load_model(cf.CNN_FILENAME)

    # 2. Extract features
    layer_name = 'fc1'

    #DTL.shape
    #DTL[0].shape

    i = 0

    DTL_i = DTL[(i):(i + 1)]

    l1 = DTL_i[0, :, :, 0]
    show(l1)

    activations = get_activations(model, DTL[0:1])

    import keract
    keract.display_activations(activations,
                               cmap=None,
                               save=False,
                               directory='.',
                               data_format='channels_last',
                               fig_size=(24, 24),
                               reshape_1d_layers=False)

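    # display_heatmaps overlays each activation map on the input image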
    keract.display_heatmaps(activations, DTL[0:1], save=False)

    #a.shape

    DTL_p = preprocess_input(DTL)  # Preprocess image data

    #DTL_p = DTL_p[1:5,:,:,:] # for testing

    # Generate feature extractor using trained CNN
    feature_extractor = Model(
        inputs=model.inputs,
        outputs=model.get_layer(name=layer_name).output,
    )

    features = feature_extractor.predict(DTL_p)

    # 3. Create and format pandas DataFrame
    df = pd.DataFrame(features).add_prefix('cnn_feat_')
    df['uid'] = bisp_df.uid

    # 4. Export
    df.to_pickle(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_cnn_features_all.pkl'))
    df.to_csv(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_cnn_features_all.csv'))

# Collect the indices of each digit class (0-9) in the training labels.
zero_indices = np.where(trainlabels == 0)[0]
one_indices = np.where(trainlabels == 1)[0]
two_indices = np.where(trainlabels == 2)[0]
three_indices = np.where(trainlabels == 3)[0]
four_indices = np.where(trainlabels == 4)[0]
five_indices = np.where(trainlabels == 5)[0]
six_indices = np.where(trainlabels == 6)[0]
seven_indices = np.where(trainlabels == 7)[0]
eight_indices = np.where(trainlabels == 8)[0]
nine_indices = np.where(trainlabels == 9)[0]

indices = [
    zero_indices, one_indices, two_indices, three_indices, four_indices,
    five_indices, six_indices, seven_indices, eight_indices, nine_indices
]

#keract_target=target_test[:1]
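# Average the 'dense128' activations over all training samples of each digit class.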
meanarraylist = []
for anum in indices:
    sumarray = np.zeros(128)
    for anindex in anum:
        keract_inputs = input_train[anindex]
        activations = get_activations(model,
                                      keract_inputs.reshape(1, 28, 28, 1),
                                      layer_names='dense128',
                                      output_format='simple')
        sumarray = np.add(sumarray, activations['dense128'])

    meanarray = sumarray / len(anum)
    meanarraylist.append(meanarray)
#print(meanarraydict)

#activations=get_activations(model, keract_inputs)
#activations=get_activations(model, keract_inputs, layer_names='dense128', output_format='simple')
#display_activations(activations, cmap='gray', save=False)

#heatmaps
from keract import display_heatmaps
#display_heatmaps(activations, keract_inputs, save=False)
Example #8
labels = ['Guilt', 'Disgust', 'Happy', 'Neutral', 'Anger', 'Surprise', 'Sad']
history_pred = []
hist_time = []
while True:
    data = np.frombuffer(stream.read(CHUNK), dtype=np.float32)
    x_infer = input_prep(data, smile)
    pred = model.predict(x_infer)
    predi = pred.argmax(axis=1)
    history_pred = np.append(history_pred, predi[0])
    # hist_time = np.append(hist_time, dtime.now().strftime('%H:%M:%S'))
    print(labels[predi[0]] + "  --  (raw data peak: " + str(max(data)) + ")")

    # GET ACTIVATIONS
    layername = 'activation_3'
    l_weights = keract.get_activations(model, x_infer, layer_names=layername)
    w_values = np.squeeze(l_weights[layername])

    # SEND TO MQTT BROKER
    client.publish('hiper/labinter99_ita', labels[predi[0]])
    for k in range(len(labels)):
        topic_pub = "hiper/labinter_ita_" + labels[k]
        # client.subscribe(topic_pub)
        client.publish(topic_pub, str(w_values[k]))

        # SEND TO MQTT BROKER
        # for k in range(len(labels)):
        #     mqtt_client.publish_single(float(w_values[k]), topic=labels[k])

        # plot
        # clear_output(wait=True)
Example #9
import os
from os import path

import cv2
import numpy as np
from keras.models import load_model
from keract import get_activations
from matplotlib import pyplot as plt

from Udacity import utils

save_path = 'feature_maps'
img_path = r'D:\Eitan_Netanel\Records\Normal_3\IMG\center_2018_11_30_16_43_27_723.jpg'
model_path = r'C:\Users\netanelgip\PycharmProjects\CarNet\Udacity\normal_turns_reducelr_elu_lr0.6_batch20_saveall\model-016.h5'

# Load the image. the network expects RGB images
image = cv2.imread(img_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = utils.preprocess(image)  # apply the preprocessing

model = load_model(model_path)
model.summary()
activations = get_activations(model, [image])

layer_names = list(activations.keys())
activation_maps = list(activations.values())
batch_size = activation_maps[0].shape[0]
assert batch_size == 1, 'One image at a time to visualize.'
for i, activation_map in enumerate(activation_maps):
    print('Displaying activation map {}'.format(i))
    shape = activation_map.shape

    layer_dir = path.join(save_path,
                          layer_names[i]).replace('/', '').replace(':', '')
    if not path.exists(layer_dir):
        os.mkdir(layer_dir)

    if len(shape) == 4:
Example #10
# image = origin_image.crop((0, 0, 224, 224))
img = image.load_img(img_path, target_size=(image_size, image_size))


img = img_to_array(img)
img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2]))
img = preprocess_input(img)
yhat = model.predict(img)
label = decode_predictions(yhat)
label = label[0][0]
print('{} ({})'.format(label[1], label[2] * 100))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
activations = keract.get_activations(model, img, layer_name='block5_conv3')
first = activations.get('block5_conv3')
# keract.display_activations(activations)
# keract.display_heatmaps(activations, input_image=image)



grad_trainable_weights = keract.get_gradients_of_activations(model, img, yhat, layer_name='block5_conv3')

print(grad_trainable_weights['block5_conv3'].shape)
grad_trainable_weights = tf.convert_to_tensor(grad_trainable_weights['block5_conv3'])


pooled_grads = K.mean(grad_trainable_weights, axis=(0, 1, 2))

# We compute the gradient of the class output with respect to the feature maps. Then we pool the gradients along every axis except the channel axis. Finally, we weight the output feature maps by the computed gradient values.
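
# A minimal sketch (not part of the original snippet) of that weighting step,
# assuming `first` still holds the block5_conv3 activations fetched above:
weighted_maps = tf.convert_to_tensor(first) * pooled_grads  # weight each channel by its pooled gradient
heatmap = tf.reduce_mean(weighted_maps, axis=-1)[0]         # average over channels -> (H, W)
heatmap = tf.nn.relu(heatmap)                               # keep positive contributions only
heatmap = heatmap / (tf.reduce_max(heatmap) + 1e-8)         # normalize to [0, 1]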
Example #11
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        print(model.summary())

        x_train, y_train, x_test, y_test = MNIST.get_mnist_data()

        # checking that the accuracy is the same as before 99% at the first epoch.
        # test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0, batch_size=128)
        # print('')
        # assert test_acc > 0.98

        utils.print_names_and_shapes(
            keract.get_activations(model, x_test[0:200]))  # with 200 samples.
        utils.print_names_and_shapes(
            keract.get_gradients_of_trainable_weights(model, x_train[0:10],
                                                      y_train[0:10]))
        utils.print_names_and_shapes(
            keract.get_gradients_of_activations(model, x_train[0:10],
                                                y_train[0:10]))

        a = keract.get_activations(model, x_test[0:1])  # with just one sample.
        keract.display_activations(a, directory='mnist_activations', save=True)

        # import numpy as np
        # import matplotlib.pyplot as plt
        # plt.imshow(np.squeeze(x_test[0:1]), interpolation='None', cmap='gray')
    else:
        x_train, y_train, x_test, y_test = MNIST.get_mnist_data()

# Fit data
autoencoder.fit(input_train,
                input_train,
                epochs=no_epochs,
                batch_size=batch_size,
                validation_split=validation_split)

# =============================================
# Take a sample for visualization purposes
# =============================================
input_sample = input_test[:1]
reconstruction = autoencoder.predict([input_sample])

# =============================================
# Visualize input-->reconstruction
# =============================================
fig, axes = plt.subplots(1, 2)
fig.set_size_inches(6, 3.5)
input_sample_reshaped = input_sample.reshape((img_width, img_height))
reconstruction_reshaped = reconstruction.reshape((img_width, img_height))
axes[0].imshow(input_sample_reshaped)
axes[0].set_title('Original image')
axes[1].imshow(reconstruction_reshaped)
axes[1].set_title('Reconstruction')
plt.show()

# =============================================
# Visualize encoded state with Keract
# =============================================
activations = get_activations(autoencoder, input_sample)
display_activations(activations, cmap="gray", save=False)

model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(10, kernel_size=(5, 5), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(no_classes, activation='softmax'))

# Compile the model
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])

# Fit data to model
model.fit(input_train, target_train,
          batch_size=batch_size,
          epochs=no_epochs,
          verbose=verbosity,
          validation_split=validation_split)

# Generate generalization metrics
score = model.evaluate(input_test, target_test, verbose=0)
print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')

# =============================================
# Keract visualizations
# =============================================
from keract import get_activations, display_activations, display_heatmaps
keract_inputs = input_test[:1]
keract_targets = target_test[:1]
activations = get_activations(model, keract_inputs)
display_activations(activations, cmap="gray", save=False)
display_heatmaps(activations, keract_inputs, save=False)
Example #14

#loads the model from the saved model file
json_file = open('model.json', 'r')

mapping = {'LeakyRelu': LeakyRelu}
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json, mapping)

# load weights into new model
model.load_weights("model.h5")
#keract requires model compilation
model.compile(loss="mse", optimizer="adam")

while True:
    #generates the noise to be fed into the model
    noise = np.random.normal(0, 1, (1, 100))
    #shows the reshape layer as it has image output
    activations = get_activations(model, noise, model.layers[4].name)
    display_activations(activations, cmap="gray")
    for layer in model.layers:
        #shows only the batch norm layers to avoid seeing conv then batch norm when they are quite similar
        if "norm" in layer.name:
            activations = get_activations(model, noise, layer.name)
            display_activations(activations, cmap="gray")
    #the last layer doesn't have any batch norm but we want to see it anyway
    output = reverse_tanh(model.predict(noise)[0])
    plt.imshow(output)
    plt.show()
Example #15
#%%
from keras.models import load_model
from keract import get_activations, display_activations, display_heatmaps
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
#%%
path = Path("/home/killaarsl/Documents/racebaandemo/ADR")

model = load_model(str(path / "tempmodel.h5"))
image = plt.imread(str(path / "434_4_2.png"))
image = image[:, :, :3]
image = np.expand_dims(image, axis=0)

activations = get_activations(model, image)

#%%
display_activations(activations, cmap="gray", save=True)

#%%
display_heatmaps(activations, image, save=False)
Example #16
                            patience=15,
                            verbose=1,
                            restore_best_weights=True)

# model.fit(
#     x=x_train,
#     y=y_train,
#     verbose=1,
#     batch_size=batch_size,
#     epochs=epochs,
#     callbacks=[plateau_callback, es_callback],
#     validation_data=(x_test, y_test))
# model.save_weights(filepath=data_model_path)

model.load_weights(filepath=data_model_path)
# score = model.evaluate(
#     x_test,
#     y_test,
#     verbose=0,
#     batch_size=batch_size)
# print("Test performance: ", score)

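# d(loss)/d(activations) for the 'heatmap1' layer, rendered as heatmaps over the input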
grads = keract.get_gradients_of_activations(model,
                                            x_test[[12]],
                                            y_test[[12]],
                                            layer_name='heatmap1')
keract.display_heatmaps(grads, x_test[12] * 255.0)

activations = keract.get_activations(model, x_test[[12]])
keract.display_activations(activations)
Example #17
               cmap="gray")

# create an axes on the right side of ax. The width of cax will be 5%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = make_axes_locatable(ax)
cax = divider.append_axes("bottom", size="5%", pad=0.05)

plt.colorbar(im, cax=cax, orientation="horizontal")

plt.show()

import keract

# Getting all the activations using keract
activations = keract.get_activations(model,
                                     data_events,
                                     layer_names=["Activation"])
#activations = keract.get_activations(model, data_events, layer_names=["activation_133"])
# Plotting using keract
keract.display_heatmaps(
    activations, data_events, cmap="gray", save=True,
    directory='picss')  # to save the files, pass save=True and a directory

#activations = keract.get_activations(model, data_events, layer_names=["conv2d_399", "conv2d_400", "conv2d_401", "max_pooling2d_133"])
activations = keract.get_activations(model,
                                     data_events,
                                     layer_names=[
                                         "TimeConv0", "Dropout0", "TimeConv1",
                                         "Dropout1", "StationConv", "Dropout2",
                                         "Pooling", "DenseFlipout"
                                     ])
Example #18
#    print("shape is",acts.shape)
#    print(layer_name, acts.shape, end=' ')
#
#    if acts.shape[0] != 1:
#        print('-> Skipped. First dimension is not 1.')
#        continue
#    if len(acts.shape) <= 2:
#        print('-> Skipped. 2D Activations.')
#        continue


report = metrics.classification_report(y_true=np.asarray(lbls),
                                       y_pred=y_preds_thresh,
                                       output_dict=True)
print(report)
matrix = metrics.confusion_matrix(np.asarray(lbls), y_preds_thresh)
#print(matrix)
act = keract.get_activations(model, ims)
#print("keys",act.keys())
#keract.display_activations(act)
'''
print("act",act.keys())
one_act = np.reshape(act["conv2d_1/BiasAdd:0"][0],(28,24,24))
print("shape",one_act.shape)
print(one_act[0].shape)
cv2.imshow("t",one_act[0], cv2.IMREAD_GRAYSCALE)
cv2.waitKey()
'''
y_preds_roc = [(x2 - x1) for x1, x2 in y_preds]
fpr, tpr, thresholds = metrics.roc_curve(lbls, y_preds_roc)
#print("AUC",metrics.roc_auc_score(lbls,y_preds_roc))

#plt.plot(fpr,tpr)

    def test_display_1(self):
        model, x = dummy_model_and_inputs()
        acts = get_activations(model, x)
        display_activations(acts, save=True)
Example #20
    def test_compile_vgg16_model(self):
        model, x = dummy_model_and_inputs()
        model.name = 'vgg16'  # spoof identity here!
        get_activations(model, x, auto_compile=False)
        self.assertTrue(model._is_compiled)

import sys
int_to_char = dict((i, c) for i, c in enumerate(chars))

start = np.random.randint(0, len(dataX)-1)
pattern = dataX[start]
print ("Seed:")
print ("\"", ''.join([int_to_char[value] for value in pattern]), "\"")
# generate characters
for i in range(100):
    out_x = np.reshape(pattern, (1, len(pattern), 1))
    out_x = out_x / float(n_vocab)
    prediction = m.predict(out_x, verbose=0)
    index = np.argmax(prediction)
    result = int_to_char[index]
    seq_in = [int_to_char[value] for value in pattern]
    sys.stdout.write(result)
    pattern.append(index)
    pattern = pattern[1:len(pattern)]
print("\nDone.")


num_simulations = x.shape[2]
attention_vectors = np.zeros(shape=(num_simulations, seq_length))
for i in range(num_simulations):
    #testing_inputs_1, testing_outputs = get_data_recurrent(1, TIME_STEPS, INPUT_DIM)
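    # NOTE: x is the same on every iteration, so each row of attention_vectors is identical.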
    activations = get_activations(m, x, layer_name='attention_weight')
    #activations = K.function([m.layers[0].input], [m.layers[1].output])
    attention_vec = np.mean(activations['attention_weight'], axis=0).squeeze()
    assert np.abs(np.sum(attention_vec) - 1.0) < 1e-5
    attention_vectors[i] = attention_vec
print("attention vector: ",attention_vectors)
Example #22
# Section - 5

# Visualise the filters
conv_1 = model.get_layer('block1_conv1')
weights_1 = conv_1.get_weights()
print(weights_1[0].shape)
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plt.imshow(normalize_image(weights_1[0][:, :, :, 0]))
plt.title('first layer - channel 1 weights')
plt.subplot(1,2,2)
plt.imshow(normalize_image(weights_1[0][:, :, :, 20]))
plt.title('first layer - channel 20 weights')

activations = get_activations(model, resident_owls_bogart_GEO)

# layer_name ='block1_conv1_1/Relu:0'
layer_name = list(activations.keys())[1]
print(layer_name)
show_activations(model, resident_owls_bogart_GEO, layer_name, 'GEO')
show_activations(model, resident_owls_bogart_COLOR, layer_name, 'COLOR')
show_activations(model, resident_owls_bogart_FILT, layer_name, 'FILT')

# Section - 6

#load the images
uploaded = files.upload()  # please upload all the dog images
uploaded = files.upload()  # please upload all the cat images
!ls
Example #23
from io import BytesIO

import numpy as np
import requests
from PIL import Image
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import decode_predictions
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing.image import img_to_array

import keract

model = VGG16()

url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/14/Gatto_europeo4.jpg/250px-Gatto_europeo4.jpg'
response = requests.get(url)
image = Image.open(BytesIO(response.content))
image = image.crop((0, 0, 224, 224))
image = img_to_array(image)
arr_image = np.array(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)
yhat = model.predict(image)
label = decode_predictions(yhat)
label = label[0][0]
print('{} ({})'.format(label[1], label[2] * 100))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
activations = keract.get_activations(model, image, layer_name='block1_conv1')
keract.display_heatmaps(activations, arr_image, save=True)

model.fit(X_train, to_categorical(y_train), epochs=5, batch_size=64)

#model.evaluate(X_test,to_categorical(y_test))

predictions = model.predict(X_test)


def normalize(probs):
    prob_factor = 1 / sum(probs)
    return [prob_factor * p for p in probs]


n = 0
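# Renormalize the attention weights over every position except focus_position,
# and track the largest renormalized weight.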
for x_test in X_test[:10]:
    attention_map = get_activations(model,
                                    np.array([x_test]),
                                    layer_names='attention_weight')
    a = attention_map['attention_weight'][0]
    total = 0.0
    for i in range(len(a)):
        if i == focus_position - 1:
            continue
        total += a[i]
    p_max = 0.0
    for i in range(len(a)):
        if i == focus_position - 1:
            continue
        a[i] /= total
        if a[i] > p_max:
            p_max = a[i]
    a = np.array([a])
Example #25
from PIL import Image
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array

import keract
from utils import gpu_dynamic_mem_growth

if __name__ == "__main__":
    # Check for GPUs and set them to dynamically grow memory as needed
    # Avoids OOM from tensorflow greedily allocating GPU memory
    gpu_dynamic_mem_growth()
    model = VGG16()

    image = Image.open('250px-Gatto_europeo4.jpeg')
    image = image.crop((0, 0, 224, 224))
    image = img_to_array(image)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)
    yhat = model.predict(image)
    label = decode_predictions(yhat)
    label = label[0][0]
    print('{} ({})'.format(label[1], label[2] * 100))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    activations = keract.get_activations(model, image)
    first = activations.get('block1_conv1')
    keract.display_activations(activations, save=True)
Example #26

import numpy as np

import keract
import utils
# get_multi_inputs_model() is defined earlier in the original example (omitted here).

if __name__ == '__main__':
    np.random.seed(123)
    inp_a = np.random.uniform(size=(5, 10))
    inp_b = np.random.uniform(size=(5, 10))
    out_c = np.random.uniform(size=(5, 1))

    # Just for visual purposes.
    np.set_printoptions(precision=2)

    # Activations of all the layers
    print('MULTI-INPUT MODEL')
    m1 = get_multi_inputs_model()
    m1.compile(optimizer='adam', loss='mse')
    utils.print_names_and_values(keract.get_activations(m1, [inp_a, inp_b]))
    utils.print_names_and_values(
        keract.get_gradients_of_trainable_weights(m1, [inp_a, inp_b], out_c))
    utils.print_names_and_values(
        keract.get_gradients_of_activations(m1, [inp_a, inp_b], out_c))

    # Just get the last layer!
    utils.print_names_and_values(
        keract.get_activations(m1, [inp_a, inp_b], layer_name='last_layer'))
    utils.print_names_and_values(
        keract.get_gradients_of_activations(m1, [inp_a, inp_b],
                                            out_c,
                                            layer_name='last_layer'))
    print('')

    print('SINGLE-INPUT MODEL')
Example #27
    def test_nodes_empty(self):
        model, x = dummy_model_and_inputs()
        self.assertRaises(
            ValueError,
            lambda: get_activations(model, x, nodes_to_evaluate=[]))
Example #28
                 [[255, 255, 255], [255, 255, 255], [255, 255, 255]]])
obj6 = np.array([[[0, 0, 0], [255, 255, 255], [0, 0, 0]],
                 [[255, 255, 255], [255, 255, 255], [255, 255, 255]]])

x = np.array([obj1, obj2, obj3, obj4, obj5, obj6])
x = x.reshape((1, x.size))
print(x.shape)
model = Sequential()

hidden = Dense(3, input_dim=4, activation='relu', name="hidden")

model.add(hidden)
model.add(Dense(4, name="output"))

model.compile(loss='mean_squared_error', optimizer='adam')
plot_model(model)
model.fit(x, x, epochs=1000)

a = get_activations(model, x)
b = list(a.values())
print(model.predict(np.array([[10, 10, 20, 20]])))
mean = np.mean(b[0])
std = np.std(b[0])
gauss = np.random.normal(mean, std, (1, 4))

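# Rebuild the output layer as a standalone model so hidden-layer activations
# (or samples like `gauss` above) can be fed through it directly.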
model2 = Sequential()
model2.add(
    Dense(4, input_dim=3, weights=model.get_layer("output").get_weights()))

print(model2.predict(list(a.values())[0]))

    def test_compile_vgg16_model(self):
        model, x = dummy_model_and_inputs(name="vgg16")
        get_activations(model, x, auto_compile=False)
        self.assertTrue(model._is_compiled)

        i = i + 1
        yield [x_batch_depth, y_batch_depth]


##### for multimodal
for image_embedding_rgb, image_embedding_depth, subject in test_generator_multimodal():
    #    activations = {}
    #    img_abs_path = os.path.join(image_dir_rgb, image)
    #    image_embedding = preprocess_image(img_abs_path)
    #    print(image_embedding)
    #    break
    arr = np.array(
        list(
            get_activations(model_vgg_multimodal,
                            [image_embedding_rgb, image_embedding_depth],
                            layer_name).values())[0])
    #    arr_depth = np.array(list(get_activations(model_vgg_multimodal, image_embedding_depth,layer_name).values())[0])

    all_image_list.append(arr)
    subject_list.append(subject)

all_image_list = np.mean(np.array(all_image_list), axis=1)

subject_list = np.mean(np.array(subject_list), axis=1)
subject_list_label = np.where(subject_list == 1)[1]
#calculate tsne embeddings
X_tsne = TSNE(n_components=2).fit_transform(all_image_list)

dataset_x_tsne = pd.DataFrame({
    't-SNE_Dim1': X_tsne[:, 0],