Example #1
def make_heatmap(model, images, labels):
    '''
    Use keract to visualize how the model responds to an image.
    Picks a random sample whose label marks an event and draws
    a heatmap of each layer's activations.
    TODO: this doesn't actually work here, or where it came
    from, anymore. Why is it broken?
    '''
    label = 0
    while label == 0:
        # randint is inclusive on both ends, so cap at len(labels) - 1
        num = random.randint(0, len(labels) - 1)
        label = labels[num]

    activation = keract.get_activations(model, images[num:num + 1])
    #keract.display_activations(activation,fig_size=(10,10))
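    # overlay each layer's activation map on the selected input image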
    keract.display_heatmaps(activation, images[num:num + 1])
Example #2
def Activation_Keract(self, patientName, X, AllModels, ROINameCollected):
    """
    Generate activation maps for each layer.
    Inputs: patientName, X (input data), AllModels, ROINameCollected
    Output: keract maps saved to per-model folders
    """
    from keract import get_activations, display_activations, display_heatmaps, display_gradients_of_trainable_weights, get_gradients_of_activations, get_gradients_of_trainable_weights
    # For all models
    for i in range(len(AllModels)):
        # Make sure folders exist
        activationDir = './ActivationMaps' + '/' + patientName + '/' + ROINameCollected + '/' + 'model_' + str(i) + '/' + 'Activation'
        if not os.path.exists(activationDir):
            os.makedirs(activationDir)
        overlayDir = './ActivationMaps' + '/' + patientName + '/' + ROINameCollected + '/' + 'model_' + str(i) + '/' + 'Overlay'
        if not os.path.exists(overlayDir):
            os.makedirs(overlayDir)
        # Get activations of the current model
        currActivations = get_activations(AllModels[i], X, layer_names=None, nodes_to_evaluate=None, output_format='full', nested=False, auto_compile=True)
        # Save activations only
        display_activations(currActivations, cmap=None, save=True, directory=activationDir, data_format='channels_last', fig_size=(24, 24), reshape_1d_layers=False)
        # Save activation overlay
        display_heatmaps(currActivations, X, save=True, directory=overlayDir)
Example #3
            beyond_one_stdev_adv2[img_class][j] += 1

#1813 are out of bounds according to their 'new' classes...
#I'd like to know how many are the same...
np.savetxt("beyond_one_stdev_adv2.csv", beyond_one_stdev_adv2, delimiter=",")
beyond_one_stdev_adv2 = genfromtxt('beyond_one_stdev_adv2.csv', delimiter=',')

for i in range(0, 99):
    max_value = max(y_pred[i])
    img_class = int(np.where(y_pred[i] >= max_value)[0][0])
    print(img_class)
#wait, we don't need to calculate the
#one 2 classified as a 7, one 5 classified as a 3
#display_activations(actives, cmap='gray', save=False)

#heatmaps

display_heatmaps(actives, keract_inputs, save=False)
"""
images_arr = np.array(images)
#plot images before attack
fig, axes = plt.subplots(1, 100, figsize = (28,28))
axes = axes.flatten()
for img, ax in zip(images_arr, axes):
    ax.imshow(np.squeeze(img), cmap="gray")
    ax.axis("off")
plt.tight_layout()
plt.show()


epsilons = [0.0, 0.001, 0.01, 0.03, 0.1, 0.3, 0.5, 1.0]"""
Example #4
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras.preprocessing.image import img_to_array

model = VGG16()

from PIL import Image
import requests
from io import BytesIO
import numpy as np

url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/14/Gatto_europeo4.jpg/250px-Gatto_europeo4.jpg'
response = requests.get(url)
image = Image.open(BytesIO(response.content))
image = image.crop((0, 0, 224, 224))
image = img_to_array(image)
arr_image = np.array(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)
yhat = model.predict(image)
label = decode_predictions(yhat)
label = label[0][0]
print('{} ({})'.format(label[1], label[2] * 100))

import keract

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
activations = keract.get_activations(model, image)
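# activations is a dict mapping each layer's name to its output array for this image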
keract.display_heatmaps(activations, arr_image)
Example #5
    # tail of the custom auc_roc metric that load_model() needs via custom_objects
    metric_vars = [
        i for i in tf.local_variables() if 'auc_roc' in i.name.split('/')[1]
    ]
    for v in metric_vars:
        tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, v)
    with tf.control_dependencies([update_op]):
        value = tf.identity(value)
        return value


# get the path to the image we want to use:
#[train_df,val_df] = pd.read_hdf('cxr_data.h5')
model = load_model('model.h5', custom_objects={'auc_roc': auc_roc})

#pick arbitrary image for now
#path = val_df.iloc[1,'path']
path = 'valid/p10382575/s07/view1_frontal.jpg'
image = Image.open(path)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
image = image.resize((299, 299))
image = image.convert('RGB')
image = np.array(image)
image = image / 255
image = (image - mean) / std
image = np.expand_dims(image, axis=0)
#get activations
activations = get_activations(model, image, layer_name='block14_sepconv2')
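# only the requested layer's activations are extracted and plotted here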
display_activations(activations)
display_heatmaps(activations, image)
Example #6
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras.preprocessing.image import img_to_array

model = VGG16()

from PIL import Image
import requests
from io import BytesIO
import numpy as np

url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/14/Gatto_europeo4.jpg/250px-Gatto_europeo4.jpg'
response = requests.get(url)
image = Image.open(BytesIO(response.content))
image = image.crop((0, 0, 224, 224))
image = img_to_array(image)
arr_image = np.array(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)
yhat = model.predict(image)
label = decode_predictions(yhat)
label = label[0][0]
print('{} ({})'.format(label[1], label[2] * 100))

import keract

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
activations = keract.get_activations(model, image, layer_name='block1_conv1')
keract.display_heatmaps(activations, arr_image, save=True)
Example #7
#heatmaps
from keract import display_heatmaps
#display_heatmaps(activations, keract_inputs, save=False)

#display some more imgs heatmaps
keractlist = []
for aninput in input_test[:2]:
    keractlist.append(aninput)

keract_inputs = keractlist
for i in range(len(keract_inputs)):
    activations = get_activations(model,
                                  keract_inputs[i].reshape(1, 28, 28, 1))
    display_heatmaps(activations,
                     keract_inputs[i].reshape(1, 28, 28, 1),
                     save=False)

#advs heatmaps
keractadvs = []
for aninput in advsinputs[:2]:
    keractadvs.append(aninput)

keract_inputs = keractadvs
for i in range(len(keract_inputs)):
    activations = get_activations(model,
                                  keract_inputs[i].reshape(1, 28, 28, 1))
    display_heatmaps(activations,
                     keract_inputs[i].reshape(1, 28, 28, 1),
                     save=False)
Example #8
def extract_features_to_pd():

    # 1. Load Data and CNN Model
    DTL = np.load(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_dtl.npy'))
    bisp_df = pd.read_pickle(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_dtl_uids.pkl'))
    model = load_model(cf.CNN_FILENAME)

    # 2. Extract features
    layer_name = 'fc1'

    #DTL.shape
    #DTL[0].shape

    i = 0

    DTL_i = DTL[(i):(i + 1)]

    l1 = DTL_i[0, :, :, 0]
    show(l1)

    import keract

    activations = keract.get_activations(model, DTL[0:1])
    keract.display_activations(activations,
                               cmap=None,
                               save=False,
                               directory='.',
                               data_format='channels_last',
                               fig_size=(24, 24),
                               reshape_1d_layers=False)

    keract.display_heatmaps(activations, DTL[0:1], save=False)

    # a.shape  # leftover debug line; `a` is not defined in this snippet

    DTL_p = preprocess_input(DTL)  # Preprocess image data

    #DTL_p = DTL_p[1:5,:,:,:] # for testing

    # Generate feature extractor using trained CNN
    feature_extractor = Model(
        inputs=model.inputs,
        outputs=model.get_layer(name=layer_name).output,
    )

    features = feature_extractor.predict(DTL_p)

    # 3. Create and format pandas DataFrame
    df = pd.DataFrame(features).add_prefix('cnn_feat_')
    df['uid'] = bisp_df.uid

    # 4. Export
    df.to_pickle(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_cnn_features_all.pkl'))
    df.to_csv(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_cnn_features_all.csv'))
Example #9
cax = divider.append_axes("bottom", size="5%", pad=0.05)

plt.colorbar(im, cax=cax, orientation="horizontal")

plt.show()

import keract

# Getting all the activations using keract
activations = keract.get_activations(model,
                                     data_events,
                                     layer_names=["Activation"])
#activations = keract.get_activations(model, data_events, layer_names=["activation_133"])
# Plotting using keract
keract.display_heatmaps(
    activations, data_events, cmap="gray", save=True,
    directory='picss')  # to save the files use: save=True, directory='pics'

#activations = keract.get_activations(model, data_events, layer_names=["conv2d_399", "conv2d_400", "conv2d_401", "max_pooling2d_133"])
activations = keract.get_activations(model,
                                     data_events,
                                     layer_names=[
                                         "TimeConv0", "Dropout0", "TimeConv1",
                                         "Dropout1", "StationConv", "Dropout2",
                                         "Pooling", "DenseFlipout"
                                     ])
## Plotting using keract
keract.display_heatmaps(
    activations, data_events, save=True, directory='picss',
    cmap="hot")  # to save the files use: save=True, directory='pics'
Example #10
#%%
from keras.models import load_model
from keract import get_activations, display_activations, display_heatmaps
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
#%%
path = Path("/home/killaarsl/Documents/racebaandemo/ADR")

model = load_model(str(path / "tempmodel.h5"))
image = plt.imread(str(path / "434_4_2.png"))
image = image[:, :, :3]
image = np.expand_dims(image, axis=0)

activations = get_activations(model, image)

#%%
display_activations(activations, cmap="gray", save=True)
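# with save=True the figures are written to disk (directory defaults to '.')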

#%%
display_heatmaps(activations, image, save=False)
Example #11
                            patience=15,
                            verbose=1,
                            restore_best_weights=True)

# model.fit(
#     x=x_train,
#     y=y_train,
#     verbose=1,
#     batch_size=batch_size,
#     epochs=epochs,
#     callbacks=[plateau_callback, es_callback],
#     validation_data=(x_test, y_test))
# model.save_weights(filepath=data_model_path)

model.load_weights(filepath=data_model_path)
# score = model.evaluate(
#     x_test,
#     y_test,
#     verbose=0,
#     batch_size=batch_size)
# print("Test performance: ", score)

grads = keract.get_gradients_of_activations(model,
                                            x_test[[12]],
                                            y_test[[12]],
                                            layer_name='heatmap1')
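# grads maps the 'heatmap1' layer name to the gradient of the loss w.r.t. its activations for sample 12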
keract.display_heatmaps(grads, x_test[12] * 255.0)

activations = keract.get_activations(model, x_test[[12]])
keract.display_activations(activations)
Example #12
classes = ["air_conditioner", "car_horn", "children_playing",
           "dog_bark", "drilling", "engine_idling",
           "gun_shot", "jackhammer", "siren", "street_music"]
predicted_classes = model.predict_classes(x_test)
predicted_explanations = []
for x in predicted_classes:
    predicted_explanations.append(classes[x])

# ~~~~~~~~~~~~~~~~~  activations & heatmaps  ~~~~~~~~~~~~~~~~~
from keract import get_activations, display_activations, display_heatmaps

for i in range(837):
    eval_item = x_test[i:i + 1]
    activations = get_activations(model, eval_item, "conv2d_1")
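    # activations of the "conv2d_1" layer only, for this single test sample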
    # display_activations(activations, save=False)
    display_heatmaps(activations, eval_item, "./heatmaps/" + str(predicted_explanations[i]) + "/heatmap" + str(i),
                     save=True)
    print("#" + str(i) + ": ", predicted_explanations[i])
    # TODO interpret heatmap?
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

data = {"SoundID": list(range(1, len(predicted_classes) + 1)), "Label": predicted_classes,
        "Explanation": predicted_explanations}
submissions = pd.DataFrame(data)
submissions.to_csv("submission2.csv", index=False, header=True)

print("DONE")

# results
# [0.061496452033782796, 0.9820139408111572]
# [1.0961064827765294, 0.7335723042488098]