Example #1
def visualize(self, model, layer_input, save_image=False, path='.'):
    # Alternative input preprocessing, kept for reference:
    # keract_inputs = layer_input[:1]
    # keract_inputs = ((layer_input - np.min(layer_input)) / (np.max(layer_input) - np.min(layer_input))) + 0.5
    keract_inputs = layer_input
    activations = get_activations(model, keract_inputs)
    display_activations(activations,
                        save=save_image,
                        directory=path,
                        cmap='jet')
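The commented-out line above hints at min-max scaling the input before visualization. A minimal sketch of that normalization, with img standing in for any float image batch (the shape is hypothetical):

import numpy as np
img = np.random.rand(1, 64, 64, 3)                       # placeholder input batch
img = (img - np.min(img)) / (np.max(img) - np.min(img))  # rescale to [0, 1]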
Example #2
    def display_activations(self, layer_name: str = None, st=0, en=None, indices=None, **kwargs):
        # Not working currently. Note that keract.display_activations expects a
        # dict of {layer_name: array}, so a single selected layer is re-wrapped.
        activations = self.activations(st=st, en=en, indices=indices)
        if layer_name is not None:
            activations = {layer_name: activations[layer_name]}

        keract.display_activations(activations=activations, **kwargs)
Example #3
def Activation_Keract(self, patientName, X, AllModels, ROINameCollected):
    """
    Generate activation maps for each layer.
    Inputs: patientName, input data X, AllModels, ROI of interest.
    Output: keract maps saved in folders.
    """
    from keract import get_activations, display_activations, display_heatmaps
    # For all models
    for i in range(len(AllModels)):
        # Make sure the output folders exist
        baseDir = os.path.join('./ActivationMaps', patientName, ROINameCollected, 'model_' + str(i))
        activationDir = os.path.join(baseDir, 'Activation')
        os.makedirs(activationDir, exist_ok=True)
        overlayDir = os.path.join(baseDir, 'Overlay')
        os.makedirs(overlayDir, exist_ok=True)
        # Get activations (index with i, not 0, so every model is visualized)
        currActivations = get_activations(AllModels[i], X, layer_names=None, nodes_to_evaluate=None, output_format='full', nested=False, auto_compile=True)
        # Save activations only
        display_activations(currActivations, cmap=None, save=True, directory=activationDir, data_format='channels_last', fig_size=(24, 24), reshape_1d_layers=False)
        # Save activation overlay
        display_heatmaps(currActivations, X, save=True, directory=overlayDir)
Example #4
def print_convolution_outputs(model, X_test, indexes):
    for i in indexes:
        # Fetch and save the activations of each convolutional layer in turn.
        for layer_name in ('conv1', 'conv2', 'conv3'):
            activations = get_activations(model, X_test[i].reshape(1, 32, 32, 3), layer_name=layer_name, auto_compile=True)
            keract.display_activations(activations, cmap=None, save=True, directory='./figures/exercise2_b_' + str(i) + 'output')
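A hypothetical call, assuming a compiled model with layers named conv1/conv2/conv3 and a CIFAR-10-style X_test are in scope:

print_convolution_outputs(model, X_test, indexes=[0, 1])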
Example #5
model.summary()

#configuring the optimizer, loss function, and metrics for our model
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

#training the model on the training sets
model.fit(x_train, y_train, epochs=150)

#testing the model on the testing sets 
model.evaluate(x_test, y_test)

model.save('drive/My Drive/Model Files/working_model')

model = tf.keras.models.load_model('drive/My Drive/Model Files/working_model')

"""**Visualizing our model**"""

!pip install keract

from keract import get_activations, display_activations
img = imageList[1]
img = np.expand_dims(img, axis=0)
activations = get_activations(model, img)

display_activations(activations, save=False)

#image that the filters are working on
plt.imshow(imageList[1])
Example #6
from io import BytesIO

import requests
from PIL import Image
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import decode_predictions
from keras.applications.inception_v3 import preprocess_input
from keras.preprocessing.image import img_to_array

import keract

model = InceptionV3()

url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/14/Gatto_europeo4.jpg/250px-Gatto_europeo4.jpg'
response = requests.get(url)
image = Image.open(BytesIO(response.content))
image = image.crop((0, 0, 299, 299))
image = img_to_array(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)
yhat = model.predict(image)
label = decode_predictions(yhat)
label = label[0][0]
print('{} ({})'.format(label[1], label[2] * 100))  # a tabby is a cat!

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
activations = keract.get_activations(model, image)
keract.display_activations(activations)
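Example #6 stops at the raw activation grids; keract can also overlay activations on the input image via display_heatmaps. A one-line sketch reusing the same variables:

keract.display_heatmaps(activations, image, save=False)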
Example #7
def get_anchors(anchors_path):
    '''loads the anchors from a file'''
    with open(anchors_path) as f:
        anchors = f.readline()
    anchors = [float(x) for x in anchors.split(',')]
    return np.array(anchors).reshape(-1, 2)


anchors = get_anchors(anchors_path)
num_anchors = len(anchors)

yolo_model = yolo_body(Input(shape=(None, None, 3)),
                       num_anchors // 3,
                       num_classes=1)
yolo_model.load_weights(model_path)
yolo_model = Model(yolo_model.input, yolo_model.layers[50].output)
yolo_model.compile(loss="mse", optimizer="adam")
print(yolo_model.summary())

# size = 416, 416
X = Image.open(image_path)
X = X.resize((416, 416), Image.BICUBIC)
X = np.array(X, dtype='float32')
# print(X)
X /= 255.
X = np.expand_dims(X, 0)  # Add batch dimension.
activations_rpn = keract.get_activations(yolo_model, X)
dp_activations = keract.display_activations(activations_rpn,
                                            cmap='gray',
                                            directory='activations/attention',
                                            save=True)
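Rebuilding the model up to layer 50 is one way to limit what is captured; keract's get_activations can instead filter by layer name. A sketch (the layer name is illustrative; check yolo_model.summary() for the real ones):

activations_rpn = keract.get_activations(yolo_model, X, layer_names=['conv2d_50'])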
Example #8
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

#introducing noise
noise_factor = 0.5
x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)

history = autoencoder.fit(x_train_noisy, x_train,
                epochs=10,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test_noisy, x_test))  # validate noisy inputs against clean targets, as in training

[train_loss, train_acc] = autoencoder.evaluate(x_train_noisy, x_train)
[test_loss, test_acc] = autoencoder.evaluate(x_test_noisy, x_test)

input_sample = x_test[:1]
activations = get_activations(encoder, input_sample)
display_activations(activations, cmap="gray", save=False)



# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
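A typical follow-up, not in the original snippet, is to compare a few noisy inputs with their reconstructions; a sketch assuming 28x28 MNIST-style digits:

import matplotlib.pyplot as plt
n = 5
plt.figure(figsize=(10, 4))
for j in range(n):
    plt.subplot(2, n, j + 1)
    plt.imshow(x_test_noisy[j].reshape(28, 28), cmap='gray')  # noisy input
    plt.axis('off')
    plt.subplot(2, n, j + 1 + n)
    plt.imshow(decoded_imgs[j].reshape(28, 28), cmap='gray')  # reconstruction
    plt.axis('off')
plt.show()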

Example #9
# pip install keract
'''
  This module takes the model and a sample and creates images in a folder
  of the activations throughout the whole net. Seeing the activations is a
  visual tool to check the model behavior, e.g., if some of the later
  layers do not activate, then the network is probably underfitting.
'''

import matplotlib.pyplot as plt
from keras import backend as K
import numpy as np
from keract import get_activations, display_activations
from buildModelFromDisk import buildModelFromDisk
from buildSampleFromDisk import buildSampleFromDisk

model = buildModelFromDisk()
(img, sample) = buildSampleFromDisk('./tests/cannabis.jpg')

activations = get_activations(model, sample)

[print(k, '->', v.shape, '- Numpy array') for (k, v) in activations.items()]

display_activations(activations, save=True, directory="./activations/")
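buildModelFromDisk and buildSampleFromDisk are project-specific helpers; a minimal self-contained sketch of the same workflow, assuming a small Keras model and a random stand-in sample:

import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense
from keract import get_activations, display_activations

model = Sequential([Conv2D(8, (3, 3), activation='relu', input_shape=(32, 32, 3)),
                    Flatten(),
                    Dense(10, activation='softmax')])
model.compile(optimizer='adam', loss='categorical_crossentropy')  # keract expects a compiled model
sample = np.random.rand(1, 32, 32, 3).astype('float32')           # stand-in for a real image

activations = get_activations(model, sample)
display_activations(activations, save=True, directory='./activations/')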
Example #10
    def test_display_1(self):
        model, x = dummy_model_and_inputs()
        acts = get_activations(model, x)
        display_activations(acts, save=True)
Example #11
import numpy as np
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense, concatenate
from keract import get_activations
import keract

# Reconstructed head of the snippet (implied by the shapes printed below):
i1 = Input(shape=(10,), name='i1')
i2 = Input(shape=(10,), name='i2')
a = Dense(1, name='fc1')(i1)
b = Dense(1, name='fc2')(i2)

c = concatenate([a, b], name='concat')
d = Dense(1, name='out')(c)
model = Model(inputs=[i1, i2], outputs=[d])

# inputs to the model
x = [np.random.uniform(size=(32, 10)), np.random.uniform(size=(32, 10))]

# call to fetch the activations of the model.
activations = get_activations(model, x, auto_compile=True)

# print the activations shapes.
[print(k, '->', v.shape, '- Numpy array') for (k, v) in activations.items()]

# Print output:
# i1 -> (32, 10) - Numpy array
# i2 -> (32, 10) - Numpy array
# fc1 -> (32, 1) - Numpy array
# fc2 -> (32, 1) - Numpy array
# concat -> (32, 2) - Numpy array
# out -> (32, 1) - Numpy array

keract.display_activations(activations,
                           cmap=None,
                           save=False,
                           directory='.',
                           data_format='channels_last',
                           fig_size=(24, 24),
                           reshape_1d_layers=False)
Example #12
#%%
from keras.models import load_model
from keract import get_activations, display_activations, display_heatmaps
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
#%%
path = Path("/home/killaarsl/Documents/racebaandemo/ADR")

model = load_model(str(path / "tempmodel.h5"))
image = plt.imread(str(path / "434_4_2.png"))
image = image[:, :, :3]
image = np.expand_dims(image, axis=0)

activations = get_activations(model, image)

#%%
display_activations(activations, cmap="gray", save=True)

#%%
display_heatmaps(activations, image, save=False)
Example #13
def extract_features_to_pd():

    # 1. Load Data and CNN Model
    DTL = np.load(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_dtl.npy'))
    bisp_df = pd.read_pickle(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_dtl_uids.pkl'))
    model = load_model(cf.CNN_FILENAME)

    # 2. Extract features
    layer_name = 'fc1'

    #DTL.shape
    #DTL[0].shape

    i = 0

    DTL_i = DTL[(i):(i + 1)]

    l1 = DTL_i[0, :, :, 0]
    show(l1)  # show() is an external/project-specific plotting helper

    activations = get_activations(model, DTL[0:1])

    import keract
    keract.display_activations(activations,
                               cmap=None,
                               save=False,
                               directory='.',
                               data_format='channels_last',
                               fig_size=(24, 24),
                               reshape_1d_layers=False)

    keract.display_heatmaps(activations, DTL[0:1], save=False)

    DTL_p = preprocess_input(DTL)  # Preprocess image data

    #DTL_p = DTL_p[1:5,:,:,:] # for testing

    # Generate feature extractor using trained CNN
    feature_extractor = Model(
        inputs=model.inputs,
        outputs=model.get_layer(name=layer_name).output,
    )

    features = feature_extractor.predict(DTL_p)

    # 3. Create and format pandas DataFrame
    df = pd.DataFrame(features).add_prefix('cnn_feat_')
    df['uid'] = bisp_df.uid

    # 4. Export
    df.to_pickle(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_cnn_features_all.pkl'))
    df.to_csv(
        os.path.join(cf.DROPBOX_DIRECTORY, 'Data', 'OPM', 'FinalData',
                     'Individual Datasets', 'bisp_cnn_features_all.csv'))
Example #14
model.add(Conv2D(10, kernel_size=(5, 5), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(no_classes, activation='softmax'))

#compile the model
model.compile(loss=tf.keras.losses.categorical_crossentropy, optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])

#fit data to model
model.fit(input_train, target_train, batch_size=batch_size, epochs=no_epochs, verbose=verbosity, validation_split=validation_split)

#generate generalization metrics
score = model.evaluate(input_test, target_test, verbose=0)
print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')



#keract visualizations
#for each layer
from keract import get_activations, display_activations
keract_inputs = input_test[:1]
keract_target = target_test[:1]
activations = get_activations(model, keract_inputs)
display_activations(activations, cmap='gray', save=False)


#heatmaps
from keract import display_heatmaps
display_heatmaps(activations, keract_inputs, save=False)

Example #15

#loads the model from the saved model file
json_file = open('model.json', 'r')

mapping = {'LeakyRelu': LeakyRelu}
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json, mapping)

# load weights into new model
model.load_weights("model.h5")
#keract requires model compilation
model.compile(loss="mse", optimizer="adam")

while True:
    #generates the noise to be fed into the model
    noise = np.random.normal(0, 1, (1, 100))
    #shows the reshape layer as it has image output
    activations = get_activations(model, noise, model.layers[4].name)
    display_activations(activations, cmap="gray")
    for layer in model.layers:
        #shows only the batch norm layers to avoid seeing conv then batch norm when they are quite similar
        if "norm" in layer.name:
            activations = get_activations(model, noise, layer.name)
            display_activations(activations, cmap="gray")
    #the last layer doesn't have any batch norm but we want to see it anyway
    output = reverse_tanh(model.predict(noise)[0])
    plt.imshow(output)
    plt.show()
Example #16
plt.figure()
plt.imshow(Image.fromarray(u[0, :, :, :], 'RGB'))
plt.axis('off')
pred_class = np.argmax(model.predict(u, verbose=1))
plt.title('Predicted #: {}'.format(pred_class))
plt.savefig('./visuals/00_test_image.png')
print('Done.\n')

#%% feature maps

print('Plotting and saving activations...\n')

keract.display_activations(activations,
                           cmap='viridis',
                           save=True,
                           directory='./visuals/',
                           data_format='channels_last')

# keract.display_heatmaps(activations, u, save=True)

print('\nDone.\n')

#%% Layer Viz - Filters
#Select a convolutional layer
layer = model.layers[0]

#Get weights
kernels, biases = layer.get_weights()

#Normalize kernels into [0, 1] range for proper visualization
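A minimal sketch of the announced step, using standard min-max scaling of the kernel weights:

k_min, k_max = kernels.min(), kernels.max()
kernels = (kernels - k_min) / (k_max - k_min)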
Example #17
        # checking that the accuracy is the same as before: 99% at the first epoch.
        # test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0, batch_size=128)
        # print('')
        # assert test_acc > 0.98

        utils.print_names_and_shapes(
            keract.get_activations(model, x_test[0:200]))  # with 200 samples.
        utils.print_names_and_shapes(
            keract.get_gradients_of_trainable_weights(model, x_train[0:10],
                                                      y_train[0:10]))
        utils.print_names_and_shapes(
            keract.get_gradients_of_activations(model, x_train[0:10],
                                                y_train[0:10]))

        a = keract.get_activations(model, x_test[0:1])  # with just one sample.
        keract.display_activations(a, directory='mnist_activations', save=True)

        # import numpy as np
        # import matplotlib.pyplot as plt
        # plt.imshow(np.squeeze(x_test[0:1]), interpolation='None', cmap='gray')
    else:
        x_train, y_train, x_test, y_test = MNIST.get_mnist_data()

        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   activation='relu',
                   input_shape=MNIST.input_shape))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
Example #18
    def test_display_2(self):
        # display_activations only needs a dict mapping layer names to 4D arrays.
        acts = {'1_channel': np.random.uniform(size=(1, 32, 32, 1))}
        display_activations(acts, save=True)
Example #19
                      metrics=['accuracy'])

        print(model.summary())

        x_train, y_train, x_test, y_test = get_mnist_data()

        # checking that the accuracy is the same as before: 99% at the first epoch.
        # test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0, batch_size=128)
        # print('')
        # assert test_acc > 0.98

        utils.print_names_and_shapes(get_activations(
            model, x_test[0:200]))  # with 200 samples.

        a = get_activations(model, x_test[0:1])  # with just one sample.
        display_activations(a)

        # import numpy as np
        # import matplotlib.pyplot as plt
        # plt.imshow(np.squeeze(x_test[0:1]), interpolation='None', cmap='gray')
    else:
        x_train, y_train, x_test, y_test = get_mnist_data()

        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   activation='relu',
                   input_shape=input_shape))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
Example #20
def TopOpt_Designing(User_Conditions, opts, envs, my_call_back_functions):
    Time_Trial = opts.Time_Trial
    if opts.Progressive_Refinement:
        agent_primer = Agent(envs.env_primer,
                             opts,
                             Increase=False,
                             filename_save=opts.filename_save +
                             str(opts.PR_EX) + 'by' + str(opts.PR_EY),
                             filename_load=opts.filename_load,
                             EX=opts.PR_EX,
                             EY=opts.PR_EY,
                             n_actions=opts.PR_EX * opts.PR_EY,
                             epsilon=0,
                             input_dims=[opts.PR_EX, opts.PR_EY, 3])

        agent_primer2 = Agent(envs.env_primer2,
                              opts,
                              Increase=False,
                              filename_save=opts.filename_save +
                              str(opts.PR2_EX) + 'by' + str(opts.PR2_EY),
                              filename_load=opts.filename_load,
                              EX=opts.PR2_EX,
                              EY=opts.PR2_EY,
                              n_actions=opts.PR2_EX * opts.PR2_EY,
                              epsilon=0,
                              input_dims=[opts.PR2_EX, opts.PR2_EY, 3])
        agent_primer.load_models()
        agent_primer2.load_models()

    agent = Agent(envs.env,
                  opts,
                  Increase=False,
                  filename_save=opts.filename_save + str(opts.Main_EX) + 'by' +
                  str(opts.Main_EY),
                  filename_load=opts.filename_load,
                  EX=opts.Main_EX,
                  EY=opts.Main_EY,
                  n_actions=opts.Main_EX * opts.Main_EY,
                  epsilon=1.0,
                  input_dims=[opts.Main_EX, opts.Main_EY, 3])
    if opts.Load_Checkpoints: agent.load_models()
    figure_file = 'plots/' + opts.filename_save + '_reward.png'
    best_score = envs.env.reward_range[0]
    score_history, per_history, succ_history, Loss_history = [], [], [], []

    if not opts.Load_Checkpoints:
        from pandas import DataFrame
        TrialData = DataFrame(columns=[
            'Episode', 'Reward', 'Successful Steps', 'Percent Successful',
            'Avg Loss', 'SDEV', 'Epsilon', 'Time'
        ])
    envs.env.reset_conditions()
    if opts.From_App: opts.n_games = 1
    for i in range(opts.n_games):
        Testing = False  #Used to render the environment and track learning of the agent
        if opts.Load_Checkpoints:
            # If the user wants to test the agent, they will be prompted to input BC and LC elements.
            if opts.From_App:
                App_Inputs(envs.env, envs.env_primer, envs.env_primer2, opts,
                           User_Conditions)

            else:
                User_Inputs(envs.env, opts)

        done = False
        score = 0
        if i % 10 == 0 and i >= 100:
            Testing = True
            if i % 200 == 0:
                # Every 200 episodes, a special BC/LC will be used for monitoring purposes.
                Testing_Inputs(envs.env, opts)
                print('--------Testing Run------')
        envs.env.VoidCheck = list(np.ones((1, envs.env.EX * envs.env.EY))[0])
        if Time_Trial: Start_Time_Trial = time.perf_counter()
        observation = envs.env.reset()
        print(envs.env)
        if opts.Progressive_Refinement:
            # Set up to complete 3 iterations of progressive refinement.
            #Progressive Refinement #1 Going from Smallest to Intermediate Mesh Size
            envs.env_primer.VoidCheck = list(
                np.ones((1, envs.env_primer.EX * envs.env_primer.EY))[0])
            Prog_Refine_Act(agent_primer,
                            envs.env,
                            envs.env_primer,
                            opts.Load_Checkpoints,
                            Testing,
                            opts,
                            opts.PR_EX,
                            opts.PR_EY,
                            Time_Trial,
                            opts.From_App,
                            FEA_Skip=1)
            #Progressive Refinement #2: Going from Intermediate to Final Mesh Size
            envs.env_primer2.VoidCheck = Mesh_Transform(
                opts.PR_EX, opts.PR_EY, opts.PR2_EX, opts.PR2_EY,
                envs.env_primer.VoidCheck)
            if opts.From_App:
                del agent_primer
            Prog_Refine_Act(agent_primer2,
                            envs.env,
                            envs.env_primer2,
                            opts.Load_Checkpoints,
                            Testing,
                            opts,
                            opts.PR2_EX,
                            opts.PR2_EY,
                            Time_Trial,
                            opts.From_App,
                            FEA_Skip=1)
            #This outcome will now be used as the final mesh size
            envs.env.VoidCheck = Mesh_Transform(opts.PR2_EX, opts.PR2_EY,
                                                opts.Main_EX, opts.Main_EY,
                                                envs.env_primer2.VoidCheck)
            if opts.From_App:
                del agent_primer2
            #Removed_Num=Mesh_Triming(env_primer,PR_EX,PR_EY)
            #Uncomment the above line if you want to incorporate mesh trimming

            observation[:, :, 0] = np.reshape(
                FEASolve(envs.env.VoidCheck,
                         opts.Lx,
                         opts.Ly,
                         opts.Main_EX,
                         opts.Main_EY,
                         envs.env.LC_Nodes,
                         envs.env.Load_Directions,
                         envs.env.BC_Nodes,
                         Stress=True)[3], (opts.Main_EX, opts.Main_EY))
        observation_v, observation_h, observation_vh = obs_flip(
            observation, opts.Main_EX, opts.Main_EY)
        Last_Reward = 0
        while not done:
            if i % 1000 == 0 and i >= 1:  #Every 1000 iterations, show the activation maps
                from keract import get_activations, display_activations
                activations = get_activations(
                    agent.q_eval.model,
                    observation.reshape(-1, opts.Main_EX, opts.Main_EY, 3))
                display_activations(activations, save=False)
            action = agent.choose_action(observation, opts.Load_Checkpoints,
                                         Testing)
            observation_, reward, done, It = envs.env.step(
                action,
                observation,
                Last_Reward,
                opts.Load_Checkpoints,
                envs.env,
                FEA_Skip=1,
                PR=False)
            if not opts.Load_Checkpoints:
                observation_v_, observation_h_, observation_vh_ = obs_flip(
                    observation_, opts.Main_EX, opts.Main_EY)
                action_v, action_h, action_vh = action_flip(
                    action, opts.Main_EX, opts.Main_EY)
                agent.store_transition(observation, action, reward,
                                       observation_, done)
                agent.store_transition(observation_v, action_v, reward,
                                       observation_v_, done)
                agent.store_transition(observation_h, action_h, reward,
                                       observation_h_, done)
                agent.store_transition(observation_vh, action_vh, reward,
                                       observation_vh_, done)
            score += reward
            App_Plot = Testing_Info(envs.env,
                                    envs.env_primer,
                                    envs.env_primer2,
                                    opts,
                                    score,
                                    opts.Progressive_Refinement,
                                    opts.From_App,
                                    Fixed=True)
            _ = [fn(App_Plot) for fn in my_call_back_functions]
            Last_Reward = reward
            if Testing and not Time_Trial:
                envs.env.render()
                print('Current Score: ' + str(round(score, 3)))
            observation = observation_
            if not opts.Load_Checkpoints:
                observation_v = observation_v_
                observation_h = observation_h_
                observation_vh = observation_vh_
            if opts.Load_Checkpoints and not Time_Trial: envs.env.render()
        App_Plot = Testing_Info(envs.env,
                                envs.env_primer,
                                envs.env_primer2,
                                opts,
                                score,
                                opts.Progressive_Refinement,
                                opts.From_App,
                                Fixed=True)
        _ = [fn(App_Plot) for fn in my_call_back_functions]
        return App_Plot  # NOTE: the code below this return never executes
        toc = time.perf_counter()

        if Time_Trial and not opts.From_App:
            print('It took ' + str(round(toc - Start_Time_Trial, 1)) +
                  ' seconds to complete this time trial.')

        App_Plot = Testing_Info(envs.env,
                                envs.env_primer,
                                envs.env_primer2,
                                opts,
                                score,
                                opts.Progressive_Refinement,
                                opts.From_App,
                                Fixed=True)
Example #21
item_prefixes = {
    'primarycap_reshape/Reshape:0': 'model9_primarycap_layer_item',
    'conv/Relu:0': 'model9_conv_layer_item'
}

import pathlib

path = pathlib.Path(folder_path)
path.mkdir(mode=0o766, parents=True, exist_ok=True)

for item_num in items:
    a = get_activations(model, X_train[item_num -
                                       1:item_num])  # with just one sample.
    plt.title('Train image #' + str(item_num))
    plt.imshow(X_train[item_num])
    plt.savefig(folder_path + image_prefix + str(item_num) + '.png',
                format='png')
    retrieved_activations = {}
    for k, v in a.items():
        if k == 'primarycap_reshape/Reshape:0':
            retrieved_activations[k] = v.reshape(1, 8, 8, 32 * 12)
            retrieved_activations[k] = retrieved_activations[k][:, :, :, :48]
        elif k == 'conv/Relu:0':
            retrieved_activations[k] = v[:, :, :, :48]

    dumpclean(a)

    # This display_activations takes (item_num, folder_path, item_prefixes),
    # so it is presumably a project-local wrapper rather than keract's.
    display_activations(retrieved_activations, item_num, folder_path,
                        item_prefixes)
Example #22
from PIL import Image
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array

import keract
from utils import gpu_dynamic_mem_growth

if __name__ == "__main__":
    # Check for GPUs and set them to dynamically grow memory as needed
    # Avoids OOM from tensorflow greedily allocating GPU memory
    gpu_dynamic_mem_growth()
    model = VGG16()

    image = Image.open('250px-Gatto_europeo4.jpeg')
    image = image.crop((0, 0, 224, 224))
    image = img_to_array(image)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)
    yhat = model.predict(image)
    label = decode_predictions(yhat)
    label = label[0][0]
    print('{} ({})'.format(label[1], label[2] * 100))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    activations = keract.get_activations(model, image)
    first = activations.get('block1_conv1')
    keract.display_activations(activations, save=True)
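    # For VGG16, the 'block1_conv1' entry is a NumPy array of shape
    # (1, 224, 224, 64); a quick sanity check before working with it:
    print(first.shape)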
Example #23
                    validation_steps=64,
                    callbacks=[tb_callback, cp_callback])

print('TRAINING COMPLETE')
model.save(MODEL_STRUCT_PATH)

for dp in glob(os.path.join(TEST_DATA_PATH, '*')):
    for fp in glob(os.path.join(TEST_DATA_PATH, dp, '*')):
        (fn, _) = os.path.splitext(fp)
        arr = numpy.array(
            load_img(fp,
                     target_size=(SIGN_IMG_HEIGHT, SIGN_IMG_WIDTH),
                     grayscale=False,
                     color_mode='rgb',
                     interpolation='nearest'))
        a = get_activations(model, [[arr]], auto_compile=True)
        rp = os.path.join(KERACT_PATH, relative_path(fn, TEST_DATA_PATH))
        display_activations(a, directory=rp, save=True)
        print(f'VISUALIZATION SAVED: {rp}')
        break

print('DONE')

yp = model.predict_generator(test_generator)
yp = numpy.argmax(yp, axis=1)

print('CONFUSION MATRIX:')
print(confusion_matrix(test_generator.classes, yp))
print('Classification Report')
print(classification_report(test_generator.classes, yp, target_names=TYPES))
Example #24
    raw_image_string = tf.io.read_file(img_path)
    raw_image = tf.image.decode_jpeg(raw_image_string, channels=3)
    raw_image = tf.image.convert_image_dtype(raw_image, tf.float32)
    raw_image = tf.image.resize(raw_image, [680, 460])
    raw_image = tf.clip_by_value(raw_image, 0.0, 1.0)
    image = tf.reshape(raw_image, (-1, 680, 460, 3))

    print('=================================================')
    activations = keract.get_activations(loaded_model, image)
    [
        print(k, '->', v.shape, '- Numpy array')
        for (k, v) in activations.items()
    ]

    if args.mode == 'act_map':
        keract.display_activations(activations, cmap="gray")
    elif args.mode == 'heat_map':
        keract.display_heatmaps(activations, image, save=False)

    print('=================================================')
    if args.v:
        json_path = os.path.join(args.model_dir, "affine_weights.json")
        weights = loaded_model.get_weights()
        print("[INFO] affine transformation params: ")
        [print("\t Theta -> ", w) for (w) in weights[-1]]

    print('=================================================')

    out_image = loaded_model(image)
    IMG_SHAPE = (680, 460, 3)
    sampling_size = (240, 240)