Example #1
    def test_gradients_of_activations(self):
        model, x = dummy_model_and_inputs()
        # Compiling is deliberately left to the user: gradient computation
        # needs the model's loss and optimizer to be set correctly.
        model.compile(loss='mse', optimizer='adam')
        y = np.random.uniform(size=len(x))
        grad_acts = get_gradients_of_activations(model, x, y)
        acts = get_activations(model, x)

        grad_acts_nested = get_gradients_of_activations(model,
                                                        x,
                                                        y,
                                                        nested=True)
        acts_nested = get_activations(model, x, nested=True)

        # same support.
        self.assertListEqual(list(acts), list(grad_acts))
        self.assertListEqual(list(grad_acts['i1'].shape),
                             list(acts['i1'].shape))
        self.assertListEqual(list(grad_acts['model'].shape),
                             list(acts['model'].shape))
        self.assertListEqual(list(grad_acts['block'].shape),
                             list(acts['block'].shape))
        self.assertListEqual(list(grad_acts['fc1'].shape),
                             list(acts['fc1'].shape))

        self.assertListEqual(list(acts_nested), list(grad_acts_nested))
        self.assertListEqual(list(grad_acts_nested['i1'].shape),
                             list(acts_nested['i1'].shape))
        self.assertListEqual(list(grad_acts_nested['model/fc1'].shape),
                             list(acts_nested['model/fc1'].shape))
        self.assertListEqual(list(grad_acts_nested['block/fc1'].shape),
                             list(acts_nested['block/fc1'].shape))
        self.assertListEqual(list(grad_acts_nested['block/relu'].shape),
                             list(acts_nested['block/relu'].shape))
        self.assertListEqual(list(grad_acts_nested['fc1'].shape),
                             list(acts_nested['fc1'].shape))
Example #2
import numpy as np

import keract
import utils  # print_names_and_values() lives in the examples' helper module

# get_multi_inputs_model() and get_single_inputs_model() are defined alongside
# this example; a plausible stand-in for the multi-input one is sketched below.


def main():
    np.random.seed(123)
    inp_a = np.random.uniform(size=(5, 10))
    inp_b = np.random.uniform(size=(5, 10))
    out_c = np.random.uniform(size=(5, 1))

    # Just for visual purposes.
    np.set_printoptions(precision=2)

    # Activations of all the layers
    print('MULTI-INPUT MODEL')
    m1 = get_multi_inputs_model()
    m1.compile(optimizer='adam', loss='mse')
    utils.print_names_and_values(keract.get_activations(m1, [inp_a, inp_b]))
    utils.print_names_and_values(
        keract.get_gradients_of_trainable_weights(m1, [inp_a, inp_b], out_c))
    utils.print_names_and_values(
        keract.get_gradients_of_activations(m1, [inp_a, inp_b], out_c))

    # Just get the last layer!
    utils.print_names_and_values(
        keract.get_activations(m1, [inp_a, inp_b], layer_names='last_layer'))
    utils.print_names_and_values(
        keract.get_gradients_of_activations(m1, [inp_a, inp_b],
                                            out_c,
                                            layer_names='last_layer'))
    print('')

    print('SINGLE-INPUT MODEL')
    m2 = get_single_inputs_model()
    m2.compile(optimizer='adam', loss='mse')
    utils.print_names_and_values(keract.get_activations(m2, inp_a))
    utils.print_names_and_values(
        keract.get_gradients_of_trainable_weights(m2, inp_a, out_c))
    utils.print_names_and_values(
        keract.get_gradients_of_activations(m2, inp_a, out_c))
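
get_multi_inputs_model() itself is not shown here. A hypothetical stand-in, consistent with the (5, 10) inputs, the (5, 1) target, and the 'last_layer' lookup used above, could look like:

from tensorflow.keras.layers import Dense, Input, concatenate
from tensorflow.keras.models import Model


def get_multi_inputs_model():
    # Two 10-dim inputs merged into one scalar output; the output layer is
    # named 'last_layer' to match the layer_names='last_layer' calls above.
    a = Input(shape=(10,))
    b = Input(shape=(10,))
    merged = concatenate([a, b])
    out = Dense(1, name='last_layer')(merged)
    return Model(inputs=[a, b], outputs=out)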
Example #3
import tensorflow as tf
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

import keract
import utils  # gpu_dynamic_mem_growth() and print_names_and_shapes() helpers
from data import MNIST

if __name__ == '__main__':
    # Computing gradients requires eager execution to be disabled.
    tf.compat.v1.disable_eager_execution()
    # Check for GPUs and set them to dynamically grow memory as needed
    # Avoids OOM from tensorflow greedily allocating GPU memory
    utils.gpu_dynamic_mem_growth()

    x_train, y_train, _, _ = MNIST.get_mnist_data()

    # Reshape (60000, 28, 28, 1) to (60000, 28, 28):
    # the LSTM expects inputs of shape (batch, time_steps, input_dim).
    x_train = x_train.squeeze()

    model = Sequential()
    model.add(LSTM(16, input_shape=(28, 28)))
    model.add(Dense(MNIST.num_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    utils.print_names_and_shapes(keract.get_activations(model, x_train[:128]))
    utils.print_names_and_shapes(
        keract.get_gradients_of_trainable_weights(model, x_train[:128],
                                                  y_train[:128]))
    utils.print_names_and_shapes(
        keract.get_gradients_of_activations(model, x_train[:128],
                                            y_train[:128]))
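
utils.gpu_dynamic_mem_growth() is a small helper from the examples; an equivalent sketch using only public TensorFlow APIs:

import tensorflow as tf


def gpu_dynamic_mem_growth():
    # Let TensorFlow grow GPU memory on demand instead of reserving it all
    # up front, which avoids the OOM mentioned in the comment above.
    for gpu in tf.config.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)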
Example #4
    inp_a = np.random.uniform(size=(5, 10))
    inp_b = np.random.uniform(size=(5, 10))
    out_c = np.random.uniform(size=(5, 1))

    # Just for visual purposes.
    np.set_printoptions(precision=2)

    # Activations of all the layers
    print('MULTI-INPUT MODEL')
    m1 = get_multi_inputs_model()
    m1.compile(optimizer='adam', loss='mse')
    utils.print_names_and_values(keract.get_activations(m1, [inp_a, inp_b]))
    utils.print_names_and_values(
        keract.get_gradients_of_trainable_weights(m1, [inp_a, inp_b], out_c))
    utils.print_names_and_values(
        keract.get_gradients_of_activations(m1, [inp_a, inp_b], out_c))

    # Just get the last layer!
    utils.print_names_and_values(
        keract.get_activations(m1, [inp_a, inp_b], layer_name='last_layer'))
    utils.print_names_and_values(
        keract.get_gradients_of_activations(m1, [inp_a, inp_b],
                                            out_c,
                                            layer_name='last_layer'))
    print('')

    print('SINGLE-INPUT MODEL')
    m2 = get_single_inputs_model()
    m2.compile(optimizer='adam', loss='mse')
    utils.print_names_and_values(keract.get_activations(m2, inp_a))
    utils.print_names_and_values(
        keract.get_gradients_of_trainable_weights(m2, inp_a, out_c))
Example #5
        model.summary()  # summary() prints the table itself and returns None

        x_train, y_train, x_test, y_test = MNIST.get_mnist_data()

        # Sanity check (disabled): accuracy should still be ~99% after the first epoch.
        # test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0, batch_size=128)
        # print('')
        # assert test_acc > 0.98

        utils.print_names_and_shapes(
            keract.get_activations(model, x_test[0:200]))  # with 200 samples.
        utils.print_names_and_shapes(
            keract.get_gradients_of_trainable_weights(model, x_train[0:10],
                                                      y_train[0:10]))
        utils.print_names_and_shapes(
            keract.get_gradients_of_activations(model, x_train[0:10],
                                                y_train[0:10]))

        a = keract.get_activations(model, x_test[0:1])  # with just one sample.
        keract.display_activations(a, directory='mnist_activations', save=True)

        # import numpy as np
        # import matplotlib.pyplot as plt
        # plt.imshow(np.squeeze(x_test[0:1]), interpolation='None', cmap='gray')
    else:
        x_train, y_train, x_test, y_test = MNIST.get_mnist_data()

        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   activation='relu',
Example #6
    def get_gradients(self, x, y=None, layer_name=None):
        # Gradient of the loss w.r.t. the activations of `layer_name`
        # (or of the first returned layer when layer_name is None).
        gradients = keract.get_gradients_of_activations(self.model,
                                                        x,
                                                        y,
                                                        layer_names=layer_name)
        return np.squeeze(list(gradients.values())[0])
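
One caveat: if layer_name matches no layer and keract hands back an empty dict (behaviour varies by version), list(gradients.values())[0] raises a bare IndexError. A slightly more defensive variant of the same method:

    def get_gradients(self, x, y=None, layer_name=None):
        gradients = keract.get_gradients_of_activations(self.model, x, y,
                                                        layer_names=layer_name)
        if not gradients:
            raise ValueError('no layer matched {!r}'.format(layer_name))
        return np.squeeze(next(iter(gradients.values())))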
Example #7
                            patience=15,
                            verbose=1,
                            restore_best_weights=True)

# model.fit(
#     x=x_train,
#     y=y_train,
#     verbose=1,
#     batch_size=batch_size,
#     epochs=epochs,
#     callbacks=[plateau_callback, es_callback],
#     validation_data=(x_test, y_test))
# model.save_weights(filepath=data_model_path)

model.load_weights(filepath=data_model_path)
# score = model.evaluate(
#     x_test,
#     y_test,
#     verbose=0,
#     batch_size=batch_size)
# print("Test performance: ", score)

grads = keract.get_gradients_of_activations(model,
                                            x_test[[12]],
                                            y_test[[12]],
                                            layer_name='heatmap1')
keract.display_heatmaps(grads, x_test[12] * 255.0)

activations = keract.get_activations(model, x_test[[12]])
keract.display_activations(activations)
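
Note the double-bracket indexing: x_test[[12]] keeps the batch axis that the model expects, while plain x_test[12] (as passed to display_heatmaps, which wants a single image) drops it:

import numpy as np

x = np.zeros((100, 28, 28, 3))
print(x[12].shape)    # (28, 28, 3)    -- single image, batch axis dropped
print(x[[12]].shape)  # (1, 28, 28, 3) -- batch of one, what the model expects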
Example #8
yhat = model.predict(img)
label = decode_predictions(yhat)
label = label[0][0]
print('{} ({})'.format(label[1], label[2] * 100))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
activations = keract.get_activations(model, img, layer_name='block5_conv3')
first = activations.get('block5_conv3')
# keract.display_activations(activations)
# keract.display_heatmaps(activations, input_image=image)

# Gradients of the 'block5_conv3' activations w.r.t. the model output
# (renamed from grad_trainable_weights: these are activation gradients).
grad_activations = keract.get_gradients_of_activations(model, img, yhat,
                                                       layer_name='block5_conv3')
print(grad_activations['block5_conv3'].shape)
grad_activations = tf.convert_to_tensor(grad_activations['block5_conv3'])

# Average the gradients over the batch and spatial axes: one weight per channel.
pooled_grads = K.mean(grad_activations, axis=(0, 1, 2))

# We compute the gradient of the class score with respect to the feature map,
# pool the gradients along every axis except the channel axis, and then weight
# the output feature map by the pooled gradient values.
heatmap = tf.reduce_mean(tf.multiply(pooled_grads, first[0]), axis=-1)

# Averaging the weighted feature map along the channel axis yields a 14x14
# heatmap, which is then normalized so its values lie between 0 and 1.
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)

print(heatmap.shape)
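
A typical last step for this Grad-CAM-style heatmap is to upsample it to the input resolution and blend it over the original image. A sketch, assuming img holds the unpreprocessed 1x224x224x3 pixels fed to the model above (so the 14x14 map upsamples by a factor of 16):

import matplotlib.pyplot as plt

# Nearest-neighbour upsample: 14x14 -> 224x224.
heatmap_up = np.kron(heatmap, np.ones((16, 16)))

plt.imshow(img[0].astype('uint8'))             # original image
plt.imshow(heatmap_up, cmap='jet', alpha=0.4)  # heatmap overlay
plt.axis('off')
plt.savefig('grad_cam_overlay.png', bbox_inches='tight')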
Example #9
    def gradients_of_activations(self, st=0, en=None, indices=None, layer_name=None) -> dict:
        test_x, test_y, test_label = self.fetch_data(start=st, ende=en, shuffle=False,
                                                     cache_data=False,
                                                     indices=indices)

        return keract.get_gradients_of_activations(self.k_model, test_x, test_label, layer_names=layer_name)