Example 1
import tensorflow as tf
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

import keract
import utils
from data import MNIST

if __name__ == '__main__':
    # Gradient computation requires eager execution to be disabled.
    tf.compat.v1.disable_eager_execution()
    # Check for GPUs and set them to dynamically grow memory as needed
    # Avoids OOM from tensorflow greedily allocating GPU memory
    utils.gpu_dynamic_mem_growth()

    x_train, y_train, _, _ = MNIST.get_mnist_data()

    # Reshape from (60000, 28, 28, 1) to (60000, 28, 28):
    # the LSTM expects inputs of shape (batch, time_steps, input_dim).
    x_train = x_train.squeeze()

    model = Sequential()
    model.add(LSTM(16, input_shape=(28, 28)))
    model.add(Dense(MNIST.num_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    utils.print_names_and_shapes(keract.get_activations(model, x_train[:128]))
    utils.print_names_and_shapes(
        keract.get_gradients_of_trainable_weights(model, x_train[:128],
                                                  y_train[:128]))
    utils.print_names_and_shapes(
        keract.get_gradients_of_activations(model, x_train[:128],
                                            y_train[:128]))
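
For reference, keract.get_activations returns a dict-like mapping from layer
names to NumPy arrays, so the shapes printed above can also be inspected by
hand. A minimal sketch, assuming the model and data from the example above
(the layer names shown are illustrative; they depend on how Keras auto-names
layers):

activations = keract.get_activations(model, x_train[:128])
for layer_name, output in activations.items():
    # e.g. 'lstm' -> (128, 16), 'dense' -> (128, 10)
    print(layer_name, output.shape)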
Example 2
        # Excerpt from a larger script: this branch runs when a trained
        # checkpoint already exists (the matching `else:` below rebuilds the
        # model from scratch). Assumed imports for this excerpt:
        #   import utils
        #   from data import get_mnist_data
        #   from keract import display_activations, get_activations
        #   from tensorflow.keras.models import load_model
        model = load_model(checkpoint_file)

        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        print(model.summary())

        x_train, y_train, x_test, y_test = get_mnist_data()

        # Sanity check: the accuracy should match the earlier run (~99% after the first epoch).
        # test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0, batch_size=128)
        # print('')
        # assert test_acc > 0.98

        utils.print_names_and_shapes(get_activations(
            model, x_test[0:200]))  # with 200 samples.

        a = get_activations(model, x_test[0:1])  # with just one sample.
        display_activations(a)

        # import numpy as np
        # import matplotlib.pyplot as plt
        # plt.imshow(np.squeeze(x_test[0:1]), interpolation='None', cmap='gray')
    else:
        x_train, y_train, x_test, y_test = get_mnist_data()

        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   activation='relu',
                   # assumed completion: the original excerpt is cut off here;
                   # input_shape matches 28x28 grayscale MNIST images.
                   input_shape=(28, 28, 1)))
Example 3
import keras
from keras.layers import Dense, LSTM
from keras.models import Sequential

import utils
from data import get_mnist_data, num_classes
from keract import get_activations

if __name__ == '__main__':
    x_train, _, _, _ = get_mnist_data()

    # Reshape from (60000, 28, 28, 1) to (60000, 28, 28):
    # the LSTM expects inputs of shape (batch, time_steps, input_dim).
    x_train = x_train.squeeze()

    model = Sequential()
    model.add(LSTM(16, input_shape=(28, 28)))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    utils.print_names_and_shapes(get_activations(model, x_train[:128]))
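
The print_names_and_shapes helper used throughout these examples comes from
the examples' own utils module, whose body is not shown here. A plausible
reconstruction, assuming it simply walks the dict returned by keract (the
real helper may differ):

def print_names_and_shapes(activations):
    # Print each layer's name alongside the shape of its activation array.
    for name, array in activations.items():
        print(name, array.shape)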
Example 4
        # Excerpt from a larger script: this branch runs when a trained
        # checkpoint already exists (the matching `else:` below rebuilds the
        # model from scratch). Assumed imports for this excerpt:
        #   import keract
        #   import utils
        #   from data import MNIST
        #   from tensorflow.keras.models import load_model
        model = load_model(checkpoint_file)

        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        print(model.summary())

        x_train, y_train, x_test, y_test = MNIST.get_mnist_data()

        # Sanity check: the accuracy should match the earlier run (~99% after the first epoch).
        # test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0, batch_size=128)
        # print('')
        # assert test_acc > 0.98

        utils.print_names_and_shapes(
            keract.get_activations(model, x_test[0:200]))  # with 200 samples.
        utils.print_names_and_shapes(
            keract.get_gradients_of_trainable_weights(model, x_train[0:10],
                                                      y_train[0:10]))
        utils.print_names_and_shapes(
            keract.get_gradients_of_activations(model, x_train[0:10],
                                                y_train[0:10]))

        a = keract.get_activations(model, x_test[0:1])  # with just one sample.
        keract.display_activations(a, directory='mnist_activations', save=True)

        # import numpy as np
        # import matplotlib.pyplot as plt
        # plt.imshow(np.squeeze(x_test[0:1]), interpolation='None', cmap='gray')
    else:
        x_train, y_train, x_test, y_test = MNIST.get_mnist_data()
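
Examples 2 and 4 are excerpts that start inside a load-or-train branch. A
minimal, self-contained sketch of that surrounding pattern, with hypothetical
names (checkpoint_file, build_model), not the examples' actual code:

import os
from tensorflow.keras.models import load_model

checkpoint_file = 'mnist_cnn.h5'  # hypothetical checkpoint path

if os.path.exists(checkpoint_file):
    # Reuse the trained model; Examples 2 and 4 begin inside this branch.
    model = load_model(checkpoint_file)
else:
    # Build, train and persist the model so later runs take the branch above.
    model = build_model()  # stand-in for the Sequential/Conv2D setup shown above
    model.fit(x_train, y_train, epochs=1, batch_size=128)
    model.save(checkpoint_file)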