# Example: inspect the activations and gradients of a small (untrained) LSTM on MNIST.
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential

import keract
import utils
from data import MNIST

if __name__ == '__main__':
    # Gradients require eager execution to be disabled.
    tf.compat.v1.disable_eager_execution()
    # Check for GPUs and set them to dynamically grow memory as needed.
    # Avoids OOM from tensorflow greedily allocating GPU memory.
    utils.gpu_dynamic_mem_growth()
    x_train, y_train, _, _ = MNIST.get_mnist_data()

    # (60000, 28, 28, 1) to (60000, 28, 28).
    # The LSTM expects inputs of shape (batch, time_steps, input_dim).
    x_train = x_train.squeeze()

    model = Sequential()
    model.add(LSTM(16, input_shape=(28, 28)))
    model.add(Dense(MNIST.num_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    utils.print_names_and_shapes(keract.get_activations(model, x_train[:128]))
    utils.print_names_and_shapes(keract.get_gradients_of_trainable_weights(model, x_train[:128], y_train[:128]))
    utils.print_names_and_shapes(keract.get_gradients_of_activations(model, x_train[:128], y_train[:128]))
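    # Follow-up sketch (an addition, not part of the original example): keract also
    # provides persist_to_json_file / load_activations_from_json_file helpers, so the
    # activations computed above can be saved and reloaded for offline inspection.
    # The 'lstm_activations.json' filename is an arbitrary, illustrative choice.
    activations = keract.get_activations(model, x_train[:128])
    keract.persist_to_json_file(activations, 'lstm_activations.json')
    reloaded = keract.load_activations_from_json_file('lstm_activations.json')
    utils.print_names_and_shapes(reloaded)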
# Example: load the most recent checkpoint of a trained MNIST model and inspect its
# activations and gradients.
import glob

import tensorflow as tf
from tensorflow.keras.models import load_model

import keract
import utils
from data import MNIST

if __name__ == '__main__':
    # As in the previous example, gradients require eager execution to be disabled.
    tf.compat.v1.disable_eager_execution()
    # Assumption: .h5 checkpoints were written to checkpoints/ by an earlier training
    # run; the glob pattern below is illustrative.
    checkpoints = glob.glob('checkpoints/*.h5')
    checkpoints = sorted(checkpoints)  # pip install natsort: natsorted() would be a better choice.
    assert len(checkpoints) != 0, 'No checkpoints found.'
    checkpoint_file = checkpoints[-1]
    print('Loading [{}]'.format(checkpoint_file))
    model = load_model(checkpoint_file)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    print(model.summary())

    x_train, y_train, x_test, y_test = MNIST.get_mnist_data()

    # Check that the accuracy matches what was reached during training (~99% after the first epoch).
    # test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0, batch_size=128)
    # print('')
    # assert test_acc > 0.98

    utils.print_names_and_shapes(keract.get_activations(model, x_test[0:200]))  # with 200 samples.
    utils.print_names_and_shapes(keract.get_gradients_of_trainable_weights(model, x_train[0:10], y_train[0:10]))
    utils.print_names_and_shapes(keract.get_gradients_of_activations(model, x_train[0:10], y_train[0:10]))
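    # Visualization sketch (an addition, not part of the original example): keract's
    # display_activations and display_heatmaps helpers can render the activations
    # that were printed above. A single 28x28x1 MNIST test image is used as input,
    # and save=False shows the figures interactively instead of writing them to disk.
    activations = keract.get_activations(model, x_test[0:1])
    keract.display_activations(activations, save=False)
    keract.display_heatmaps(activations, x_test[0:1], save=False)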