Example #1
    def test_gradients_of_trainable_weights(self):
        model, x = dummy_model_and_inputs()
        model.compile(loss='mse', optimizer='adam')
        y = np.random.uniform(size=len(x))
        grad_trainable_weights = get_gradients_of_trainable_weights(
            model, x, y)

        # The returned dict maps trainable-weight names to gradient arrays.
        self.assertListEqual(list(grad_trainable_weights),
                             ['fc1/kernel:0', 'fc1/bias:0'])
        w = grad_trainable_weights['fc1/kernel:0']
        b = grad_trainable_weights['fc1/bias:0']
        self.assertListEqual(list(w.shape), [10, 1])  # Dense kernel: (input_dim, units).
        self.assertListEqual(list(b.shape), [1])  # Dense bias: (units,).
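
The helper dummy_model_and_inputs() is not shown in this snippet. A minimal sketch consistent with the asserted shapes (a single Dense unit named 'fc1' over 10 input features) might look like this; the helper's exact contents are an assumption:

import numpy as np
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model


def dummy_model_and_inputs():
    # Hypothetical helper: one Dense unit named 'fc1', so that
    # fc1/kernel has shape (10, 1) and fc1/bias has shape (1,).
    i = Input(shape=(10,))
    o = Dense(1, name='fc1')(i)
    x = np.random.uniform(size=(8, 10))
    return Model(inputs=i, outputs=o), x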
Example #2
def main():
    np.random.seed(123)
    inp_a = np.random.uniform(size=(5, 10))
    inp_b = np.random.uniform(size=(5, 10))
    out_c = np.random.uniform(size=(5, 1))

    # Just for visual purposes.
    np.set_printoptions(precision=2)

    # Activations of all the layers
    print('MULTI-INPUT MODEL')
    m1 = get_multi_inputs_model()
    m1.compile(optimizer='adam', loss='mse')
    utils.print_names_and_values(keract.get_activations(m1, [inp_a, inp_b]))
    utils.print_names_and_values(
        keract.get_gradients_of_trainable_weights(m1, [inp_a, inp_b], out_c))
    utils.print_names_and_values(
        keract.get_gradients_of_activations(m1, [inp_a, inp_b], out_c))

    # Just get the last layer!
    utils.print_names_and_values(
        keract.get_activations(m1, [inp_a, inp_b], layer_names='last_layer'))
    utils.print_names_and_values(
        keract.get_gradients_of_activations(m1, [inp_a, inp_b],
                                            out_c,
                                            layer_names='last_layer'))
    print('')

    print('SINGLE-INPUT MODEL')
    m2 = get_single_inputs_model()
    m2.compile(optimizer='adam', loss='mse')
    utils.print_names_and_values(keract.get_activations(m2, inp_a))
    utils.print_names_and_values(
        keract.get_gradients_of_trainable_weights(m2, inp_a, out_c))
    utils.print_names_and_values(
        keract.get_gradients_of_activations(m2, inp_a, out_c))
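
Neither model builder is shown here. A minimal sketch consistent with the data above (two 10-feature inputs, a scalar output, and a final layer named 'last_layer') could be the following; the exact architectures are assumptions:

from tensorflow.keras.layers import Dense, Input, concatenate
from tensorflow.keras.models import Model


def get_multi_inputs_model():
    # Hypothetical builder: two 10-feature inputs merged into one output.
    a = Input(shape=(10,))
    b = Input(shape=(10,))
    c = Dense(1, name='last_layer')(concatenate([a, b]))
    return Model(inputs=[a, b], outputs=c)


def get_single_inputs_model():
    # Hypothetical builder: one 10-feature input, one output.
    a = Input(shape=(10,))
    h = Dense(8, activation='relu')(a)
    return Model(inputs=a, outputs=Dense(1, name='last_layer')(h))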
Example #3
import keract
import tensorflow as tf
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

import utils
from data import MNIST

if __name__ == '__main__':
    # Computing gradients requires eager execution to be disabled.
    tf.compat.v1.disable_eager_execution()
    # Check for GPUs and set them to dynamically grow memory as needed
    # Avoids OOM from tensorflow greedily allocating GPU memory
    utils.gpu_dynamic_mem_growth()

    x_train, y_train, _, _ = MNIST.get_mnist_data()

    # Reshape (60000, 28, 28, 1) to (60000, 28, 28):
    # the LSTM expects inputs of shape (batch, time_steps, input_dim).
    x_train = x_train.squeeze()

    model = Sequential()
    model.add(LSTM(16, input_shape=(28, 28)))
    model.add(Dense(MNIST.num_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    utils.print_names_and_shapes(keract.get_activations(model, x_train[:128]))
    utils.print_names_and_shapes(
        keract.get_gradients_of_trainable_weights(model, x_train[:128],
                                                  y_train[:128]))
    utils.print_names_and_shapes(
        keract.get_gradients_of_activations(model, x_train[:128],
                                            y_train[:128]))
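
utils.gpu_dynamic_mem_growth() is defined elsewhere in the example code. A plausible sketch using the standard TensorFlow configuration API (the helper's actual body is an assumption):

import tensorflow as tf


def gpu_dynamic_mem_growth():
    # Hypothetical helper: let TensorFlow grow GPU memory on demand
    # instead of greedily reserving all of it up front.
    for gpu in tf.config.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)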
Example #4
        # The snippet starts mid-call; the optimizer below is an assumption.
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        print(model.summary())

        x_train, y_train, x_test, y_test = MNIST.get_mnist_data()

        # Sanity check that the accuracy is still ~99% at the first epoch, as before.
        # test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0, batch_size=128)
        # print('')
        # assert test_acc > 0.98

        utils.print_names_and_shapes(
            keract.get_activations(model, x_test[0:200]))  # with 200 samples.
        utils.print_names_and_shapes(
            keract.get_gradients_of_trainable_weights(model, x_train[0:10],
                                                      y_train[0:10]))
        utils.print_names_and_shapes(
            keract.get_gradients_of_activations(model, x_train[0:10],
                                                y_train[0:10]))

        a = keract.get_activations(model, x_test[0:1])  # with just one sample.
        keract.display_activations(a, directory='mnist_activations', save=True)

        # import numpy as np
        # import matplotlib.pyplot as plt
        # plt.imshow(np.squeeze(x_test[0:1]), interpolation='None', cmap='gray')
    else:
        x_train, y_train, x_test, y_test = MNIST.get_mnist_data()

        model = Sequential()
        # (the source snippet is truncated here, mid-way through adding layers)
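
utils.print_names_and_shapes and utils.print_names_and_values come from the examples' shared helper module. A minimal sketch of what they plausibly do (their exact output format is an assumption):

def print_names_and_shapes(activations: dict):
    # Hypothetical helper: one line per layer, showing the array's shape.
    for name, arr in activations.items():
        print(name, arr.shape)


def print_names_and_values(activations: dict):
    # Hypothetical helper: layer name followed by the full array.
    for name, arr in activations.items():
        print(name)
        print(arr)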
Example #5
def gradients_of_weights(self, st=0, en=None, indices=None) -> dict:
    # Fetch a slice of the test data and return a dict mapping each
    # trainable-weight name to its gradient array.
    test_x, test_y, test_label = self.fetch_data(start=st, ende=en, shuffle=False,
                                                 cache_data=False,
                                                 indices=indices)
    return keract.get_gradients_of_trainable_weights(self.k_model, test_x, test_label)
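
A hypothetical call site, assuming a wrapper object that exposes this method together with a compiled k_model and a fetch_data loader:

# Hypothetical usage: inspect gradient shapes for the first 100 test samples.
grads = wrapper.gradients_of_weights(st=0, en=100)
for name, grad in grads.items():
    print(name, grad.shape)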