Code Example #1
from kerassurgeon.identify import get_apoz
import pandas as pd


def get_model_apoz(model, generator):
    # Get APoZ (Average Percentage of Zeros) for every Conv2D layer.
    start = None  # optionally restrict the range of layers to scan
    end = None
    apoz = []
    for layer in model.layers[start:end]:
        if layer.__class__.__name__ == 'Conv2D':
            print(layer.name)
            apoz.extend([
                (layer.name, i, value)
                for (i, value) in enumerate(get_apoz(model, layer, generator))
            ])

    layer_name, index, apoz_value = zip(*apoz)
    apoz_df = pd.DataFrame({
        'layer': layer_name,
        'index': index,
        'apoz': apoz_value
    })
    apoz_df = apoz_df.set_index('layer')
    return apoz_df
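
For reference, a minimal usage sketch of the function above (the layer name 'conv_1' and the model/generator objects are placeholders for illustration; high_apoz and delete_channels are keras-surgeon utilities):

from kerassurgeon.identify import high_apoz
from kerassurgeon.operations import delete_channels

apoz_df = get_model_apoz(model, generator)
# Indices of 'conv_1' channels whose APoZ exceeds the default threshold.
channels = high_apoz(apoz_df.loc['conv_1', 'apoz'].values)
model = delete_channels(model, model.get_layer('conv_1'), channels)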
Code Example #2
def get_model_apoz(model, generator):
    from kerassurgeon.identify import get_apoz
    import pandas as pd

    # Get APoZ
    start = None
    end = None
    apoz = []
    for layer in model.layers[start:end]:
        if layer.__class__.__name__ == 'Conv2D':
            print(layer.name)
            apoz.extend([(layer.name, i, value) for (i, value)
                         in enumerate(get_apoz(model, layer, generator))])

    layer_name, index, apoz_value = zip(*apoz)
    apoz_df = pd.DataFrame({'layer': layer_name, 'index': index,
                            'apoz': apoz_value})
    apoz_df = apoz_df.set_index('layer')
    return apoz_df
Code Example #3
from kerassurgeon.identify import get_apoz
import pandas as pd


def get_model_apoz(model, generator):
    # APoZ = Average Percentage of Zeros
    start = None
    end = None
    apoz = []
    for layer in model.layers[start:end]:
        if layer.__class__.__name__ == 'Conv2D':
            print(layer.name)
            apoz.extend([
                (layer.name, i, value)
                for (i, value) in enumerate(get_apoz(model, layer, generator))
            ])

    layer_name, index, apoz_value = zip(*apoz)
    apoz_df = pd.DataFrame({
        'layer': layer_name,
        'index': index,
        'apoz': apoz_value
    })
    apoz_df = apoz_df.set_index('layer')
    return apoz_df
Code Example #4
from kerassurgeon.identify import get_apoz
import pandas as pd


def get_model_apoz(model, generator):
    # Get APoZ
    start = None
    end = None
    apoz = []
    for layer in model.layers[start:end]:
        if layer.__class__.__name__ == "Conv2D":
            print(layer.name)
            apoz.extend([
                (layer.name, i, value)
                for (i, value) in enumerate(get_apoz(model, layer, generator))
            ])

    layer_name, index, apoz_value = zip(*apoz)
    apoz_df = pd.DataFrame({
        "layer": layer_name,
        "index": index,
        "apoz": apoz_value
    })
    apoz_df = apoz_df.set_index("layer")
    return apoz_df
Code Example #5
File: lenet_mnist.py  Project: audatic/keras-surgeon
# Imports inferred from the calls below (TF 1.x / standalone-Keras era).
from tensorflow.examples.tutorials.mnist import input_data
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from keras import callbacks, layers
from kerassurgeon import identify
from kerassurgeon.operations import delete_channels


def main():
    training_verbosity = 2
    # Download data if needed and import.
    mnist = input_data.read_data_sets("tempData", one_hot=True, reshape=False)
    val_images = mnist.validation.images
    val_labels = mnist.validation.labels

    # Create LeNet model
    model = Sequential()
    model.add(
        Conv2D(20, [3, 3],
               input_shape=[28, 28, 1],
               activation="relu",
               name="conv_1"))
    model.add(MaxPool2D())
    model.add(Conv2D(50, [3, 3], activation="relu", name="conv_2"))
    model.add(MaxPool2D())
    model.add(layers.Permute((2, 1, 3)))
    model.add(Flatten())
    model.add(Dense(500, activation="relu", name="dense_1"))
    model.add(Dense(10, activation="softmax", name="dense_2"))

    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    early_stopping = callbacks.EarlyStopping(
        monitor="val_loss",
        min_delta=0,
        patience=10,
        verbose=training_verbosity,
        mode="auto",
    )
    reduce_lr = callbacks.ReduceLROnPlateau(
        monitor="val_loss",
        factor=0.1,
        patience=5,
        verbose=training_verbosity,
        mode="auto",
        epsilon=0.0001,
        cooldown=0,
        min_lr=0,
    )

    # Train LeNet on MNIST
    results = model.fit(
        mnist.train.images,
        mnist.train.labels,
        epochs=200,
        batch_size=128,
        verbose=2,
        validation_data=(val_images, val_labels),
        callbacks=[early_stopping, reduce_lr],
    )

    loss = model.evaluate(val_images, val_labels, batch_size=128, verbose=2)
    print("original model loss:", loss, "\n")

    layer_name = "dense_1"

    # Repeatedly prune high-APoZ channels from dense_1 and retrain.
    # Note: this loop has no exit condition in the original script.
    while True:
        layer = model.get_layer(name=layer_name)
        apoz = identify.get_apoz(model, layer, val_images)
        high_apoz_channels = identify.high_apoz(apoz)
        model = delete_channels(model, layer, high_apoz_channels)

        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])

        loss = model.evaluate(val_images,
                              val_labels,
                              batch_size=128,
                              verbose=2)
        print("model loss after pruning: ", loss, "\n")

        results = model.fit(
            mnist.train.images,
            mnist.train.labels,
            epochs=200,
            batch_size=128,
            verbose=training_verbosity,
            validation_data=(val_images, val_labels),
            callbacks=[early_stopping, reduce_lr],
        )

        loss = model.evaluate(val_images,
                              val_labels,
                              batch_size=128,
                              verbose=2)
        print("model loss after retraining: ", loss, "\n")
Code Example #6
# Imports inferred from the calls below (TF 1.x / standalone-Keras era).
from tensorflow.examples.tutorials import mnist
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense
from keras import callbacks, layers
from kerassurgeon import identify
from kerassurgeon.operations import delete_channels


def main():
    training_verbosity = 2
    # Download data if needed and import.
    mnist_data = mnist.input_data.read_data_sets('MNIST_data',
                                                 one_hot=True,
                                                 reshape=False)
    val_images = mnist_data.validation.images
    val_labels = mnist_data.validation.labels

    # Create LeNet model
    model = Sequential()
    model.add(
        Conv2D(20, [3, 3],
               input_shape=[28, 28, 1],
               activation='relu',
               name='conv_1'))
    model.add(MaxPool2D())
    model.add(Conv2D(50, [3, 3], activation='relu', name='conv_2'))
    model.add(MaxPool2D())
    model.add(layers.Permute((2, 1, 3)))
    model.add(Flatten())
    model.add(Dense(500, activation='relu', name='dense_1'))
    model.add(Dense(10, activation='softmax', name='dense_2'))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.summary()

    early_stopping = callbacks.EarlyStopping(monitor='val_loss',
                                             min_delta=0,
                                             patience=10,
                                             verbose=training_verbosity,
                                             mode='auto')
    reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                            factor=0.1,
                                            patience=5,
                                            verbose=training_verbosity,
                                            mode='auto',
                                            epsilon=0.0001,
                                            cooldown=0,
                                            min_lr=0)

    # Train LeNet on MNIST
    results = model.fit(mnist_data.train.images,
                        mnist_data.train.labels,
                        epochs=200,
                        batch_size=128,
                        verbose=2,
                        validation_data=(val_images, val_labels),
                        callbacks=[early_stopping, reduce_lr])

    loss = model.evaluate(val_images, val_labels, batch_size=128, verbose=2)
    print('original model loss:', loss, '\n')

    layer_name = 'dense_1'

    # Repeatedly prune high-APoZ channels from dense_1 and retrain.
    # Note: this loop has no exit condition in the original script.
    while True:
        layer = model.get_layer(name=layer_name)
        apoz = identify.get_apoz(model, layer, val_images)
        high_apoz_channels = identify.high_apoz(apoz)
        model = delete_channels(model, layer, high_apoz_channels)

        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        loss = model.evaluate(val_images,
                              val_labels,
                              batch_size=128,
                              verbose=2)
        print('model loss after pruning: ', loss, '\n')

        results = model.fit(mnist_data.train.images,
                            mnist_data.train.labels,
                            epochs=200,
                            batch_size=128,
                            verbose=training_verbosity,
                            validation_data=(val_images, val_labels),
                            callbacks=[early_stopping, reduce_lr])

        loss = model.evaluate(val_images,
                              val_labels,
                              batch_size=128,
                              verbose=2)
        print('model loss after retraining: ', loss, '\n')
Code Example #7
# This fragment assumes a keras-rl script in which `cem` (a CEM agent),
# `env`, and ENV_NAME are already defined. Imports inferred from the
# calls below; the SGD optimizer instance is an assumption.
from copy import deepcopy

import numpy as np
from keras.optimizers import SGD

from kerassurgeon import identify
from kerassurgeon.operations import delete_channels

sgd = SGD()

cem.save_weights('cem_{}_params.h5f'.format(ENV_NAME), overwrite=True)

# Finally, evaluate our algorithm for 5 episodes.
cem.test(env, nb_episodes=5, visualize=True)

# Collect a small batch of observations to estimate APoZ on.
observations = []
for _ in range(10):
    observations.append(deepcopy(env.reset()))
observations = np.asarray(observations)

# Snapshot the layer names first: delete_channels returns a new model,
# so layer objects from the old model must not be reused.
for name in [l.name for l in cem.model.layers]:
    layer = cem.model.get_layer(name=name)
    apoz = identify.get_apoz(cem.model, layer, observations)
    high_apoz_channels = identify.high_apoz(apoz)
    cem.model = delete_channels(cem.model, layer, high_apoz_channels)

    print('layer name: ', layer.name)

    cem.compile(optimizer=sgd,
                loss='categorical_crossentropy',
                metrics=['accuracy'])

    reward = cem.test(env, nb_episodes=5, visualize=True)
    print('model loss after pruning: ', reward, '\n')

    results = cem.fit(env, nb_steps=100000, visualize=True, verbose=2)

    loss = cem.test(env, nb_episodes=5, visualize=True)
    print('model loss after retraining: ', loss, '\n')
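
Since APoZ pruning only makes sense for layers that have channels, a safer variant of the loop above filters by layer type and skips the output layer (a minimal sketch; the class-name check mirrors Code Examples #1-#4):

for name in [l.name for l in cem.model.layers]:
    layer = cem.model.get_layer(name=name)
    # Only prune convolutional and hidden dense layers; pruning the
    # output layer would change the size of the action space.
    if layer.__class__.__name__ not in ('Conv2D', 'Dense'):
        continue
    if layer is cem.model.layers[-1]:
        continue
    apoz = identify.get_apoz(cem.model, layer, observations)
    cem.model = delete_channels(cem.model, layer, identify.high_apoz(apoz))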