Example No. 1
def test_delete_channels_rec_1():
    inputs = Input(shape=(784, ))
    x = Dense(64, activation='relu')(inputs)
    x = Dense(64, activation='relu')(x)
    predictions = Dense(10, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    operations.delete_channels(model, model.layers[2], [0])
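The test-style snippets on this page (this example and Examples No. 2, 4 and 7 to 9) omit their module-level imports and shared helpers. A minimal import block that would cover them is sketched below; the exact module layout is an assumption, Example No. 3 imports from tfkerassurgeon explicitly, and Examples No. 5 and 6 rely on their own project-level imports (load_model, delete_channels, identify, compile_model) that are likewise not shown.

# Assumed imports for the keras-surgeon test snippets; not part of the originals.
import numpy as np
from numpy import random
from keras.layers import Input, Dense, Conv2D, Flatten, GRU
from keras.models import Model
from kerassurgeon import operations, utils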
Example No. 2
def layer_test_helper_2d_global(layer, channel_index, data_format):
    # This should test that the output is the correct shape, so it is passed
    # into a Dense layer rather than a Conv layer. The weighted layer whose
    # channels are deleted is the layer before the pooling layer under test.
    # Create model
    main_input = Input(shape=list(random.randint(10, 20, size=3)))
    x = Conv2D(3, [3, 3], data_format=data_format)(main_input)
    x = layer(x)
    main_output = Dense(5)(x)
    model = Model(inputs=main_input, outputs=main_output)

    # Delete channels
    del_layer_index = 1
    next_layer_index = 3
    del_layer = model.layers[del_layer_index]
    new_model = operations.delete_channels(model, del_layer, channel_index)
    new_w = new_model.layers[next_layer_index].get_weights()

    # Calculate next layer's correct weights
    channel_count = getattr(del_layer, utils.get_channels_attr(del_layer))
    channel_index = [i % channel_count for i in channel_index]
    correct_w = model.layers[next_layer_index].get_weights()
    correct_w[0] = np.delete(correct_w[0], channel_index, axis=0)

    assert weights_equal(correct_w, new_w)
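As a rough illustration of how this helper might be driven (the parametrization below is an assumption, not taken from the original test file), it can be called with the Keras global 2D pooling layers it is written for:

# Hypothetical invocation of the helper with Keras global 2D pooling layers.
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D

layer_test_helper_2d_global(GlobalAveragePooling2D(), channel_index=[0, 2],
                            data_format='channels_last')
layer_test_helper_2d_global(GlobalMaxPooling2D(), channel_index=[0, 2],
                            data_format='channels_last')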
Example No. 3
def KerasSurgeonExample(logger, teacher, X_train, Y_train, X_test, Y_test):
    from tfkerassurgeon.operations import delete_channels
    layersofinterest = HelperUtil.find_layers_of_type(logger, teacher, "conv")
    layer_0 = teacher.layers[0]
    model_new = delete_channels(
        teacher, layer_0, [4, 15, 6, 26, 9, 24, 21, 17, 3, 23, 14, 10, 13])
    layer_1 = model_new.layers[1]
    model_new_two = delete_channels(model_new, layer_1,
                                    [11, 7, 37, 13, 48, 56, 41, 23, 16, 35])
    model_new_two.compile(loss='categorical_crossentropy',
                          optimizer=cfg.student_optimizer,
                          metrics=['accuracy'])
    prunedLoss, prunedAcc = HelperUtil.calculate_weighted_score(
        logger, model_new_two, X_train, Y_train, X_test, Y_test)
    logger.info('Pruned teacher weighted score: (loss, acc) --> (%s, %s)' %
                (prunedLoss, prunedAcc))
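HelperUtil and cfg come from the surrounding project and are not reproduced on this page. A plausible stand-in for HelperUtil.find_layers_of_type, purely as an assumption about what it returns, could be:

# Hypothetical stand-in for HelperUtil.find_layers_of_type: collect the layers
# whose class name contains the requested substring (e.g. "conv").
def find_layers_of_type(logger, model, type_substring):
    matches = [layer for layer in model.layers
               if type_substring.lower() in layer.__class__.__name__.lower()]
    logger.info('Found %d "%s" layers', len(matches), type_substring)
    return matches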
Example No. 4
def test_delete_channels_conv2d_conv2d_next_layer(channel_index, data_format):
    model = model_3(data_format)
    layer_index = 1
    next_layer_index = 2
    new_model = operations.delete_channels(model, model.layers[layer_index],
                                           channel_index)
    channel_count = model.layers[layer_index].filters
    channel_index = [i % channel_count for i in channel_index]
    w = model.layers[next_layer_index].get_weights()
    correct_w = [np.delete(w[0], channel_index, axis=-2), w[1]]
    new_w = new_model.layers[next_layer_index].get_weights()
    assert weights_equal(correct_w, new_w)
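The model_3 factory and weights_equal are shared test helpers that the excerpt omits. A minimal sketch of weights_equal, assuming it simply compares two lists of weight arrays element-wise, is:

# Minimal sketch of the weights_equal test helper: two weight lists match when
# every corresponding array is exactly equal.
def weights_equal(weights_a, weights_b):
    return (len(weights_a) == len(weights_b) and
            all(np.array_equal(a, b) for a, b in zip(weights_a, weights_b)))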
Example No. 5
def get_model(input_model):

    model = load_model(input_model)
    model.compile(
        loss=tf.keras.losses.categorical_crossentropy,
        optimizer="adam",
        metrics=['accuracy'])

    model.summary()

    # Computing the L1 norm of filter weights
    ordered_filters = collections.OrderedDict()

    for i, layer_name in enumerate(['conv2d', 'conv2d_1']):

        weight = model.get_layer(layer_name).get_weights()[0]
        weights_dict = {}
        num_filters = len(weight[0, 0, 0, :])

        # compute the L1-norm of each filter weight and store it in a dictionary

        for j in range(num_filters):
            weights_dict[j] = np.sum(abs(weight[:, :, :, j]))

        # sort the filters by ascending L1 norm
        weights_dict_sort = sorted(weights_dict.items(), key=lambda kv: kv[1])

        print('L1 norm, conv layer {}\n'.format(i + 1), weights_dict_sort)

        # collect the sorted L1-norm values from the dictionary (e.g. for plotting)
        weights_value = []

        for elem in weights_dict_sort:
            weights_value.append(elem[1])

        ordered_filters[layer_name] = collections.OrderedDict(weights_dict_sort)

    # Remove filters: prune the fraction f of filters with the smallest L1 norm
    f = 0.9

    new_model = model

    for layer_name in ordered_filters:
        weight = model.get_layer(layer_name).get_weights()[0]
        n = int(len(weight[0, 0, 0, :]) * f)

        channels_to_remove = list(ordered_filters[layer_name])[:n]

        new_model = delete_channels(new_model, new_model.get_layer(layer_name),
                                    channels=channels_to_remove, copy=False)

    new_model.summary()

    return new_model
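Because delete_channels returns a rebuilt, uncompiled model, the network coming out of get_model still needs to be compiled and usually fine-tuned before it is useful. A usage sketch follows; the file name and the training arrays are placeholders, not values from the original code.

# Hypothetical usage: prune a saved model and fine-tune the pruned copy.
pruned = get_model('saved_model.h5')            # placeholder path
pruned.compile(loss='categorical_crossentropy', optimizer='adam',
               metrics=['accuracy'])
pruned.fit(x_train, y_train, epochs=5,          # placeholder data
           validation_data=(x_test, y_test))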
Example No. 6
def prune_layer(model, layer):

    # Get the APOZ (Average Percentage of Zeros) that should identify where we can prune
    apoz = identify.get_apoz(model, layer, X_test)

    # Get the Channel Ids that have a high APOZ, which indicates they can be pruned
    high_apoz_channels = identify.high_apoz(apoz)

    # Run the pruning on the model and get the pruned (uncompiled) model back
    model = delete_channels(model, layer, high_apoz_channels)

    # Recompile the model
    compile_model(model)

    return model
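Since delete_channels rebuilds the model, each layer has to be looked up again on the model returned by the previous call. One way to chain prune_layer over several layers is sketched below; the layer names are placeholders.

# Hypothetical driver: prune several layers in turn, re-fetching each layer by
# name from the freshly rebuilt model returned by the previous prune_layer call.
for layer_name in ['conv2d', 'conv2d_1', 'dense']:    # placeholder names
    model = prune_layer(model, model.get_layer(layer_name))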
Example No. 7
def test_delete_channels_conv2d_conv2d_small_shape():
    model = model_4("channels_last")
    layer_index = 1
    channel_index = [0]
    new_model = operations.delete_channels(model,
                                           model.layers[layer_index],
                                           channel_index,
                                           copy=True)
    channel_count = model.layers[layer_index].filters
    channel_index = [i % channel_count for i in channel_index]
    w = model.layers[layer_index].get_weights()
    correct_w = [
        np.delete(w[0], channel_index, axis=-1),
        np.delete(w[1], channel_index, axis=0)
    ]
    new_w = new_model.layers[layer_index].get_weights()
    assert weights_equal(correct_w, new_w)
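With copy=True, delete_channels operates on a copy, so the model passed in keeps its original weights. The check below illustrates that, reusing the model and helpers from this test; it is my own illustration, not part of the original.

# Illustration of copy=True: the original model's weights are untouched, so
# they can still be compared against the pruned copy afterwards.
original_w = model.layers[1].get_weights()
pruned = operations.delete_channels(model, model.layers[1], [0], copy=True)
assert weights_equal(original_w, model.layers[1].get_weights())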
Example No. 8
def recursive_test_helper(layer, channel_index):
    main_input = Input(shape=[32, 10])
    x = layer(main_input)
    x = GRU(4, return_sequences=False)(x)
    main_output = Dense(5)(x)
    model = Model(inputs=main_input, outputs=main_output)

    # Delete channels
    del_layer_index = 1
    next_layer_index = 2
    del_layer = model.layers[del_layer_index]
    new_model = operations.delete_channels(model, del_layer, channel_index)
    new_w = new_model.layers[next_layer_index].get_weights()

    # Calculate next layer's correct weights
    channel_count = getattr(del_layer, utils.get_channels_attr(del_layer))
    channel_index = [i % channel_count for i in channel_index]
    correct_w = model.layers[next_layer_index].get_weights()
    correct_w[0] = np.delete(correct_w[0], channel_index, axis=0)

    assert weights_equal(correct_w, new_w)
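A possible invocation of this helper uses a recurrent layer whose output sequence feeds the GRU; the choice of LSTM and channel indices below is an assumption, not taken from the original test file.

# Hypothetical invocation with an LSTM returning sequences so that the GRU
# above receives a 3D input; channel_count is the LSTM's units (8).
from keras.layers import LSTM
recursive_test_helper(LSTM(8, return_sequences=True), channel_index=[0, 3])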
Example No. 9
def test_delete_channels_flatten(channel_index, data_format):
    # Create model
    main_input = Input(shape=list(random.randint(4, 10, size=3)))
    x = Conv2D(3, [3, 3], data_format=data_format)(main_input)
    x = Flatten()(x)
    main_output = Dense(5)(x)
    model = Model(inputs=main_input, outputs=main_output)

    # Delete channels
    layer_index = 1
    next_layer_index = 3
    layer = model.layers[layer_index]
    new_model = operations.delete_channels(model, layer, channel_index)
    new_w = new_model.layers[next_layer_index].get_weights()

    # Calculate next layer's correct weights
    flat_sz = np.prod(layer.output_shape[1:])
    channel_count = getattr(layer, utils.get_channels_attr(layer))
    channel_index = [i % channel_count for i in channel_index]
    if data_format == 'channels_first':
        delete_indices = [
            x * flat_sz // channel_count + i
            for x in channel_index
            for i in range(flat_sz // channel_count)
        ]
    elif data_format == 'channels_last':
        delete_indices = [
            x + i for i in range(0, flat_sz, channel_count)
            for x in channel_index
        ]
    else:
        raise ValueError('data_format must be "channels_first" or "channels_last"')
    correct_w = model.layers[next_layer_index].get_weights()
    correct_w[0] = np.delete(correct_w[0], delete_indices, axis=0)

    assert weights_equal(correct_w, new_w)
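As a quick numeric sanity check of the channels_last index mapping used above (my own illustration, not part of the test): for a feature map with 3 channels, channel 0 occupies every third position of the flattened vector.

# Flattening a (2, 2, 3) channels_last tensor row-major puts channel c at
# positions c, c + 3, c + 6, ... which is what the list comprehension builds.
feature_map = np.arange(2 * 2 * 3).reshape(2, 2, 3)
positions_of_channel_0 = np.flatnonzero(feature_map.flatten() % 3 == 0)
print(positions_of_channel_0)  # -> [0 3 6 9]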