Example #1
0
def test_default_transform():
    """The default transform of a generated classifier keeps exactly one
    44-layer graph whose produced model runs a forward pass."""
    transformed = default_transform(
        DefaultClassifierGenerator(10, (32, 32, 3)).generate())
    net = transformed[0].produce_model()
    net(torch.Tensor(get_conv_data()))  # forward-pass smoke test
    assert len(transformed) == 1
    assert len(transformed[0].layer_list) == 44
Example #2
0
def test_copy_model():
    """A model rebuilt from its graph must reproduce the original's outputs
    exactly.

    Fix: the original asserted ``np.sum(output1 - output2) == 0``, where
    positive and negative per-element differences can cancel each other out
    and hide a real mismatch.  Compare element-wise instead, matching the
    exact-equality checks used by the skip-add tests in this file.
    """
    model = get_add_skip_model()
    new_model = Graph(model).produce_model()
    input_data = get_conv_data()

    output1 = model.predict_on_batch(input_data).flatten()
    output2 = new_model.predict_on_batch(input_data).flatten()

    # Element-wise equality: a signed sum could mask opposing errors.
    assert np.array_equal(output1, output2)
Example #3
0
def test_dense_wider():
    """Widening a dense layer must keep predictions (near-)unchanged.

    Fix: the original asserted ``abs(np.sum(diff)) < 1e-4``, where positive
    and negative per-element errors can cancel and hide a large mismatch.
    Sum the absolute differences instead, as the other wider-model tests in
    this file already do.
    """
    model = get_add_skip_model()
    graph = Graph(model)
    new_model = graph.to_wider_model(model.layers[-2], 3)
    input_data = get_conv_data()

    output1 = model.predict_on_batch(input_data).flatten()
    output2 = new_model.predict_on_batch(input_data).flatten()

    # Per-element absolute error; a signed sum could mask opposing errors.
    assert np.sum(np.abs(output1 - output2)) < 1e-4
Example #4
0
def test_conv_deeper():
    """Deepening a conv layer must keep predictions close to the original.

    Fix: the original asserted ``abs(np.sum(diff)) < 0.2``, where positive
    and negative per-element errors can cancel and hide a large mismatch.
    Sum the absolute differences instead, matching the other conv-deeper
    test in this file.
    """
    model = get_conv_model()
    graph = Graph(model)
    new_model = graph.to_conv_deeper_model(model.layers[4], 3)
    input_data = get_conv_data()

    output1 = model.predict_on_batch(input_data).flatten()
    output2 = new_model.predict_on_batch(input_data).flatten()

    # Per-element absolute error; a signed sum could mask opposing errors.
    assert np.sum(np.abs(output1 - output2)) < 0.2
Example #5
0
def test_skip_concat_over_pooling():
    """A concat-skip connection spanning a pooling layer must not change
    predictions beyond numeric noise.

    Fix: the original asserted ``abs(np.sum(diff)) < 1e-4``, where positive
    and negative per-element errors can cancel and hide a large mismatch.
    Sum the absolute differences instead, as the NetworkMorphismGraph
    variants of this test already do.
    """
    model = get_pooling_model()
    graph = Graph(model)
    new_model = graph.to_concat_skip_model(model.layers[4], model.layers[11])
    input_data = get_conv_data()

    output1 = model.predict_on_batch(input_data).flatten()
    output2 = new_model.predict_on_batch(input_data).flatten()

    # Per-element absolute error; a signed sum could mask opposing errors.
    assert np.sum(np.abs(output1 - output2)) < 1e-4
Example #6
0
def test_skip_add_over_pooling():
    """An add-skip connection spanning a pooling layer is an exact identity
    morph: predictions must match element-for-element."""
    model = get_pooling_model()
    morphed = Graph(model).to_add_skip_model(model.layers[4], model.layers[11])
    batch = get_conv_data()

    before = model.predict_on_batch(batch).flatten()
    after = morphed.predict_on_batch(batch).flatten()

    assert np.array_equal(before, after)
Example #7
0
def test_deeper_conv_block():
    """Appending a deeper conv block to the model should act near-identity
    on its outputs."""
    model = get_conv_model()
    block = deeper_conv_block(model.layers[1], 3)
    tensor = model.outputs[0]
    for layer in block[:3]:  # conv block = three layers, applied in order
        tensor = layer(tensor)
    extended = Model(inputs=model.inputs, outputs=tensor)
    batch = get_conv_data()
    before = model.predict_on_batch(batch).flatten()
    after = extended.predict_on_batch(batch).flatten()
    assert np.sum(np.abs(before - after)) < 1e-1
Example #8
0
def test_dense_wider():
    """Widening the last dense layer through NetworkMorphismGraph preserves
    predictions within numeric tolerance."""
    model = get_add_skip_model()
    morph = NetworkMorphismGraph(model)
    morph.to_wider_model(morph.layer_to_id[model.layers[-2]], 3)
    widened = morph.produce_model()
    batch = get_conv_data()

    before = model.predict_on_batch(batch).flatten()
    after = widened.predict_on_batch(batch).flatten()

    assert np.sum(np.abs(before - after)) < 1e-4
Example #9
0
def test_skip_add_over_pooling():
    """An add-skip morph across pooling, applied via layer ids, is an exact
    identity transformation on the model's outputs."""
    model = get_pooling_model()
    morph = NetworkMorphismGraph(model)
    src = morph.layer_to_id[model.layers[4]]
    dst = morph.layer_to_id[model.layers[11]]
    morph.to_add_skip_model(src, dst)
    morphed = morph.produce_model()
    batch = get_conv_data()

    before = model.predict_on_batch(batch).flatten()
    after = morphed.predict_on_batch(batch).flatten()

    assert np.array_equal(before, after)
Example #10
0
def test_dense_wider():
    """Widening layer 19 of a weight-carrying graph keeps predictions within
    numeric tolerance of the source model."""
    model = get_add_skip_model()
    morph = Graph(model, True)
    morph.to_wider_model(19, 3)
    widened = morph.produce_model()
    batch = get_conv_data()

    before = model.predict_on_batch(batch).flatten()
    after = widened.predict_on_batch(batch).flatten()

    assert np.sum(np.abs(before - after)) < 1e-4
Example #11
0
def test_skip_add_over_pooling():
    """An add-skip morph (ids 2 -> 10) on a weight-carrying graph is an exact
    identity transformation on the model's outputs."""
    model = get_pooling_model()
    morph = Graph(model, True)
    morph.to_add_skip_model(2, 10)
    morphed = morph.produce_model()
    batch = get_conv_data()

    before = model.predict_on_batch(batch).flatten()
    after = morphed.predict_on_batch(batch).flatten()

    assert np.array_equal(before, after)
Example #12
0
def test_conv_deeper():
    """Deepening conv layer 6 of a weight-carrying graph keeps predictions
    close to the source model."""
    model = get_conv_model()
    morph = Graph(model, True)
    morph.to_conv_deeper_model(6, 3)
    deepened = morph.produce_model()
    batch = get_conv_data()

    before = model.predict_on_batch(batch).flatten()
    after = deepened.predict_on_batch(batch).flatten()

    assert np.sum(np.abs(before - after)) < 4e-1
Example #13
0
def test_skip_concat_over_pooling():
    """Two stacked concat-skips over pooling, round-tripped through a fresh
    graph, keep predictions close to the source model."""
    model = get_pooling_model()
    morph = Graph(model, True)
    for _ in range(2):  # stack the same concat skip twice
        morph.to_concat_skip_model(6, 11)
    morph = Graph(morph.produce_model(), True)  # round-trip through a graph
    rebuilt = morph.produce_model()
    batch = get_conv_data()

    before = model.predict_on_batch(batch).flatten()
    after = rebuilt.predict_on_batch(batch).flatten()

    assert np.sum(np.abs(before - after)) < 4e-1
Example #14
0
def test_skip_concat_over_pooling():
    """Two stacked concat-skips over pooling via NetworkMorphismGraph,
    round-tripped through a fresh graph, keep predictions close."""
    model = get_pooling_model()
    morph = NetworkMorphismGraph(model)
    # Re-resolve ids on each call in case the first morph shifts the mapping.
    for _ in range(2):
        morph.to_concat_skip_model(morph.layer_to_id[model.layers[4]],
                                   morph.layer_to_id[model.layers[8]])
    morph = NetworkMorphismGraph(morph.produce_model())
    rebuilt = morph.produce_model()
    batch = get_conv_data()

    before = model.predict_on_batch(batch).flatten()
    after = rebuilt.predict_on_batch(batch).flatten()

    assert np.sum(np.abs(before - after)) < 4e-1
Example #15
0
def test_dense_deeper():
    """Deepening dense layer 10 must leave the torch model's outputs within
    numeric tolerance of the original."""
    graph = get_conv_dense_model()
    model = graph.produce_model()
    graph = deepcopy(graph)  # keep the original graph's weights untouched
    graph.to_dense_deeper_model(10)
    deepened = graph.produce_model()
    batch = torch.Tensor(get_conv_data())

    model.eval()
    deepened.eval()
    gap = (model(batch) - deepened(batch)).abs().sum()

    assert gap < 1e-3
Example #16
0
def test_dense_deeper():
    """Deepening dense layer 9 must leave the torch model's outputs within
    numeric tolerance of the original."""
    graph = get_conv_dense_model()
    model = graph.produce_model()
    graph = deepcopy(graph)  # keep the original graph's weights untouched
    graph.to_dense_deeper_model(9)
    deepened = graph.produce_model()
    batch = torch.Tensor(get_conv_data())

    model.eval()
    deepened.eval()
    gap = (model(batch) - deepened(batch)).abs().sum()

    assert gap < 1e-3
Example #17
0
def test_skip_add_over_pooling():
    """An add-skip morph (ids 1 -> 10) across pooling keeps the torch model's
    outputs within numeric tolerance of the original."""
    graph = get_pooling_model()
    model = graph.produce_model()
    graph = deepcopy(graph)  # keep the original graph's weights untouched
    graph.to_add_skip_model(1, 10)
    morphed = graph.produce_model()
    batch = torch.Tensor(get_conv_data())

    model.eval()
    morphed.eval()

    gap = (model(batch) - morphed(batch)).abs().sum()

    assert gap < 1e-4
Example #18
0
def test_skip_add_over_pooling():
    """An add-skip morph (ids 1 -> 8) across pooling keeps the torch model's
    outputs within numeric tolerance of the original."""
    graph = get_pooling_model()
    model = graph.produce_model()
    graph = deepcopy(graph)  # keep the original graph's weights untouched
    graph.to_add_skip_model(1, 8)
    morphed = graph.produce_model()
    batch = torch.Tensor(get_conv_data())

    model.eval()
    morphed.eval()

    gap = (model(batch) - morphed(batch)).abs().sum()

    assert gap < 1e-4
Example #19
0
def test_wider_conv():
    """A widened conv/bn pair followed by an adjusted next conv should keep
    the rebuilt network's outputs close to the original model's."""
    model = get_conv_model()

    pre = wider_pre_conv(model.layers[1], 3)
    bn = wider_bn(model.layers[2], 3, 3, 3)
    nxt = wider_next_conv(model.layers[4], 3, 3, 3)

    # Rebuild the widened stack from a fresh input tensor.
    inp = Input(shape=(5, 5, 3))
    x = pre(inp)
    x = bn(x)
    x = Activation('relu')(x)
    x = nxt(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    widened = Model(inputs=inp, outputs=x)

    batch = get_conv_data()
    before = model.predict_on_batch(batch)
    after = widened.predict_on_batch(batch)
    assert np.sum(np.abs(before.flatten() - after.flatten())) < 1e-1
Example #20
0
def test_default_transform():
    """The default transform of a CnnGenerator graph keeps exactly one
    43-layer graph whose produced model runs a forward pass."""
    transformed = default_transform(CnnGenerator(10, (32, 32, 3)).generate())
    net = transformed[0].produce_model()
    net(torch.Tensor(get_conv_data()))  # forward-pass smoke test
    assert len(transformed) == 1
    assert len(transformed[0].layer_list) == 43