Example #1
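# Verifies FakeQuantize placement after GraphTransformer.insert_fake_quantize:
# inputs 0-1 of nodes '14' and '12' must be FakeQuantize, input 2 of '14' must
# stay a Concat, and six FakeQuantize nodes must exist in total.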
def test_multibranch_propagation_with_fq_moving():
    TEST_CASES_PATH = TEST_ROOT / 'data' / 'test_cases_refs'
    model_path = (TEST_CASES_PATH /
                  'test_ig_border_case_with_fq_moving.xml').as_posix()
    weights_path = (TEST_CASES_PATH /
                    'test_ig_border_case_with_fq_moving.bin').as_posix()

    ignored_params = {
        "scope": [
            '8/WithoutBiases', '9/WithoutBiases', '10/WithoutBiases',
            '11/WithoutBiases'
        ]
    }

    config = Dict({'model': model_path, 'weights': weights_path})
    model = load_model(config)

    hardware_config = HardwareConfig.from_json(
        (HARDWARE_CONFIG_PATH / 'cpu.json').as_posix())
    quantized_model = GraphTransformer(hardware_config).insert_fake_quantize(
        model, ignored_params)

    node = get_node_by_name(quantized_model, '14')
    for node_input in get_node_inputs(node)[:2]:
        assert node_input.type == 'FakeQuantize'
    assert get_node_inputs(node)[2].type == 'Concat'

    node = get_node_by_name(quantized_model, '12')
    for node_input in get_node_inputs(node)[:2]:
        assert node_input.type == 'FakeQuantize'

    assert len(get_nodes_by_type(quantized_model, ['FakeQuantize'])) == 6
Example #2
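# Helper: quantizes a model, then for each listed node removes its first
# FakeQuantize parent and verifies the resulting cropped model.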
def cut_fq_node(model, node_list, graph_transformer, tmp_path):
    model_ = load_model(model.model_params)
    quantized_model = graph_transformer.insert_fake_quantize(model_)
    cropped_model = quantized_model
    for node_name in node_list:
        node = get_node_by_name(cropped_model, node_name)
        for parent_node in nu.get_node_inputs(node):
            if parent_node and parent_node.type == 'FakeQuantize':
                # Remove from cropped_model so cuts accumulate across node_list.
                cropped_model, *_ = graph_transformer.remove_fq_nodes(
                    cropped_model, [parent_node.name])
                break

    check_model(tmp_path, cropped_model, model.model_name + '_cut_fq', model.framework)
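This page does not show how cut_fq_node is driven; below is a minimal sketch,
assuming the same models fixture and CPU hardware config as Example #3. The
test name and the node name '14' are hypothetical.

# Hypothetical driver for cut_fq_node (fixture usage mirrors Example #3).
def test_cut_fq_layer(tmp_path, models, model_name, model_framework):
    model = models.get(model_name, model_framework, tmp_path)
    hardware_config = HardwareConfig.from_json(
        (HARDWARE_CONFIG_PATH / 'cpu.json').as_posix())
    # Cut the FakeQuantize parent of node '14' (name is illustrative).
    cut_fq_node(model, ['14'], GraphTransformer(hardware_config), tmp_path)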
Example #3
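# Same placement checks as Example #1, but the model comes from a parametrized
# `models` fixture and the ignored scope lists convolutions by name.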
def test_multibranch_propagation_with_fq_moving(tmp_path, models, model_name, model_framework):
    ignored_params = {
        "scope": ['Convolution_104', 'Convolution_152', 'Convolution_8', 'Convolution_56']
    }

    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)

    hardware_config = HardwareConfig.from_json((HARDWARE_CONFIG_PATH / 'cpu.json').as_posix())
    quantized_model = GraphTransformer(hardware_config).insert_fake_quantize(model, ignored_params)

    node = get_node_by_name(quantized_model, '14')
    for node_input in get_node_inputs(node)[:2]:
        assert node_input.type == 'FakeQuantize'
    assert get_node_inputs(node)[2].type == 'Concat'

    node = get_node_by_name(quantized_model, '12')
    for node_input in get_node_inputs(node)[:2]:
        assert node_input.type == 'FakeQuantize'

    assert len(get_nodes_by_type(quantized_model, ['FakeQuantize'])) == 6
Example #4
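# Checks scale unification: every FakeQuantize in a group must carry input
# ranges (min/max constants) identical to the first FakeQuantize in the group.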
def _test_unify_scales(model_, to_unify_):
    for _, fqs in to_unify_:
        ranges = []
        for fq in fqs:
            fq = get_node_by_name(model_, fq)
            fq_inputs = nu.get_node_inputs(fq)[1:]
            ranges.append(tuple(fqut.get_node_value(fq_input)
                                for fq_input in fq_inputs))
            assert all(np.array_equal(r, ranges[0][i])
                       for i, r in enumerate(ranges[-1]))
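The shape of to_unify_ is not shown on this page; a minimal sketch of a
plausible payload follows, with hypothetical node names, assuming a
quantized_model produced as in the earlier examples.

# Each entry pairs a producing op with the FakeQuantize nodes whose input
# ranges are expected to match (all names here are illustrative).
to_unify = [
    ('Concat_0', ['FakeQuantize_1', 'FakeQuantize_2']),
]
_test_unify_scales(quantized_model, to_unify)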