Code Example #1
File: test_graph.py Project: terfendail/openvino
def test_build_quantization_graph(tmp_path, models, model_name, model_framework, target_device):
    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params, target_device=target_device)

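    # Choose the hardware description that matches the requested target device.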
    if target_device == 'GNA':
        hardware_config = HardwareConfig.from_json(GNA_CONFIG_PATH.as_posix())
    else:
        hardware_config = HardwareConfig.from_json(CPU_CONFIG_PATH.as_posix())

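    # Insert FakeQuantize operations according to the hardware config.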
    quantization_model = GraphTransformer(hardware_config).insert_fake_quantize(model)

    check_model(tmp_path, quantization_model, model_name, model_framework)
Code Example #2
def test_multibranch_propagation_with_fq_moving():
    TEST_CASES_PATH = TEST_ROOT / 'data' / 'test_cases_refs'
    model_path = (TEST_CASES_PATH /
                  'test_ig_border_case_with_fq_moving.xml').as_posix()
    weights_path = (TEST_CASES_PATH /
                    'test_ig_border_case_with_fq_moving.bin').as_posix()

    ignored_params = {
        "scope": [
            '8/WithoutBiases', '9/WithoutBiases', '10/WithoutBiases',
            '11/WithoutBiases'
        ]
    }

    config = Dict({'model': model_path, 'weights': weights_path})
    model = load_model(config)

    hardware_config = HardwareConfig.from_json(
        (HARDWARE_CONFIG_PATH / 'cpu.json').as_posix())
    quantized_model = GraphTransformer(hardware_config).insert_fake_quantize(
        model, ignored_params)

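    # The first two inputs of node '14' must be quantized; the third
    # input (a Concat) must not be.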
    node = get_node_by_name(quantized_model, '14')
    for node_input in get_node_inputs(node)[:2]:
        assert node_input.type == 'FakeQuantize'
    assert get_node_inputs(node)[2].type == 'Concat'

    node = get_node_by_name(quantized_model, '12')
    for node_input in get_node_inputs(node)[:2]:
        assert node_input.type == 'FakeQuantize'

    assert len(get_nodes_by_type(quantized_model, ['FakeQuantize'])) == 6
Code Example #3
def test_cutting_fq_layers(_params, tmp_path, models):
    model_name, model_framework, node_list = _params
    model = models.get(model_name, model_framework, tmp_path)
    hardware_config = HardwareConfig.from_json(CPU_CONFIG_PATH.as_posix())
    graph_transformer = GraphTransformer(hardware_config)

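    # Cut the FakeQuantize nodes listed in node_list out of the model.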
    cut_fq_node(model, node_list, graph_transformer, tmp_path)
Code Example #4
File: test_graph.py Project: terfendail/openvino
def test_build_quantization_graph_with_ignored_blocks(tmp_path, models, model_name, model_framework):
    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)
    hardware_config = HardwareConfig.from_json(CPU_CONFIG_PATH.as_posix())
    quantization_model = GraphTransformer(hardware_config).insert_fake_quantize(model)

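    # Compare against the '<model_name>_ig_pt' reference graph.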
    check_model(tmp_path, quantization_model, model_name + '_ig_pt', model_framework)
Code Example #5
File: test_graph.py Project: terfendail/openvino
def test_build_quantization_graph_with_ignored_agnostic_params(
        tmp_path, models, model_name, model_framework):
    if model_name in CASCADE_MAP:
        model = models.get_cascade(model_name, model_framework, tmp_path, CASCADE_MAP[model_name])
    else:
        model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)
    hardware_config = HardwareConfig.from_json(CPU_CONFIG_PATH.as_posix())
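    # Single models get one ignored-params dict; the mtcnn cascade gets a
    # separate dict per submodel (pnet/rnet/onet).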
    if model_name not in CASCADE_MAP:
        ignored_params = {
            'scope': [],
            'operations': [{'type': 'MaxPool'},
                           {'type': 'Reshape'}]
        }

    if model_name == 'mtcnn':
        ignored_params = {
            'pnet': {'scope': [], 'operations': [{'type': 'MaxPool'}]},
            'rnet': {'skip_model': True, 'scope': [], 'operations': [{'type': 'MaxPool'}]},
            'onet': {'scope': [], 'operations': [{'type': 'MaxPool'}]}
        }

    quantization_model = GraphTransformer(hardware_config).insert_fake_quantize(model, ignored_params)

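    # For every ignored operation type, check that none of its inputs was
    # wrapped in a FakeQuantize.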
    for model_dict in quantization_model.models:
        model = model_dict['model']
        dict_ignored_operation_model = ignored_params[model_dict['name']]['operations'] \
            if quantization_model.is_cascade else ignored_params['operations']
        ignored_params_operation = [op['type'] for op in dict_ignored_operation_model]
        for node in model.get_op_nodes():
            if node.type in ignored_params_operation:
                parent_type = [str(n.type) for n in nu.get_node_inputs(node) if n is not None]
                assert 'FakeQuantize' not in parent_type
Code Example #6
def test_build_quantization_graph_with_ignored_params(tmp_path, models,
                                                      model_name,
                                                      model_framework):
    if model_name in CASCADE_MAP:
        model = models.get_cascade(model_name, model_framework, tmp_path,
                                   CASCADE_MAP[model_name])
    else:
        model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)
    hardware_config = HardwareConfig.from_json(CPU_CONFIG_PATH.as_posix())

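    # Default ignored operations for non-cascade models; refined per model below.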
    if model_name not in CASCADE_MAP:
        ignored_params = {
            'operations': [{
                'type': 'Add',
            }, {
                'type': 'Convolution',
                'attributes': {
                    'output': 1280,
                    'group': 1
                }
            }]
        }

    if model_name == 'resnet_example':
        ignored_params['scope'] = [
            'Conv_11/WithoutBiases', 'Conv_29/WithoutBiases'
        ]
    elif model_name == 'googlenet_example':
        node_name = 'Conv_10/WithoutBiases'
        ignored_params['scope'] = [node_name]
    elif model_name == 'mtcnn':
        ignored_params = {
            'pnet': {
                'scope': ['conv1/WithoutBiases', 'conv3/WithoutBiases']
            },
            'rnet': {
                'skip_model': True
            },
            'onet': {
                'operations': [{
                    'type': 'MatMul'
                }]
            }
        }

    quantization_model = GraphTransformer(
        hardware_config).insert_fake_quantize(model, ignored_params)

    print(len(get_nodes_by_type(quantization_model, ['FakeQuantize'])))
    check_model(tmp_path, quantization_model, model_name + '_ig_params',
                model_framework)
Code Example #7
File: test_graph.py Project: sesorov/openvino
def test_multibranch_propagation_without_fq_moving(tmp_path, models, model_name, model_framework):
    ignored_params = {
        "scope": ['Convolution_104', 'Convolution_152', 'Convolution_8', 'Convolution_56']
    }

    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)

    hardware_config = HardwareConfig.from_json((HARDWARE_CONFIG_PATH / 'cpu.json').as_posix())
    quantized_model = GraphTransformer(hardware_config).insert_fake_quantize(model, ignored_params)

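    # With the four convolutions ignored, only Convolution_201 gets quantized
    # inputs: exactly two FakeQuantize nodes in the whole graph.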
    node = get_node_by_name(quantized_model, 'Convolution_201')
    for node_input in get_node_inputs(node)[:2]:
        assert node_input.type == 'FakeQuantize'
    assert len(get_nodes_by_type(quantized_model, ['FakeQuantize'])) == 2
Code Example #8
def test_per_channel_activations_for_depthwise(tmp_path, models, model_name,
                                               model_framework,
                                               hardware_config_path):
    model = models.get(model_name, model_framework, tmp_path)
    model = load_model(model.model_params)
    hardware_config = HardwareConfig.from_json(hardware_config_path.as_posix())
    model = GraphTransformer(hardware_config).insert_fake_quantize(model)
    fq_configurations = read_all_fake_quantize_configurations(
        ALGORITHM_CONFIG, hardware_config, model)
    ALGORITHM_CONFIG.preset = ALGORITHM_CONFIG.params.preset
    ALGORITHM_CONFIG.target_device = ALGORITHM_CONFIG.params.target_device
    fq_configuration = get_configurations_by_preset(ALGORITHM_CONFIG, model,
                                                    fq_configurations)
    fq_dw_names = [
        'Conv_4/WithoutBiases/fq_input_0', 'Conv_13/WithoutBiases/fq_input_0',
        'Conv_22/WithoutBiases/fq_input_0', 'Conv_32/WithoutBiases/fq_input_0',
        'Conv_41/WithoutBiases/fq_input_0', 'Conv_51/WithoutBiases/fq_input_0',
        'Conv_61/WithoutBiases/fq_input_0', 'Conv_70/WithoutBiases/fq_input_0',
        'Conv_80/WithoutBiases/fq_input_0', 'Conv_90/WithoutBiases/fq_input_0',
        'Conv_100/WithoutBiases/fq_input_0',
        'Conv_109/WithoutBiases/fq_input_0',
        'Conv_119/WithoutBiases/fq_input_0',
        'Conv_129/WithoutBiases/fq_input_0',
        'Conv_138/WithoutBiases/fq_input_0',
        'Conv_148/WithoutBiases/fq_input_0',
        'Conv_158/WithoutBiases/fq_input_0'
    ]
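    # Find the activation quantization config defined for depthwise convolutions.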
    dw_config = None
    for config_by_type in hardware_config:
        if config_by_type['type'] == 'DepthWiseConvolution':
            dw_config = config_by_type['quantization']['activations'][0]

    if not dw_config:
        raise Exception('DepthWiseConvolution config missing from the hardware configuration')

    save_model(model, tmp_path.as_posix(), model_name)

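    # Every FakeQuantize feeding a depthwise convolution must use the
    # depthwise activation configuration.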
    for fq_name in fq_configuration:
        if fq_name in fq_dw_names:
            fq_config = fq_configuration[fq_name]['activations']
            assert fq_config == dw_config
Code Example #9
def test_load_hardware_config(hw_config_name):
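    # Load the named hardware config from HARDWARE_CONFIG_PATH and validate it.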
    hw_config_path = HARDWARE_CONFIG_PATH.joinpath(hw_config_name).as_posix()
    hw_config = HardwareConfig.from_json(hw_config_path)
    check_hardware_config(hw_config, hw_config_name)
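All of the examples above share the same core flow: load a model, read a hardware configuration, and call GraphTransformer.insert_fake_quantize on it. The sketch below condenses that flow into one helper; it is a minimal sketch, not code from the project. The import paths are assumptions about the OpenVINO Post-Training Optimization Tool package layout these tests come from, and the name quantize_graph is hypothetical.

# Minimal sketch of the shared flow. The import paths below are assumptions
# about the OpenVINO POT package layout; adjust them to the actual tree.
from openvino.tools.pot.graph import load_model
from openvino.tools.pot.graph.transformer import GraphTransformer
from openvino.tools.pot.configs.hardware_config import HardwareConfig

def quantize_graph(model_config, hw_config_json, ignored_params=None):
    # model_config is a dict with 'model' and 'weights' paths, as in
    # Code Example #2.
    model = load_model(model_config)
    # Per-device quantization rules, e.g. cpu.json.
    hardware_config = HardwareConfig.from_json(hw_config_json)
    transformer = GraphTransformer(hardware_config)
    # Insert FakeQuantize operations, skipping anything in ignored_params.
    if ignored_params is not None:
        return transformer.insert_fake_quantize(model, ignored_params)
    return transformer.insert_fake_quantize(model)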