def test_output_quantization(_case_config):
    model = test_models.UNet()
    input_shape = (1, 3, 360, 480)
    config = get_basic_quantization_config(_case_config.quant_type, input_sample_size=input_shape)
    config["compression"].update({"quantize_outputs": True})
    compressed_model, _ = create_compressed_model_and_algo_for_test(model, config)
    check_model_graph(compressed_model, 'unet_qoutput.dot', _case_config.graph_dir)

def test_output_quantization(_quantize_config):
    net = test_models.UNet()
    ctx = reset_context('orig')
    ctx = reset_context('quantized_graphs')
    input_shape = (1, 3, 360, 480)
    qnet = QuantizedNetwork(net, _quantize_config.quantizer,
                            [ModelInputInfo(input_shape), ],
                            quantize_outputs=True)
    _ = qnet(torch.zeros(*input_shape))
    _ = qnet(torch.zeros(*input_shape))
    check_graph(ctx.graph, 'unet_qoutput.dot', _quantize_config.graph_dir)

def test_output_quantization(_quantize_config):
    net = test_models.UNet()
    ctx = reset_context('orig')
    ctx = reset_context('quantized_graphs')
    input_shape = (1, 3, 360, 480)
    qnet = QuantizedNetwork(net, _quantize_config.quantizer, input_shape,
                            dummy_forward_fn=create_dummy_forward_fn(input_shape),
                            quantize_outputs=True)
    _ = qnet(torch.zeros(*input_shape))
    _ = qnet(torch.zeros(*input_shape))
    check_graph(to_networkx(ctx), 'unet_qoutput.dot', _quantize_config.graph_dir)

def test_output_quantization(_case_config):
    # TODO: add support for the "quantize_outputs" option in propagation mode.
    pytest.skip()
    model = test_models.UNet()
    input_shape = [1, 3, 360, 480]
    config = get_basic_quantization_config(_case_config.quant_type, input_sample_sizes=input_shape)
    config["compression"].update({"quantize_outputs": True})
    compressed_model, _ = create_compressed_model_and_algo_for_test(model, config)
    check_model_graph(compressed_model, 'unet_qoutput.dot', _case_config.graph_dir)
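
# For context, a minimal sketch of the config dict that the config-driven variants
# above build and pass to create_compressed_model_and_algo_for_test. The exact keys
# emitted by get_basic_quantization_config are an assumption here (an NNCF-style
# layout); only the "compression" update with "quantize_outputs" comes from the
# tests themselves.
config = {
    "input_info": {"sample_size": [1, 3, 360, 480]},  # shape of the dummy input used for graph tracing (assumed key)
    "compression": {"algorithm": "quantization"},     # assumed base quantization section
}
config["compression"].update({"quantize_outputs": True})  # as in the tests: also quantize model outputs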