def pose_estimation_quanteval(args):
    # Build the model and restore weights from the checkpoint
    model_builder = ModelBuilder()
    model_builder.create_model()
    model = model_builder.model

    state_dict = torch.load(args.model_dir)
    state = model.state_dict()
    # Merge the checkpoint into the current state dict so partially
    # matching checkpoints still load
    state.update(state_dict)

    model.load_state_dict(state)

    # create quantsim object which inserts quant ops between layers
    sim = quantsim.QuantizationSimModel(model,
                                        input_shapes=(1, 3, 128, 128),
                                        quant_scheme=args.quant_scheme)

    # Calibration callback: a bounded evaluation pass lets quantsim observe
    # activation ranges before computing encodings
    evaluate = partial(evaluate_model, num_imgs=500)
    sim.compute_encodings(evaluate, args.coco_path)

    # Evaluate the quantized model
    eval_num = evaluate_model(sim.model, args.coco_path)
    print(f'The [mAP, mAR] results are: {eval_num}')
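
A call site for this function would look roughly like the sketch below. The attribute
names (model_dir, coco_path, quant_scheme) mirror what the function reads from args;
the argparse wiring itself is an assumption, not part of the original example.

import argparse

# Hypothetical CLI wiring for pose_estimation_quanteval; attribute names
# mirror what the function reads from args
parser = argparse.ArgumentParser(description='Quantized pose-estimation evaluation')
parser.add_argument('--model-dir', dest='model_dir', required=True,
                    help='Path to the saved state_dict checkpoint')
parser.add_argument('--coco-path', dest='coco_path', required=True,
                    help='Path to the COCO validation data')
parser.add_argument('--quant-scheme', dest='quant_scheme', default='tf_enhanced',
                    help='Quantization scheme passed to QuantizationSimModel')
pose_estimation_quanteval(parser.parse_args())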
Example #2
    def test_get_quantized_weight(self):
        model = mnist_model.Net()

        params = qsim.QuantParams(weight_bw=4,
                                  act_bw=4,
                                  round_mode="nearest",
                                  quant_scheme=QuantScheme.post_training_tf)
        use_cuda = False
        dataset_size = 2
        batch_size = 1
        data_loader = create_fake_data_loader(dataset_size=dataset_size,
                                              batch_size=batch_size)

        def pass_data_through_model(model,
                                    early_stopping_iterations=None,
                                    use_cuda=False):
            # Forward-pass callback: push every batch through the model so
            # quantsim can observe activation ranges
            for images_in_one_batch, _ in data_loader:
                model(images_in_one_batch)

        quantsim = qsim.QuantizationSimModel(model=model,
                                             quant_scheme=params.quant_scheme,
                                             rounding_mode=params.round_mode,
                                             default_output_bw=params.act_bw,
                                             default_param_bw=params.weight_bw,
                                             in_place=False,
                                             dummy_input=torch.rand(
                                                 1, 1, 28, 28))
        quantsim.compute_encodings(pass_data_through_model, None)
        layer = quantsim.model.conv2
        quant_dequant_weights = bias_correction.get_quantized_dequantized_weight(
            layer, use_cuda)
        self.assertEqual(quant_dequant_weights.shape,
                         torch.Size([64, 32, 5, 5]))
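
The helper under test returns the layer's weight after a quantize-dequantize round
trip at the configured bitwidth. As a rough illustration of what that round trip does
(a plain-PyTorch sketch, not AIMET's actual implementation), symmetric per-tensor
fake quantization looks like this:

import torch

def fake_quantize(weight: torch.Tensor, bitwidth: int = 4) -> torch.Tensor:
    # Symmetric per-tensor quantize-dequantize; AIMET's real quantizers
    # also support asymmetric and per-channel schemes
    qmax = 2 ** (bitwidth - 1) - 1
    scale = weight.abs().max() / qmax
    quantized = torch.clamp(torch.round(weight / scale), -qmax - 1, qmax)
    return quantized * scale

w = torch.randn(64, 32, 5, 5)
w_qdq = fake_quantize(w)
print((w - w_qdq).abs().max())  # error is bounded by roughly scale / 2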
Example #3
def main(args):
    # parse the options file
    print(f'Parsing file {args.options_file}...')
    opt = option.parse(args.options_file, is_train=False)
    opt = option.dict_to_nonedict(opt)

    print('Loading test images...')
    test_loaders = []
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        test_loaders.append(test_loader)

    model = create_model(opt)
    # Unwrap the generator network (netG) from its DataParallel wrapper
    generator = model.netG.module

    for test_loader in test_loaders:
        test_set_name = test_loader.dataset.opt['name']
        print(f'Testing on dataset {test_set_name}')
        psnr_vals, ssim_vals = evaluate_generator(generator, test_loader, opt)
        psnr_val = np.mean(psnr_vals)
        ssim_val = np.mean(ssim_vals)
        print(
            f'Mean PSNR and SSIM for {test_set_name} on original model are: [{psnr_val}, {ssim_val}]'
        )

    # The input shape is chosen arbitrarily to generate dummy input for creating quantsim object
    input_shapes = (1, 3, 24, 24)
    sim = quantsim.QuantizationSimModel(
        generator,
        input_shapes=input_shapes,
        quant_scheme=args.quant_scheme,
        default_output_bw=args.default_output_bw,
        default_param_bw=args.default_param_bw)

    evaluate_func = partial(evaluate_generator, options=opt)
    sim.compute_encodings(evaluate_func, test_loaders[0])

    for test_loader in test_loaders:
        test_set_name = test_loader.dataset.opt['name']
        print(f'Testing on dataset {test_set_name}')
        psnr_vals, ssim_vals = evaluate_generator(sim.model,
                                                  test_loader,
                                                  opt,
                                                  output_dir=args.output_dir)
        psnr_val = np.mean(psnr_vals)
        ssim_val = np.mean(ssim_vals)
        print(
            f'Mean PSNR and SSIM for {test_set_name} on quantized model are: [{psnr_val}, {ssim_val}]'
        )
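
evaluate_generator returns per-image PSNR and SSIM values for a test loader. For
reference, the PSNR of a reconstructed image against its ground truth (a minimal
sketch; the helper used by this example may compute it differently) is:

import numpy as np

def psnr(img_a: np.ndarray, img_b: np.ndarray, max_val: float = 255.0) -> float:
    # Peak signal-to-noise ratio in dB between two same-shape images
    mse = np.mean((img_a.astype(np.float64) - img_b.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 20.0 * np.log10(max_val / np.sqrt(mse))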
Example #4
def correct_bias(model: torch.nn.Module,
                 quant_params: qsim.QuantParams,
                 num_quant_samples: int,
                 data_loader,
                 num_bias_correct_samples: int,
                 conv_bn_dict: Union[Dict[torch.nn.Module, ConvBnInfoType],
                                     None] = None,
                 perform_only_empirical_bias_corr: bool = True,
                 layers_to_ignore: List[torch.nn.Module] = None):
    """
    Corrects bias for each Conv layer of model (unless ignored). A combination of Analytical and Empirical Bias
    Correction is used i.e. all the layers which can be corrected using Analytical Bias Correction are corrected
    using Analytical Bias Correction and remaining layers are corrected using Empirical method.

    Returns an in-place corrected floating point model

    :param model: Model to be corrected
    :param quant_params: Named tuple for quantization simulation for bias correction
    :param num_quant_samples: number of samples of images to pass through quantization sim for bias correction.
    :param data_loader: data loader for the model
    :param num_bias_correct_samples: number of samples for Bias correction
    :param conv_bn_dict: Dict of conv and bn with information related to activation. If None, the function calc it
    :param perform_only_empirical_bias_corr: Default True. If true will perform only empirical Bias Corr for all layers
           irrespective of the fact that layer is eligible for Analytical Bias Corr.
    :param layers_to_ignore: list of layer names for which we need to skip bias correction.

    """

    if layers_to_ignore is None:
        layers_to_ignore = []

    # Find batch size and shape of input tensor
    batch_size, input_shape = utils.get_input_shape_batch_size(data_loader)

    # Round the sample counts up to a whole number of batches
    n_batches_bias_correction = int(
        np.ceil(num_bias_correct_samples / batch_size))
    n_batches_quantization = int(np.ceil(num_quant_samples / batch_size))

    data_loader_n_samples_bias_corr = utils.IterFirstX(
        data_loader, n_batches_bias_correction)
    data_loader_n_samples_quant = utils.IterFirstX(data_loader,
                                                   n_batches_quantization)

    # TODO: Remove wrapper function
    # Wrap the calibration data loader so it can serve as the
    # compute_encodings forward-pass callback
    def pass_data_through_model(model,
                                early_stopping_iterations=None,
                                use_cuda=False):
        # pylint: disable=unused-argument
        # Forward pass over the calibration batches
        for images_in_one_batch, _ in data_loader_n_samples_quant:
            forward_pass(model, images_in_one_batch)

    ordered_conv_linear_nodes = get_ordered_lists_of_conv_fc(
        model, input_shape)

    if conv_bn_dict is None:
        conv_bn_dict = find_all_conv_bn_with_activation(model, input_shape)

    # Create a copy of the model as reference model
    model_copy = copy.deepcopy(model)

    # Add a zero bias to every conv/linear layer whose bias is None, so the
    # correction below always has a bias parameter to update
    for _, module in ordered_conv_linear_nodes:
        if module.bias is None:
            if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
                output_size = module.out_channels
            elif isinstance(module, torch.nn.Linear):
                output_size = module.out_features
            module.bias = torch.nn.Parameter(torch.zeros(output_size))
            module.bias.data = module.bias.data.to(device=module.weight.device)

    # Quantize full model
    dummy_tensors = utils.create_rand_tensors_given_shapes(input_shape)
    dummy_tensors = [
        tensor.to(utils.get_device(model)) for tensor in dummy_tensors
    ]
    q = qsim.QuantizationSimModel(model=model,
                                  quant_scheme=quant_params.quant_scheme,
                                  rounding_mode=quant_params.round_mode,
                                  default_output_bw=quant_params.act_bw,
                                  default_param_bw=quant_params.weight_bw,
                                  in_place=True,
                                  dummy_input=dummy_tensors,
                                  config_file=quant_params.config_file)

    # Make sure the model was updated in-place before using it for
    # bias-correction updates
    assert q.model is model

    # Disable output quantization on every wrapped layer: bias correction
    # compares pre-activation outputs, so output quant ops must stay off
    for _, module in model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            module.output_quantizers[0].enabled = False

    q.compute_encodings(pass_data_through_model, None)

    # For the first conv layer, perform analytical bias correction if
    # perform_only_empirical_bias_corr is False and the layer is not ignored
    if not perform_only_empirical_bias_corr:
        module_name, module = ordered_conv_linear_nodes[0]
        if module not in layers_to_ignore:
            logger.info('Correcting layer %s using Analytical Bias Correction',
                        module_name)
            quantize_layer = utils.get_layer_by_name(model, module_name)
            call_analytical_mo_correct_bias(quantize_layer, None, None)
            logger.info('Corrected bias for the layer')
            ordered_conv_linear_nodes.pop(0)

    for module_name, module in ordered_conv_linear_nodes:
        # Skip layers the user asked to ignore
        if module in layers_to_ignore:
            continue
        else:
            # Make sure the module is in the model used by qsim
            assert module in list(q.model.modules())
            # Analytical Bias Correction is only done for Conv layers
            reference_layer = utils.get_layer_by_name(model_copy, module_name)
            quantize_layer = utils.get_layer_by_name(model, module_name)

            if module in conv_bn_dict:
                bn_layer_info = conv_bn_dict[module]

                if perform_only_empirical_bias_corr or bn_layer_info is None or bn_layer_info.input_bn is None:
                    logger.info(
                        'Correcting layer %s using Empirical Bias Correction',
                        module_name)
                    bias_correction = libpymo.BiasCorrection()

                    # Collect per-batch outputs from the reference (FP32)
                    # model and the quantized model
                    for images_in_one_batch, _ in data_loader_n_samples_bias_corr:
                        reference_output_batch = get_output_data(
                            reference_layer, model_copy, images_in_one_batch)
                        quantized_model_output_batch = get_output_data(
                            quantize_layer, model, images_in_one_batch)

                        if isinstance(reference_layer, torch.nn.Linear):
                            # Append 1x1 spatial dims so Linear outputs match
                            # the Conv-style layout the correction op expects
                            extended_shape = np.concatenate(
                                (reference_output_batch.shape, np.array([1, 1])))
                            reference_output_batch = reference_output_batch.reshape(
                                extended_shape)
                            quantized_model_output_batch = quantized_model_output_batch.reshape(
                                extended_shape)

                        bias_correction.storePreActivationOutput(
                            reference_output_batch)
                        bias_correction.storeQuantizedPreActivationOutput(
                            quantized_model_output_batch)

                    call_empirical_mo_correct_bias(module, bias_correction)

                else:
                    logger.info(
                        'Correcting layer %s using Analytical Bias Correction',
                        module_name)
                    call_analytical_mo_correct_bias(
                        quantize_layer, bn_layer_info.input_bn,
                        bn_layer_info.in_activation_type)

                logger.info('Corrected bias for the layer')

    # Strip quantization wrappers, leaving the bias-corrected FP32 model
    SaveUtils.remove_quantization_wrappers(model)

    logger.info('Completed bias correction')
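
For reference, calling this API end to end looks roughly like the sketch below. The
model, dataset, and sample counts are placeholders; the QuantParams fields mirror the
ones this function reads, matching their use in Example #2 above.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical call site for correct_bias; the random dataset stands in for
# real calibration data
model = mnist_model.Net().eval()
images = torch.randn(64, 1, 28, 28)
labels = torch.zeros(64, dtype=torch.long)
data_loader = DataLoader(TensorDataset(images, labels), batch_size=16)

params = qsim.QuantParams(weight_bw=8,
                          act_bw=8,
                          round_mode='nearest',
                          quant_scheme=QuantScheme.post_training_tf)
correct_bias(model,
             quant_params=params,
             num_quant_samples=32,
             data_loader=data_loader,
             num_bias_correct_samples=64,
             perform_only_empirical_bias_corr=True)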