def bias_correction_analytical_and_empirical():
    """Run AIMET bias correction on MobileNetV2 in hybrid mode.

    Conv layers preceded by BatchNorm get analytical (BN-based) correction;
    every remaining conv falls back to empirical correction, driven by a
    synthetic calibration data loader.
    """
    # Synthetic loader standing in for a real calibration dataset.
    loader = create_fake_data_loader(dataset_size=2000,
                                     batch_size=64,
                                     image_size=(3, 224, 224))

    net = MobileNetV2()
    net.eval()

    # Map out the Conv+BN pairs eligible for analytical correction;
    # convs not in this mapping are corrected empirically.
    conv_bn_pairs = bias_correction.find_all_conv_bn_with_activation(
        net, input_shape=(1, 3, 224, 224))

    quant_params = QuantParams(weight_bw=4,
                               act_bw=4,
                               round_mode="nearest",
                               quant_scheme='tf_enhanced')

    # perform_only_empirical_bias_corr=False enables the hybrid scheme.
    bias_correction.correct_bias(net.to(device="cuda"),
                                 quant_params,
                                 num_quant_samples=1000,
                                 data_loader=loader,
                                 num_bias_correct_samples=512,
                                 conv_bn_dict=conv_bn_pairs,
                                 perform_only_empirical_bias_corr=False)
    def test_cross_layer_equalization_mobilenet_v2(self):
        """Smoke-test cross-layer equalization on MobileNetV2 (CPU).

        Passes if `equalize_model` runs end-to-end without raising.
        """
        torch.manual_seed(10)  # deterministic weights for reproducibility

        # NOTE: removed a leftover debug `print(model)` that spammed test logs.
        model = MobileNetV2().to(torch.device('cpu'))
        model = model.eval()

        equalize_model(model, (1, 3, 224, 224))
    def test_cross_layer_equalization_mobilenet_v2_visualize_after_optimization(
            self):
        """Equalize MobileNetV2 and visualize weight-range changes via bokeh.

        Compares a BatchNorm-folded copy of the model against the fully
        equalized model, then always tears down the bokeh server, even when
        the optimization or visualization step fails.
        """
        bokeh_visualizations_url, process = start_bokeh_server_session(8006)
        torch.manual_seed(10)
        model = MobileNetV2().to(torch.device('cpu'))
        bokeh_session = BokehServerSession(bokeh_visualizations_url,
                                           session_id="cle")
        try:
            model = model.eval()
            model_copy = copy.deepcopy(model)

            # Baseline copy gets BN folding only, so the visualization
            # isolates the effect of the equalization steps themselves.
            batch_norm_fold.fold_all_batch_norms(model_copy, (1, 3, 224, 224))
            equalize_model(model, (1, 3, 224, 224))
            visualize_model.visualize_changes_after_optimization(
                model_copy, model, bokeh_visualizations_url)
        finally:
            # Always release the session and kill the server process group;
            # previously a failure above leaked the bokeh server.
            bokeh_session.server_session.close("test complete")
            os.killpg(os.getpgid(process.pid), signal.SIGTERM)
def cross_layer_equalization_depthwise_layers():
    """Manually apply the cross-layer-equalization pipeline to the first
    depthwise block of MobileNetV2.

    Steps: BN fold -> ReLU6 replacement -> cross-layer scaling ->
    high-bias fold.
    """
    model = MobileNetV2().to(torch.device('cpu'))
    model.eval()

    # Batch Norm Fold
    # Conv/linear + BN pairs for folding forward or backward.
    layer_list = [(model.features[0][0], model.features[0][1]),
                  (model.features[1].conv[0], model.features[1].conv[1]),
                  (model.features[1].conv[3], model.features[1].conv[4])]

    # Keep the conv -> BN mapping (needed only for high bias folding, after
    # the BN layers themselves have been folded away). `layer_list` is
    # already a list of (key, value) pairs, so build the dict directly.
    bn_dict = dict(layer_list)

    batch_norm_fold.fold_given_batch_norms(model, layer_list)

    # Replace any ReLU6 layers with ReLU
    utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU6,
                                              torch.nn.ReLU)

    # Cross Layer Scaling
    # One set of consecutive conv layers to be equalized.
    consecutive_layer_list = [(model.features[0][0], model.features[1].conv[0],
                               model.features[1].conv[3])]
    scaling_factor_list = cross_layer_equalization.CrossLayerScaling.scale_cls_sets(
        consecutive_layer_list)

    # High Bias Fold
    # Consecutive conv pairs whose previous layer's bias is folded into the
    # next layer's bias, using the scale factors computed above.
    ClsSetInfo = cross_layer_equalization.ClsSetInfo
    ClsPairInfo = cross_layer_equalization.ClsSetInfo.ClsSetLayerPairInfo
    cls_set_info_list = [
        ClsSetInfo(
            ClsPairInfo(model.features[0][0], model.features[1].conv[0],
                        scaling_factor_list[0][0], True)),
        ClsSetInfo(
            ClsPairInfo(model.features[1].conv[0], model.features[1].conv[3],
                        scaling_factor_list[0][1], True))
    ]

    cross_layer_equalization.HighBiasFold.bias_fold(cls_set_info_list, bn_dict)
def bias_correction_empirical():
    """Run purely empirical AIMET bias correction on MobileNetV2 using a
    synthetic calibration data loader."""
    dataset_size = 2000
    batch_size = 64

    data_loader = create_fake_data_loader(dataset_size=dataset_size,
                                          batch_size=batch_size,
                                          image_size=(3, 224, 224))

    model = MobileNetV2()
    model.eval()

    params = QuantParams(weight_bw=4,
                         act_bw=4,
                         round_mode="nearest",
                         quant_scheme='tf_enhanced')

    # Perform Bias Correction.
    # FIX: pass the fake data loader itself, not `.train_loader` — unlike
    # ImageNetDataLoader, create_fake_data_loader returns the loader directly
    # (see the analytical+empirical example above, which passes it as-is).
    bias_correction.correct_bias(model.to(device="cuda"),
                                 params,
                                 num_quant_samples=1000,
                                 data_loader=data_loader,
                                 num_bias_correct_samples=512)
# Example no. 6
    def test_bias_correction_hybrid(self):
        """Hybrid bias correction on MobileNetV2.

        Runs analytical correction for conv+BN pairs and empirical correction
        for the rest, then compares selected layer biases against an
        uncorrected copy and checks that quantization wrappers were removed.
        """
        torch.manual_seed(10)

        model = MobileNetV2().to(torch.device('cpu'))
        model.eval()

        # Record conv+BN pairings before folding removes the BN layers.
        conv_bn_pairs = aimet_torch.bias_correction.find_all_conv_bn_with_activation(
            model, input_shape=(1, 3, 224, 224))
        batch_norm_fold.fold_all_batch_norms(model, (1, 3, 224, 224))

        # Untouched reference copy for the bias comparisons below.
        reference = copy.deepcopy(model)
        model.eval()
        reference.eval()

        loader = ImageNetDataLoader('./data/tiny-imagenet-200',
                                    224,  # image size
                                    1,    # batch size
                                    1)    # workers
        quant_params = QuantParams(weight_bw=4,
                                   act_bw=4,
                                   round_mode="nearest",
                                   quant_scheme=QuantScheme.post_training_tf)

        bias_correction.correct_bias(model.to(device="cuda"), quant_params, 1,
                                     loader.train_loader, 1,
                                     conv_bn_pairs, False)

        # Compare biases of the first two conv layers with the uncorrected
        # copy. NOTE(review): presumably these layers are handled by the
        # analytical path and keep their bias — confirm against AIMET docs.
        assert np.allclose(
            model.features[0][0].bias.detach().cpu().numpy(),
            reference.features[0][0].bias.detach().cpu().numpy())

        assert np.allclose(
            model.features[1].conv[0].bias.detach().cpu().numpy(),
            reference.features[1].conv[0].bias.detach().cpu().numpy())

        # To check if wrappers got removed
        assert isinstance(model.features[11].conv[0], nn.Conv2d)