def equalize_model(model: torch.nn.Module, input_shapes: Union[Tuple,
                                                               List[Tuple]]):
    """
    High-level API to perform Cross-Layer Equalization (CLE) on the given model. The model is equalized in place.

    :param model: Model to equalize
    :param input_shapes: Shape of the input (can be a tuple or a list of tuples if multiple inputs)
    :return: None
    """

    device = get_device(model)
    model.cpu()

    # fold batchnorm layers
    folded_pairs = fold_all_batch_norms(model, input_shapes)
    bn_dict = {}
    for conv_bn in folded_pairs:
        bn_dict[conv_bn[0]] = conv_bn[1]

    # replace any ReLU6 layers with ReLU
    utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU6,
                                              torch.nn.ReLU)

    # perform cross-layer scaling on applicable layer sets
    cls_set_info_list = CrossLayerScaling.scale_model(model, input_shapes)

    # high-bias fold
    HighBiasFold.bias_fold(cls_set_info_list, bn_dict)
    model.to(device=device)
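# Minimal usage sketch for the high-level API above (a sketch, not part of the
# original snippet): assumes a single-input torchvision model and that
# equalize_model is importable from aimet_torch.cross_layer_equalization.
from torchvision import models
from aimet_torch.cross_layer_equalization import equalize_model

resnet = models.resnet18(pretrained=True).eval()
# CLE modifies the model in place; the shape tuple is (N, C, H, W)
equalize_model(resnet, input_shapes=(1, 3, 224, 224))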
def main():
    args = arguments()
    seed(args)

    model = DeepLab(backbone='mobilenet',
                    output_stride=16,
                    num_classes=21,
                    sync_bn=False)
    model.eval()

    from aimet_torch import batch_norm_fold
    from aimet_torch import utils
    args.input_shape = (1, 3, 513, 513)
    batch_norm_fold.fold_all_batch_norms(model, args.input_shape)
    utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU6,
                                              torch.nn.ReLU)

    if not args.checkpoint_path:
        raise ValueError('checkpoint_path must be specified')
    model.load_state_dict(torch.load(args.checkpoint_path))

    data_loader_kwargs = {'worker_init_fn': work_init, 'num_workers': 0}
    train_loader, val_loader, test_loader, num_class = make_data_loader(
        args, **data_loader_kwargs)
    # two callbacks over the validation set: one to compute quantization
    # encodings on a subset, one for the full post-quantization evaluation
    eval_func_quant = model_eval(args, val_loader)
    eval_func = model_eval(args, val_loader)

    from aimet_common.defs import QuantScheme
    from aimet_torch.quantsim import QuantizationSimModel
    if not hasattr(args, 'quant_scheme'):
        raise ValueError('quant_scheme must be specified')
    if args.quant_scheme == 'range_learning_tf':
        quant_scheme = QuantScheme.training_range_learning_with_tf_init
    elif args.quant_scheme == 'range_learning_tfe':
        quant_scheme = QuantScheme.training_range_learning_with_tf_enhanced_init
    elif args.quant_scheme == 'tf':
        quant_scheme = QuantScheme.post_training_tf
    elif args.quant_scheme == 'tf_enhanced':
        quant_scheme = QuantScheme.post_training_tf_enhanced
    else:
        raise ValueError("Got unrecognized quant_scheme: " + args.quant_scheme)
    kwargs = {
        'quant_scheme': quant_scheme,
        'default_param_bw': args.default_param_bw,
        'default_output_bw': args.default_output_bw,
        'config_file': args.config_file
    }
    print(kwargs)
    sim = QuantizationSimModel(model.cpu(),
                               input_shapes=args.input_shape,
                               **kwargs)
    # compute quantization encodings with 1024 validation samples on GPU
    sim.compute_encodings(eval_func_quant, (1024, True))
    # evaluate the quantized model over the full validation set
    post_quant_miou = eval_func(sim.model.cuda(), (99999999, True))
    print("Post Quant mIoU :", post_quant_miou)
    def test_replace_relu_with_relu6(self):
        model = torchvision.models.resnet18()
        model.eval()

        utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU,
                                                  torch.nn.ReLU6)

        # check - no ReLU modules left in the model anymore
        for module in model.modules():
            self.assertNotIsInstance(module, torch.nn.ReLU)

        # sanity-check: forward pass continues to work and produces class logits
        with torch.no_grad():
            x = torch.rand(1, 3, 224, 224)
            output = model(x)
            self.assertEqual(output.shape, torch.Size([1, 1000]))
def cross_layer_equalization_manual():
    import torch
    from torchvision import models
    from aimet_torch import batch_norm_fold, cross_layer_equalization, utils

    model = models.resnet18(pretrained=True)
    model = model.eval()

    # Batch Norm Fold
    # Create a list of conv/linear and BN layers for folding forward or backward
    layer_list = [(model.conv1, model.bn1),
                  (model.layer1[0].conv1, model.layer1[0].bn1)]

    # Save the corresponding BN layers (needed only for high bias folding)
    bn_dict = {}
    for conv_bn in layer_list:
        bn_dict[conv_bn[0]] = conv_bn[1]

    batch_norm_fold.fold_given_batch_norms(model, layer_list)

    # Replace any ReLU6 layers with ReLU
    utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU6,
                                              torch.nn.ReLU)

    # Cross Layer Scaling
    # Create a list of consecutive conv layers to be equalized
    consecutive_layer_list = [(model.conv1, model.layer1[0].conv1),
                              (model.layer1[0].conv1, model.layer1[0].conv2)]

    scaling_factor_list = cross_layer_equalization.CrossLayerScaling.scale_cls_sets(
        consecutive_layer_list)
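    # scale_cls_sets returns one scale factor per conv pair above; these are
    # consumed below when building the ClsSetInfo list for high-bias folding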

    # High Bias Fold
    # Create a list of consecutive conv layer pairs whose previous layer's bias has to be folded into the next layer's bias
    ClsSetInfo = cross_layer_equalization.ClsSetInfo
    ClsPairInfo = cross_layer_equalization.ClsSetInfo.ClsSetLayerPairInfo
    cls_set_info_list = [
        ClsSetInfo(
            ClsPairInfo(model.conv1, model.layer1[0].conv1,
                        scaling_factor_list[0], True)),
        ClsSetInfo(
            ClsPairInfo(model.layer1[0].conv1, model.layer1[0].conv2,
                        scaling_factor_list[1], True))
    ]

    cross_layer_equalization.HighBiasFold.bias_fold(cls_set_info_list, bn_dict)
def cross_layer_equalization_depthwise_layers():
    import torch
    from aimet_torch import batch_norm_fold, cross_layer_equalization, utils

    # MobileNetV2 is assumed to be defined or imported elsewhere
    model = MobileNetV2().to(torch.device('cpu'))
    model.eval()
    # Batch Norm Fold
    # Create a list of conv/linear and BN layers for folding forward or backward
    layer_list = [(model.features[0][0], model.features[0][1]),
                  (model.features[1].conv[0], model.features[1].conv[1]),
                  (model.features[1].conv[3], model.features[1].conv[4])]

    # Save the corresponding BN layers (needed only for high bias folding)
    bn_dict = {}
    for conv_bn in layer_list:
        bn_dict[conv_bn[0]] = conv_bn[1]

    batch_norm_fold.fold_given_batch_norms(model, layer_list)

    # Replace any ReLU6 layers with ReLU
    utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU6,
                                              torch.nn.ReLU)

    # Cross Layer Scaling
    # Create a list of consecutive conv layers to be equalized
    consecutive_layer_list = [(model.features[0][0], model.features[1].conv[0],
                               model.features[1].conv[3])]
    scaling_factor_list = cross_layer_equalization.CrossLayerScaling.scale_cls_sets(
        consecutive_layer_list)
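    # For a depthwise triple (conv, depthwise, pointwise), scale_cls_sets
    # returns two scale factors per set, hence the [0][0] / [0][1] indexing
    # when the ClsPairInfo objects are built below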

    # High Bias Fold
    # Create a list of consecutive conv layer pairs whose previous layer's bias has to be folded into the next layer's bias
    ClsSetInfo = cross_layer_equalization.ClsSetInfo
    ClsPairInfo = cross_layer_equalization.ClsSetInfo.ClsSetLayerPairInfo
    cls_set_info_list = [
        ClsSetInfo(
            ClsPairInfo(model.features[0][0], model.features[1].conv[0],
                        scaling_factor_list[0][0], True)),
        ClsSetInfo(
            ClsPairInfo(model.features[1].conv[0], model.features[1].conv[3],
                        scaling_factor_list[0][1], True))
    ]

    cross_layer_equalization.HighBiasFold.bias_fold(cls_set_info_list, bn_dict)
def cross_layer_equalization_auto_step_by_step():
    import torch
    from torchvision import models
    from aimet_torch import batch_norm_fold, cross_layer_equalization, utils

    model = models.resnet18(pretrained=True)
    model = model.eval()
    input_shape = (1, 3, 224, 224)
    # Fold batchnorm layers
    folded_pairs = batch_norm_fold.fold_all_batch_norms(model, input_shape)
    bn_dict = {}
    for conv_bn in folded_pairs:
        bn_dict[conv_bn[0]] = conv_bn[1]

    # Replace any ReLU6 layers with ReLU
    utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU6,
                                              torch.nn.ReLU)

    # Perform cross-layer scaling on applicable layer sets
    cls_set_info_list = cross_layer_equalization.CrossLayerScaling.scale_model(
        model, input_shape)

    # Perform high-bias fold
    cross_layer_equalization.HighBiasFold.bias_fold(cls_set_info_list, bn_dict)
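# The step-by-step flow above (BN fold, ReLU6 replacement, cross-layer scaling,
# high-bias fold) is what the equalize_model API in the first snippet wraps;
# the equivalent one-liner, reusing this function's model and input_shape:
#
#     cross_layer_equalization.equalize_model(model, input_shape)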