Example #1
    def fuse_model(self, fuse_relu=False):
        """Fuse conv/bn and optinally relu modules in resnet models to prepare
         for quantization.
        Model is modified in place
        :param fuse_relu: Whether or not to fuse ReLU with Conv/Bn
        """
        # Fuse bottleneck layers
        for m in self.features.modules():
            if isinstance(m, (BasicBlock, Bottleneck)):
                m.fuse_model(fuse_relu=fuse_relu)

        modules_to_fuse = []

        # Fuse "stem" layer
        stem = self.features.stem
        if isinstance(stem, nn.Conv2d):
            modules_to_fuse.append("stem")
        else:
            conv_name = next(f"stem.{k}" for k, v in stem.named_modules()
                             if isinstance(v, nn.Conv2d))
            modules_to_fuse.append(conv_name)

        modules_to_fuse.append("bn_stem")

        if fuse_relu:
            act_stem = self.features.act_stem
            if isinstance(act_stem, nn.ReLU):
                modules_to_fuse.append("act_stem")
            else:
                act_name = next(f"act_stem.{k}" for k, v in act_stem.named_modules()
                                if isinstance(v, nn.ReLU))
                modules_to_fuse.append(act_name)

        fuse_modules(self.features, modules_to_fuse, inplace=True)
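The fuse_model methods in these examples are normally called on an eval-mode model right before the eager-mode post-training quantization steps (prepare, calibrate, convert). Below is a minimal usage sketch of that surrounding workflow, assuming a hypothetical MyQuantizableResNet that exposes the fuse_model method above and a hypothetical calibration_loader; only standard torch.quantization eager-mode calls are used.

 import torch
 from torch.quantization import convert, get_default_qconfig, prepare

 model = MyQuantizableResNet()              # hypothetical model exposing fuse_model()
 model.eval()                               # Conv/BN folding assumes eval-mode statistics
 model.fuse_model(fuse_relu=True)           # fuse Conv/BN(/ReLU) in place, as defined above
 model.qconfig = get_default_qconfig("fbgemm")
 prepare(model, inplace=True)               # insert observers
 with torch.no_grad():
     for images, _ in calibration_loader:   # hypothetical calibration data
         model(images)
 convert(model, inplace=True)               # swap in quantized (fused) kernels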
Example #2
    def fuse_model(self, fuse_relu=False):
        """
        Fuse Conv, BatchNorm and optionally ReLU
        :param fuse_relu: Whether or not to fuse ReLU with Conv/Bn
        """
        types_to_fuse = [nn.Conv2d, nn.BatchNorm2d]
        if fuse_relu:
            types_to_fuse.append(nn.ReLU)

        # Create a list with all Conv2d, BatchNorm2d and ReLU submodule names
        submodule_names = [
            f"regular_path.{k}" for k, v in self.regular_path.named_modules()
            if isinstance(v, tuple(types_to_fuse))
        ]

        # Break the list into groups of 2 or 3 (cnn, bn, relu)
        group = 3 if fuse_relu else 2
        modules_to_fuse = list(map(list, zip_longest(*[iter(submodule_names)] * group)))

        # 2nd layer has no ReLU. Remove the empty entry
        if fuse_relu:
            modules_to_fuse[1].pop(-1)

        # Collect shortcut Conv2d and BatchNorm2d submodule names
        cnn_bn = [
            f"shortcut.{k}" for k, v in self.shortcut.named_modules()
            if isinstance(v, (nn.Conv2d, nn.BatchNorm2d))
        ]
        if len(cnn_bn) > 0:
            modules_to_fuse.append(cnn_bn)

        fuse_modules(self, modules_to_fuse=modules_to_fuse, inplace=True)
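The grouping step above relies on a small itertools idiom (zip_longest must be imported from itertools): repeating a single shared iterator group times makes zip_longest consume the flat name list in fixed-size chunks, padding a short final chunk with None. A standalone illustration of just that idiom, with made-up names:

 from itertools import zip_longest

 names = ["path.0", "path.1", "path.2", "path.3", "path.4"]
 group = 3
 # One shared iterator repeated `group` times, so zip_longest drains it in chunks
 chunks = list(map(list, zip_longest(*[iter(names)] * group)))
 print(chunks)  # [['path.0', 'path.1', 'path.2'], ['path.3', 'path.4', None]]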
Example #3
 def fuse_model(self):
     for m in self.modules():
         if type(m) == Micro_Conv:
             fuse_modules(m,
                          ['micro_conv.2', 'micro_conv.3', 'micro_conv.4'],
                          inplace=True)
     return self
Example #4
 def fuse_model(self) -> None:
     """Fuse modules and create intrinsic opterators."""
     for module in self.modules():
         if type(module) is ConvBNReLU:
             fuse_modules(module, ["conv", "bn", "relu"], inplace=True)
         if type(module) is ConvBN:
             fuse_modules(module, ["conv", "bn"], inplace=True)
Example #5
 def test_fusion_sequential_model_eval(self):
     model = ModelWithSequentialFusion().eval()
     model.to(torch.float)
     fuse_modules(model, [['conv1', 'relu1'],
                          ['features.0.0', 'features.0.1', 'features.0.2'],
                          ['features.1.0', 'features.1.1', 'features.1.2'],
                          ['features.2.0', 'features.2.1', 'features.2.2'],
                          ['classifier.0', 'classifier.1']], inplace=True)
     self.assertEqual(type(model.conv1), nni.ConvReLU2d,
                      "Fused Conv + Relu: nni.ConvReLU2d")
     self.assertEqual(type(model.conv1[0]), nn.Conv2d,
                      "Fused Conv + Relu: Conv2d")
     self.assertEqual(type(model.conv1[1]), nn.ReLU,
                      "Fused Conv + Relu: Relu")
     self.assertEqual(type(model.relu1), nn.Identity,
                      "Fused Conv + Relu: Identity")
     for i in range(3):
         self.assertEqual(type(model.features[i][0]), nni.ConvReLU2d,
                          "Fused submodule Conv + folded BN")
         self.assertEqual(type(model.features[i][1]), nn.Identity,
                          "Fused submodule (skipped BN)")
         self.assertEqual(type(model.features[i][2]), nn.Identity,
                          "Non-fused submodule Conv")
     self.assertEqual(type(model.classifier[0]), nni.LinearReLU)
     self.assertEqual(type(model.classifier[1]), nn.Identity)
     model.qconfig = default_qconfig
     prepare(model, inplace=True)
     self.checkObservers(model)
     model(self.img_data[0][0])
     convert(model, inplace=True)
     model(self.img_data[1][0])
     self.checkModelWithSequentialQuantized(model)
Example #6
    def test_fuse_module_eval(self):
        import torch.nn._intrinsic.modules.fused as torch_fused
        testMod = ModForFusion()
        testMod.eval()
        fuse_modules(testMod,
                     [['conv1', 'bn1', 'relu1'], ['sub1.conv', 'sub1.bn']])
        self.assertEqual(type(testMod.conv1), torch_fused.ConvReLU2d,
                         "Fused Conv + BN + Relu first layer (BN is folded)")
        self.assertEqual(type(testMod.conv1[0]), torch.nn.Conv2d,
                         "Fused Conv + BN + Relu (Conv + folded BN only)")
        self.assertEqual(type(testMod.conv1[1]), torch.nn.ReLU,
                         "Fused Conv + BN + Relu second layer (Relu only)")
        self.assertEqual(type(testMod.bn1), torch.nn.Identity,
                         "Fused Conv + BN + Relu second layer (Skipped BN)")
        self.assertEqual(type(testMod.relu1), torch.nn.Identity,
                         "Fused Conv + BN + Relu second layer (Skipped Relu)")

        self.assertEqual(type(testMod.sub1.conv), torch.nn.Conv2d,
                         "Fused submodule Conv + folded BN")
        self.assertEqual(type(testMod.sub1.bn), torch.nn.Identity,
                         "Fused submodule (skipped BN)")
        self.assertEqual(type(testMod.sub2.conv), torch.nn.Conv2d,
                         "Non-fused submodule Conv")
        self.assertEqual(type(testMod.sub2.bn), torch.nn.BatchNorm2d,
                         "Non-fused submodule BN")
Example #7
 def fuse_model(self):
     fuse_modules(self,
                  [['conv1', 'bn1', 'relu1'], ['conv2', 'bn2', 'relu2'],
                   ['conv3', 'bn3']],
                  inplace=True)
     if self.downsample:
         torch.quantization.fuse_modules(self.downsample, ['0', '1'],
                                         inplace=True)
Example #8
 def fuse_model(self):
     for i in range(0, len(self.module_list)):
         if self.module_defs[i]["type"] == "convolutional":
             if int(self.module_defs[i]["batch_normalize"]):
                 if self.module_defs[i]["activation"] == "relu":
                     fuse_modules(self,
                                  ['module_list.%d.conv_%d' % (i, i),
                                   'module_list.%d.batch_norm_%d' % (i, i),
                                   'module_list.%d.relu_%d' % (i, i)],
                                  inplace=True)
                 else:
                     fuse_modules(self,
                                  ['module_list.%d.conv_%d' % (i, i),
                                   'module_list.%d.batch_norm_%d' % (i, i)],
                                  inplace=True)
Example #9
 def fuse_model(self) -> None:
     for m in self.modules():
         if type(m) == ConvNormActivation:
             modules_to_fuse = ["0", "1"]
             if len(m) == 3 and type(m[2]) == nn.ReLU:
                 modules_to_fuse.append("2")
             fuse_modules(m, modules_to_fuse, inplace=True)
         elif type(m) == QuantizableSqueezeExcitation:
             m.fuse_model()
Example #10
 def fuse_model(self):
     for m in self.modules():
         if type(m) == ConvBNReLU:
             torch.quantization.fuse_modules(m, ['0', '1', '2'], inplace=True)
         if type(m) == InvertedResidual:
             for idx in range(len(m.conv)):
                 if type(m.conv[idx]) == nn.Conv2d:
                     fuse_modules(m.conv, [str(idx), str(idx + 1)],
                                  inplace=True)
Example #11
 def fuse_model(self):
     for m in self.modules():
         if type(m) == ConvBNActivation:
             modules_to_fuse = ['0', '1']
             if type(m[2]) == nn.ReLU:
                 modules_to_fuse.append('2')
             fuse_modules(m, modules_to_fuse, inplace=True)
         elif type(m) == QuantizableSqueezeExcitation:
             m.fuse_model()
Example #12
 def fuse_model(self):
     fuse_modules(self,
                  [['conv1', 'bn1', 'relu1'], ['conv2', 'bn2', 'relu2'],
                   ['conv3', 'bn3']],
                  inplace=True)
     for m in self.modules():
         if type(m) == SEModule:
             m.fuse_model()
     if self.downsample:
         fuse_modules(self.downsample, ['0', '1'], inplace=True)
Example #13
    def test_forward_hooks_preserved(self):
        r"""Test case that checks whether forward pre hooks of the first module and
        post forward hooks of the last module in modules list passed to fusion function preserved.
        (e.g. before fusion: [nn.Conv2d (with pre forward hooks), nn.BatchNorm2d, nn.ReLU (with post forward hooks)]
        after fusion: [nni.ConvBnReLU2d (with pre and post hooks), nn.Identity, nn.Identity])
        """
        model = ModelForFusion(default_qat_qconfig).train()

        counter = {
            'pre_forwards': 0,
            'forwards': 0,
        }
        fused = False

        def fw_pre_hook(fused_module_class, h_module, input):
            if fused:
                self.assertEqual(
                    type(h_module), fused_module_class,
                    "After fusion owner of the first module's forward pre hook is not a fused module"
                )
            counter['pre_forwards'] += 1

        def fw_hook(fused_module_class, h_module, input, output):
            if fused:
                self.assertEqual(
                    type(h_module), fused_module_class,
                    "After fusion owner of the last module's forward hook is not a fused module"
                )
            counter['forwards'] += 1

        # Register two pre and two post forward hooks, so each counter is expected to
        # increment by two on every inference pass
        model.conv1.register_forward_pre_hook(
            lambda *args: fw_pre_hook(nni.ConvBnReLU2d, *args))
        model.sub1.conv.register_forward_pre_hook(
            lambda *args: fw_pre_hook(nni.ConvBn2d, *args))
        model.relu1.register_forward_hook(
            lambda *args: fw_hook(nni.ConvBnReLU2d, *args))
        model.sub1.bn.register_forward_hook(
            lambda *args: fw_hook(nni.ConvBn2d, *args))

        test_only_eval_fn(model, self.img_data_1d)
        self.assertEqual(counter['pre_forwards'], 2 * len(self.img_data_1d))
        self.assertEqual(counter['forwards'], 2 * len(self.img_data_1d))

        model = fuse_modules(model, ['conv1', 'bn1', 'relu1'])
        model = fuse_modules(model, ['sub1.conv', 'sub1.bn'])

        fused = True
        before_fusion_pre_count = counter['pre_forwards']
        before_fusion_post_count = counter['forwards']
        test_only_eval_fn(model, self.img_data_1d)
        self.assertEqual(counter['pre_forwards'] - before_fusion_pre_count,
                         2 * len(self.img_data_1d))
        self.assertEqual(counter['forwards'] - before_fusion_post_count,
                         2 * len(self.img_data_1d))
Example #14
 def fuse_model(self):
     if self.input_3x3:
         fuse_modules(self.layer0,
                      [['conv1', 'bn1', 'relu1'], ['conv2', 'bn2', 'relu2'],
                       ['conv3', 'bn3', 'relu3']],
                      inplace=True)
     else:
         fuse_modules(self.layer0, ['conv1', 'bn1', 'relu1'], inplace=True)
     for m in self.modules():
         if type(m) == SEResNeXtBottleneck:
             m.fuse_model()
Example #15
 def fuse_model(self):
     for m in self.children():
         names = []
         for n, _ in m.named_children():
             names.append(n)
         for i in range(len(m)-1):
             if isinstance(m[i], nn.Conv2d) and isinstance(m[i+1], nn.BatchNorm2d):
                 if i+2 < len(m) and isinstance(m[i+2], nn.ReLU):
                     fuse_modules(m, [names[i], names[i+1], names[i+2]], inplace=True)
                 else:
                     fuse_modules(m, [names[i], names[i+1]], inplace=True)
Example #16
    def fuse_model(self):
        r"""Fuse conv/bn/relu modules in resnet models
        Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        """

        fuse_modules(self, ['conv1', 'bn1', 'relu'], inplace=True)
        for m in self.modules():
            if type(m) == Bottleneck or type(m) == BasicBlock:
                m.fuse_model()
Example #17
    def test_batchnorm_relu_basic(self):
        """
        Basic test of the PyTorch 3D batchnorm RELU Node on Glow.
        """
        class SimpleQuantizedBatchNormRelu(nn.Module):
            def __init__(self, w, b, m, v):
                super(SimpleQuantizedBatchNormRelu, self).__init__()
                self.bn = torch.nn.BatchNorm3d(4)
                self.relu = torch.nn.ReLU()
                self.bn.weight = torch.nn.Parameter(w)
                self.bn.bias = torch.nn.Parameter(b)
                self.bn.running_mean = m
                self.bn.running_var = v
                self.q = QuantStub()
                self.dq = DeQuantStub()

            def forward(self, x):
                qx = self.q(x)
                qy = self.bn(qx)
                qy_relu = self.relu(qy)
                y = self.dq(qy_relu)
                return y

        C = 4
        weight = torch.ones(C) + torch.rand(C) * 0.001
        bias = torch.rand(C) * 0.0001
        running_mean = torch.zeros(C)
        running_var = torch.ones(C)

        inputs = torch.randn((10, C, 2, 3, 4), requires_grad=False)
        model = SimpleQuantizedBatchNormRelu(weight, bias, running_mean,
                                             running_var)
        model.eval()
        model.qconfig = my_qconfig
        modules_to_fuse = [["bn", "relu"]]
        fuse_modules(model, modules_to_fuse, inplace=True)
        prepare(model, inplace=True)
        model.forward(inputs)
        convert(model, inplace=True)

        # Because of the quantization differences between PyTorch & Glow
        # we set eps big enough.
        # Batchnorm introduces noticeable accuracy differences, which can create up to
        # ~1e-2 difference in some rare cases. To keep this test from being flaky,
        # atol is set to 0.1 and rtol is set to 0.00001.
        utils.compare_tracing_methods(
            model,
            inputs,
            fusible_ops={"quantized::batch_norm3d_relu"},
            atol=1e-1,
            rtol=1e-5,
            fp16=True,
            skip_to_glow=True,
        )
Example #18
    def test_fuse_module_train(self):
        model = ModelForFusion(default_qat_qconfig).train()
        # Test step by step fusion
        model = fuse_modules(model, ['conv1', 'bn1', 'relu1'])
        model = fuse_modules(model, ['sub1.conv', 'sub1.bn'])
        self.assertEqual(type(model.conv1), nni.ConvBnReLU2d,
                         "Fused Conv + BN + Relu first layer")
        self.assertEqual(type(model.bn1), torch.nn.Identity,
                         "Fused Conv + BN + Relu (skipped BN)")
        self.assertEqual(type(model.relu1), torch.nn.Identity,
                         "Fused Conv + BN + Relu (skipped Relu)")

        self.assertEqual(type(model.sub1.conv), nni.ConvBn2d,
                         "Fused submodule Conv + BN")
        self.assertEqual(type(model.sub1.bn), torch.nn.Identity,
                         "Fused submodule Conv + BN (skipped BN)")
        self.assertEqual(type(model.sub2.conv), torch.nn.Conv2d,
                         "Non-fused submodule Conv")
        self.assertEqual(type(model.sub2.relu), torch.nn.ReLU,
                         "Non-fused submodule ReLU")
        model = prepare_qat(model)
        self.checkObservers(model)

        def checkQAT(model):
            self.assertEqual(type(model.conv1), nniqat.ConvBnReLU2d)
            self.assertEqual(type(model.bn1), nn.Identity)
            self.assertEqual(type(model.relu1), nn.Identity)
            self.assertEqual(type(model.sub1.conv), nniqat.ConvBn2d)
            self.assertEqual(type(model.sub1.bn), nn.Identity)
            self.assertEqual(type(model.sub2.conv), nn.Conv2d)
            self.assertEqual(type(model.sub2.relu), nn.ReLU)

        checkQAT(model)
        test_only_train_fn(model, self.img_data)
        model = convert(model)

        def checkQuantized(model):
            self.assertEqual(type(model.conv1), nniq.ConvReLU2d)
            self.assertEqual(type(model.bn1), nn.Identity)
            self.assertEqual(type(model.relu1), nn.Identity)
            self.assertEqual(type(model.sub1.conv), nnq.Conv2d)
            self.assertEqual(type(model.sub1.bn), nn.Identity)
            self.assertEqual(type(model.sub2.conv), nn.Conv2d)
            self.assertEqual(type(model.sub2.relu), nn.ReLU)
            test_only_eval_fn(model, self.img_data)

        checkQuantized(model)

        model = ModelForFusion(default_qat_qconfig).train()
        model = fuse_modules(
            model, [['conv1', 'bn1', 'relu1'], ['sub1.conv', 'sub1.bn']])
        model = quantize_qat(model, test_only_train_fn, self.img_data)
        checkQuantized(model)
Example #19
 def fuse_model(self):
     modules_list = []
     for name, m in self.features.named_modules():
         if isinstance(m, nn.Sequential):
              continue
         if isinstance(m, nn.Conv2d):
             modules_list.append(name)
         elif isinstance(m, nn.ReLU):
             modules_list.append(name)
             q.fuse_modules(self.features, modules_list, inplace=True)
             modules_list = []
         elif len(modules_list) > 0:
             modules_list.append(name)
Example #20
def fuse_module_conv_bn_relus(module: Module, inplace: bool = True) -> Module:
    """
    Performs fusion of Conv2d, BatchNorm2d, and ReLU layers found in the
    given module. To be fused, these layers must appear sequentially in
    module.named_modules() and be in the same submodule.
    Fuses either Conv2d -> BatchNorm2d or Conv2d -> BatchNorm2d -> ReLU blocks.

    If this function does not fuse the model in the desired way, implement an
    in-place fusing function for the model.

    :param module: the module to fuse
    :param inplace: set True to perform fusions in-place. default is True
    :return: the fused module
    """
    if torch_quantization is None:
        raise RuntimeError(
            "Unable to import package torch.quantization. "
            "Try upgrading your PyTorch version."
        )
    if not inplace:
        module = deepcopy(module)
    conv_blocks = []
    current_block = []
    current_block_submodule_name = ""
    for name, layer in module.named_modules():
        submodule_name = ".".join(name.split(".")[:-1])
        if (
            len(current_block) == 1  # [Conv2d]
            and isinstance(layer, BatchNorm2d)
            and submodule_name == current_block_submodule_name
        ) or (
            len(current_block) == 2  # [Conv2d, BatchNorm2d]
            and isinstance(layer, ReLU)
            and submodule_name == current_block_submodule_name
        ):
            if isinstance(layer, ReLU_nm):
                _replace_nm_relu(module, name, layer)
            current_block.append(name)
        else:
            if current_block:
                if len(current_block) > 1:  # cannot fuse single module
                    conv_blocks.append(current_block)
                current_block = []
                current_block_submodule_name = ""
            if isinstance(layer, Conv2d):
                current_block.append(name)
                current_block_submodule_name = submodule_name
    if conv_blocks:
        torch_quantization.fuse_modules(module, conv_blocks, inplace=True)
    return module
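A minimal sketch of applying a helper like the one above, assuming the function and its own imports (torch.quantization, the ReLU_nm handling) are in scope; TinyBlock is a hypothetical module whose Conv2d, BatchNorm2d and ReLU attributes are registered consecutively and in the same submodule, followed by a non-fusible layer that closes the block:

 from torch import nn

 class TinyBlock(nn.Module):
     def __init__(self):
         super().__init__()
         self.conv = nn.Conv2d(3, 8, 3)       # Conv2d -> BatchNorm2d -> ReLU appear
         self.bn = nn.BatchNorm2d(8)          # consecutively in named_modules() and
         self.relu = nn.ReLU()                # live in the same submodule
         self.pool = nn.AdaptiveAvgPool2d(1)  # non-fusible layer that closes the block

     def forward(self, x):
         return self.pool(self.relu(self.bn(self.conv(x))))

 block = TinyBlock().eval()
 fuse_module_conv_bn_relus(block, inplace=True)
 # After eval-mode fusion, block.conv is expected to hold the fused Conv+ReLU module
 # (with BN folded into the conv weights), while block.bn and block.relu become
 # nn.Identity.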
Example #21
 def fuse_model(self):
     if self.dropblock_prob <= 0.0:
         if self.radix == 0:
             fuse_modules(self,
                          [['conv1', 'bn1', 'relu1'],
                           ['conv2', 'bn2', 'relu2'], ['conv3', 'bn3']],
                          inplace=True)
         else:
             fuse_modules(self,
                          [['conv1', 'bn1', 'relu1'], ['conv3', 'bn3']],
                          inplace=True)
     else:
         if self.radix == 0:
             fuse_modules(
                 self,
                 [['conv1', 'bn1'], ['conv2', 'bn2'], ['conv3', 'bn3']],
                 inplace=True)
         else:
             fuse_modules(self, [['conv1', 'bn1'], ['conv3', 'bn3']],
                          inplace=True)
     for m in self.modules():
         if type(m) == SplAtConv2d:
             m.fuse_model()
     if self.downsample:
         if len(self.downsample) == 3:
             torch.quantization.fuse_modules(self.downsample, ['1', '2'],
                                             inplace=True)
         else:
             torch.quantization.fuse_modules(self.downsample, ['0', '1'],
                                             inplace=True)
Example #22
    def test_fusion_conv_with_bias(self):
        for qengine in supported_qengines:
            with override_quantized_engine(qengine):
                model = ModelForFusionWithBias().train()
                # output with no fusion.
                out_ref = model(self.img_data_2d[0][0])

                model.qconfig = QConfig(activation=torch.nn.Identity,
                                        weight=torch.nn.Identity)
                model = fuse_modules(
                    model, [["conv1", "bn1", "relu1"], ["conv2", "bn2"]])
                prep_model = prepare_qat(model, inplace=False)
                # output with fusion but no observers.
                out_fused = prep_model(self.img_data_2d[0][0])
                self.assertEqual(out_ref, out_fused)

                model.qconfig = torch.quantization.get_default_qconfig(qengine)
                prepare_qat(model, inplace=True)

                model(self.img_data_2d[0][0])

                def checkQAT(model):
                    self.assertEqual(type(model.conv1), nniqat.ConvBnReLU2d)
                    self.assertEqual(type(model.bn1), nn.Identity)
                    self.assertEqual(type(model.relu1), nn.Identity)
                    self.assertEqual(type(model.conv2), nniqat.ConvBn2d)
                    self.assertEqual(type(model.bn2), nn.Identity)

                checkQAT(model)
Example #23
    def test_fuse_module_eval(self):
        model = ModelForFusion(default_qconfig)
        model.eval()
        fuse_modules(model,
                     [['conv1', 'bn1', 'relu1'], ['sub1.conv', 'sub1.bn']])
        self.assertEqual(type(model.conv1), nni.ConvReLU2d,
                         "Fused Conv + BN + Relu first layer (BN is folded)")
        self.assertEqual(type(model.conv1[0]), nn.Conv2d,
                         "Fused Conv + BN + Relu (Conv + folded BN only)")
        self.assertEqual(type(model.conv1[1]), nn.ReLU,
                         "Fused Conv + BN + Relu second layer (Relu only)")
        self.assertEqual(type(model.bn1), nn.Identity,
                         "Fused Conv + BN + Relu second layer (Skipped BN)")
        self.assertEqual(type(model.relu1), nn.Identity,
                         "Fused Conv + BN + Relu second layer (Skipped Relu)")

        self.assertEqual(type(model.sub1.conv), nn.Conv2d,
                         "Fused submodule Conv + folded BN")
        self.assertEqual(type(model.sub1.bn), nn.Identity,
                         "Fused submodule (skipped BN)")
        self.assertEqual(type(model.sub2.conv), nn.Conv2d,
                         "Non-fused submodule Conv")
        self.assertEqual(type(model.sub2.relu), torch.nn.ReLU,
                         "Non-fused submodule ReLU")

        prepare(model)
        self.checkObservers(model)
        test_only_eval_fn(model, self.img_data)
        convert(model)

        def checkQuantized(model):
            self.assertEqual(type(model.conv1), nniq.ConvReLU2d)
            self.assertEqual(type(model.bn1), nn.Identity)
            self.assertEqual(type(model.relu1), nn.Identity)
            self.assertEqual(type(model.sub1.conv), nnq.Conv2d)
            self.assertEqual(type(model.sub1.bn), nn.Identity)
            self.assertEqual(type(model.sub2.conv), nn.Conv2d)
            self.assertEqual(type(model.sub2.relu), nn.ReLU)
            test_only_eval_fn(model, self.img_data)

        checkQuantized(model)

        model = ModelForFusion(default_qat_qconfig).eval()
        fuse_modules(model,
                     [['conv1', 'bn1', 'relu1'], ['sub1.conv', 'sub1.bn']])
        model = quantize(model, test_only_eval_fn, self.img_data)
        checkQuantized(model)
Example #24
 def fuse(self, inplace=True):
     if self.norm_type != 'batch' or self.fused:
         print("Cannot fuse")
         return
     for m in self.modules():
         if type(m) == DWSConv or type(m) == DWSConvT:
             fuse_modules(m, ['depthwise', 'norm1'], inplace=inplace)
             if m.leak == 0:
                 fuse_modules(m, ['pointwise', 'norm2', 'relu'], inplace=inplace)
             else:
                 fuse_modules(m, ['pointwise', 'norm2'], inplace=inplace)
         if type(m) == Conv1stLayer or type(m) == ConvBNReLU:
             if m.leak == 0:
                 fuse_modules(m, ['conv2d', 'norm', 'relu'], inplace=inplace)
             else:
                 fuse_modules(m, ['conv2d', 'norm'], inplace=inplace)
     print("Fusion complete")
     self.fused = True
Example #25
    def test_resnet_base(self, qconfig):
        r"""Test quantization for bottleneck topology used in resnet/resnext
        and add coverage for conversion of average pool and float functional
        """
        model = ResNetBase().float().eval()
        model = QuantWrapper(model)
        model.qconfig = qconfig
        fuse_list = ['module.conv1', 'module.bn1', 'module.relu1']
        fuse_modules(model, fuse_list, inplace=True)
        model = prepare(model)
        self.checkObservers(model)
        test_only_eval_fn(model, self.img_data)
        model = convert(model)

        def checkQuantized(model):
            self.assertEqual(type(model.module.conv1), nn._intrinsic.quantized.ConvReLU2d)
            self.assertEqual(type(model.module.myop), nn.quantized.QFunctional)
            self.assertEqual(type(model.module.avgpool), nn.AdaptiveAvgPool2d)
            test_only_eval_fn(model, self.img_data)

        checkQuantized(model)
Example #26
    def test_fuse_module_train(self):
        import torch.nn._intrinsic.modules.fused as torch_fused
        testMod = ModForFusion()
        testMod.train()
        fuse_modules(testMod,
                     [['conv1', 'bn1', 'relu1'], ['sub1.conv', 'sub1.bn']])
        self.assertEqual(type(testMod.conv1), torch_fused.ConvBnReLU2d,
                         "Fused Conv + BN + Relu first layer")
        self.assertEqual(type(testMod.bn1), torch.nn.Identity,
                         "Fused Conv + BN + Relu (skipped BN)")
        self.assertEqual(type(testMod.relu1), torch.nn.Identity,
                         "Fused Conv + BN + Relu (skipped Relu)")

        self.assertEqual(type(testMod.sub1.conv), torch_fused.ConvBn2d,
                         "Fused submodule Conv + BN")
        self.assertEqual(type(testMod.sub1.bn), torch.nn.Identity,
                         "Fused submodule Conv + BN (skipped BN)")
        self.assertEqual(type(testMod.sub2.conv), torch.nn.Conv2d,
                         "Non-fused submodule Conv")
        self.assertEqual(type(testMod.sub2.bn), torch.nn.BatchNorm2d,
                         "Non-fused submodule BN")
Example #27
    def test_fusion_linear_bn_eval(self):
        model = ModelForLinearBNFusion().train()
        inp1 = torch.randn(8, 20)
        inp2 = torch.randn(8, 20)

        # Get some interesting values into the running mean and variance.
        model(inp1)
        model.eval()
        golden = model(inp2)

        model = fuse_modules(model, [["fc", "bn"]])
        self.assertEqual(type(model.bn), nn.Identity)
        self.assertEqual(golden, model(inp2))
Example #28
 def fuse_model(self):
     if type(self.conv_exp) == nn.Conv2d:
         if type(self.conv_pwl) == nn.Conv2d:
             if type(self.act1) == nn.ReLU:
                 fuse_modules(
                     self,
                     [['conv_exp', 'bn1', 'act1'], ['conv_pwl', 'bn2']],
                     inplace=True)
             else:
                 fuse_modules(self,
                              [['conv_exp', 'bn1'], ['conv_pwl', 'bn2']],
                              inplace=True)
         else:
             if type(self.act1) == nn.ReLU:
                 fuse_modules(self, ['conv_exp', 'bn1', 'act1'],
                              inplace=True)
             else:
                 fuse_modules(self, ['conv_exp', 'bn1'], inplace=True)
     else:
         if type(self.conv_pwl) == nn.Conv2d:
             fuse_modules(self, ['conv_pwl', 'bn2'], inplace=True)
     for m in self.modules():
         if type(m) == SqueezeExcite and type(m) != nn.Identity:
             m.fuse_model()
Example #29
 def fuse_model(self):
     for m in self.modules():
         if isinstance(m, SEModule):
             fuse_modules(m.fc, ["0", "1"], inplace=True)
         if isinstance(m, MobileBottleneck):
             for idx in range(len(m.conv)):
                 if isinstance(m.conv[idx], nn.Conv2d):
                     indices = [str(idx), str(idx + 1)]  # Conv2d + BN
                     if num_children(m.conv) > idx + 2 and \
                             isinstance(m.conv[idx + 2], nn.ReLU):
                         indices.append(str(idx + 2))
                     fuse_modules(m.conv, indices, inplace=True)
         if isinstance(m, nn.Sequential) and num_children(m) == 3:
             fuse_modules(m, ['0', '1'], inplace=True)
Example #30
    def fuse_model(self):
        if type(self.conv_stem) == nn.Conv2d and type(self.act1) == nn.ReLU:
            fuse_modules(self, ['conv_stem', 'bn1', 'act1'], inplace=True)
        elif type(self.conv_stem) == nn.Conv2d:
            fuse_modules(self, ['conv_stem', 'bn1'], inplace=True)

        if type(self.conv_head) == nn.Conv2d and type(self.act2) == nn.ReLU:
            fuse_modules(self, ['conv_head', 'act2'], inplace=True)

        for m in self.modules():
            if type(m) == DepthwiseSeparableConv or type(m) == EdgeResidual or \
                type(m) == CondConvResidual or type(m) == InvertedResidual or \
                    type(m) == ConvBnAct:
                m.fuse_model()