def __init__(self):
    super().__init__()
    self.sub1 = LinearReluModel()
    self.sub2 = QuantWrapper(TwoLayerLinearModel())
    self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
    self.fc3.qconfig = default_qconfig
    self.sub2.qconfig = default_qconfig

def __init__(self):
    super(AnnotatedNestedModel, self).__init__()
    self.sub1 = LinearReluModel()
    self.sub2 = TwoLayerLinearModel()
    self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
    self.fc3.qconfig = default_qconfig
    self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
    self.sub2.fc1.qconfig = default_per_channel_qconfig

def __init__(self, qengine):
    super().__init__()
    self.sub1 = LinearReluModel()
    self.sub2 = TwoLayerLinearModel()
    self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
    self.fc3.qconfig = default_qconfig
    self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
    if qengine == 'fbgemm':
        self.sub2.fc1.qconfig = default_per_channel_qconfig
    else:
        self.sub2.fc1.qconfig = default_qconfig

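# These annotated models are consumed by the eager-mode workflow: prepare()
# inserts observers wherever a qconfig is found, a calibration pass records
# activation ranges, and convert() swaps in quantized modules. A minimal
# self-contained sketch of that flow; `Annotated` below is a hypothetical
# stand-in for the models above, not one of them.
import torch
import torch.nn as nn
from torch.quantization import QuantWrapper, default_qconfig, prepare, convert

class Annotated(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = QuantWrapper(nn.Linear(5, 5))
        self.fc.qconfig = default_qconfig  # only this subtree is quantized

    def forward(self, x):
        return self.fc(x)

model = Annotated().eval()
model = prepare(model)       # insert observers under each qconfig
model(torch.rand(4, 5))      # calibration pass
model = convert(model)       # swap in quantized Linear + Quantize stub
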
def __init__(self):
    super(SkipQuantModel, self).__init__()
    self.qconfig = default_qconfig
    self.sub = QuantWrapper(InnerModule())
    self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)  # don't quantize this fc
    self.fc.qconfig = None

def __init__(self, qengine):
    super().__init__()
    self.qconfig = torch.quantization.get_default_qconfig(qengine)
    self.sub = QuantWrapper(InnerModule())
    self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)  # don't quantize this fc
    self.fc.qconfig = None

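# Setting `qconfig = None` on a submodule is the eager-mode opt-out:
# prepare() attaches no observers under it, so convert() later leaves it
# in float. A sketch of verifying that; `Skip` is a hypothetical miniature
# of the models above, and `activation_post_process` is the attribute name
# eager-mode prepare uses internally (treat it as an implementation detail).
import torch
import torch.nn as nn
from torch.quantization import QuantWrapper, default_qconfig, prepare

class Skip(nn.Module):
    def __init__(self):
        super().__init__()
        self.qconfig = default_qconfig
        self.sub = QuantWrapper(nn.Linear(5, 5))
        self.fc = nn.Linear(5, 5)
        self.fc.qconfig = None   # opt this layer out of quantization

    def forward(self, x):
        return self.fc(self.sub(x))

m = prepare(Skip().eval())
assert not hasattr(m.fc, 'activation_post_process')   # skipped: no observer
assert hasattr(m.sub.module, 'activation_post_process')  # wrapped: observed
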
def __init__(self, with_relu=False):
    super().__init__()
    layers = [nn.Conv2d(3, 32, 3, bias=True), nn.BatchNorm2d(32)]
    if with_relu:
        layers.append(nn.ReLU())
    self.conv = nn.Sequential(*layers)
    self.quant_wrap = QuantWrapper(self.conv)
    self.with_relu = with_relu

def __init__(self, with_relu=False):
    super().__init__()
    layers = [nn.Linear(16, 32)]
    if with_relu:
        layers.append(nn.ReLU())
    self.fc = nn.Sequential(*layers)
    self.quant_wrap = QuantWrapper(self.fc)
    self.with_relu = with_relu

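# QuantWrapper appears throughout these models; conceptually it just
# brackets the wrapped module with a QuantStub/DeQuantStub pair. The class
# below is a paraphrase of that behavior, not the verbatim
# torch.quantization.QuantWrapper source.
import torch.nn as nn
from torch.quantization import QuantStub, DeQuantStub

class QuantWrapperSketch(nn.Module):
    def __init__(self, module):
        super().__init__()
        self.quant = QuantStub()      # float -> quantized boundary
        self.dequant = DeQuantStub()  # quantized -> float boundary
        self.module = module

    def forward(self, x):
        return self.dequant(self.module(self.quant(x)))
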
def __init__(self):
    super().__init__()
    self.sub1 = LinearReluModel()
    self.sub2 = TwoLayerLinearModel()
    self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
    self.fc3.qconfig = default_qconfig
    self.sub2.qconfig = default_qconfig

    custom_options = {
        'dtype': torch.quint8,
        'qscheme': torch.per_tensor_affine
    }
    custom_qconfig = QConfig(activation=default_observer.with_args(**custom_options),
                             weight=default_weight_observer)
    self.sub2.fc1.qconfig = custom_qconfig

    self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
    self.sub2.fc2 = QuantWrapper(self.sub2.fc2)

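# QConfig is a pair of observer factories, and .with_args(...) returns a new
# factory with overridden constructor arguments. A sketch of composing the
# same custom qconfig as above and instantiating a factory by hand to show
# the options took effect (prepare() normally does the instantiation).
import torch
from torch.quantization import QConfig, default_observer, default_weight_observer

custom_qconfig = QConfig(
    activation=default_observer.with_args(dtype=torch.quint8,
                                          qscheme=torch.per_tensor_affine),
    weight=default_weight_observer,
)
obs = custom_qconfig.activation()
print(obs.dtype, obs.qscheme)   # torch.quint8 torch.per_tensor_affine
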
def test_linear_chain(self):
    class LinearChain(nn.Module):
        def __init__(self):
            super(LinearChain, self).__init__()
            self.linear1 = nn.Linear(3, 4)
            self.linear2 = nn.Linear(4, 5)
            self.linear3 = nn.Linear(5, 6)

        def forward(self, x):
            x = self.linear1(x)
            x = self.linear2(x)
            x = self.linear3(x)
            return x

    float_model = QuantWrapper(LinearChain())
    # dummy labels: randint's upper bound is exclusive, so these are all zeros
    img_data = [(torch.rand(10, 3, dtype=torch.float),
                 torch.randint(0, 1, (2,), dtype=torch.long))
                for _ in range(50)]
    self.correct_artificial_bias_float(float_model, img_data)
    self.correct_artificial_bias_quantize(float_model, img_data)

def test_conv_chain(self):
    class ConvChain(nn.Module):
        def __init__(self):
            super(ConvChain, self).__init__()
            self.conv2d1 = nn.Conv2d(3, 4, 5, 5)
            self.conv2d2 = nn.Conv2d(4, 5, 5, 5)
            self.conv2d3 = nn.Conv2d(5, 6, 5, 5)

        def forward(self, x):
            x = self.conv2d1(x)
            x = self.conv2d2(x)
            x = self.conv2d3(x)
            return x

    float_model = QuantWrapper(ConvChain())
    # dummy labels: randint's upper bound is exclusive, so these are all zeros
    img_data = [(torch.rand(10, 3, 125, 125, dtype=torch.float),
                 torch.randint(0, 1, (2,), dtype=torch.long))
                for _ in range(50)]
    self.correct_artificial_bias_float(float_model, img_data)
    self.correct_artificial_bias_quantize(float_model, img_data)

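# correct_artificial_bias_float / correct_artificial_bias_quantize are
# test-harness helpers, not public API. The idea they exercise can be
# sketched independently: inject an artificial error into each layer's bias
# and measure the output drift a bias-correction pass is expected to remove.
# `perturb_biases` is a hypothetical helper, not part of torch.
import copy
import torch
import torch.nn as nn

def perturb_biases(model, noise=0.3):
    # inject an artificial bias error into every biased layer
    for mod in model.modules():
        if getattr(mod, 'bias', None) is not None:
            with torch.no_grad():
                mod.bias.add_(noise * torch.randn_like(mod.bias))

chain = nn.Sequential(nn.Linear(3, 4), nn.Linear(4, 5), nn.Linear(5, 6))
reference = copy.deepcopy(chain)
perturb_biases(chain)
x = torch.rand(10, 3)
drift = (chain(x) - reference(x)).abs().max()
# a bias-correction pass is expected to drive this drift back toward zero
print(f"output drift after bias perturbation: {drift.item():.4f}")
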
def test_resnet_base(self, qconfig):
    r"""Test quantization for bottleneck topology used in resnet/resnext
    and add coverage for conversion of average pool and float functional
    """
    model = ResNetBase().float().eval()
    model = QuantWrapper(model)
    model.qconfig = qconfig
    fuse_list = ['module.conv1', 'module.bn1', 'module.relu1']
    fuse_modules(model, fuse_list, inplace=True)
    model = prepare(model)
    self.checkObservers(model)
    test_only_eval_fn(model, self.img_data)
    model = convert(model)

    def checkQuantized(model):
        self.assertEqual(type(model.module.conv1), nn._intrinsic.quantized.ConvReLU2d)
        self.assertEqual(type(model.module.myop), nn.quantized.QFunctional)
        self.assertEqual(type(model.module.avgpool), nn.AdaptiveAvgPool2d)
        test_only_eval_fn(model, self.img_data)

    checkQuantized(model)

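# The fuse_list above names modules relative to the wrapped model, hence the
# `module.` prefix that QuantWrapper introduces. A standalone sketch of what
# fuse_modules does to a conv/bn/relu triple (fusion requires eval mode):
import torch
import torch.nn as nn
from torch.quantization import fuse_modules

m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()).eval()
fused = fuse_modules(m, [['0', '1', '2']])
print(type(fused[0]))                     # fused conv+bn+relu module
assert isinstance(fused[1], nn.Identity)  # bn and relu become Identity
assert isinstance(fused[2], nn.Identity)
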
def __init__(self, add_stub=False):
    super().__init__()
    self.hswish = QuantWrapper(nn.Hardswish())

def __init__(self):
    super().__init__()
    layers = [nn.ConvTranspose2d(3, 32, 3, bias=True)]
    self.conv = nn.Sequential(*layers)
    self.quant_wrap = QuantWrapper(self.conv)

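# ConvTranspose2d goes through the same wrap/prepare/convert path, but note
# that quantized transposed convolution has (at least historically) only
# supported per-tensor weight quantization, and backend support varies by
# build. A sketch assuming a backend that supports it, using the per-tensor
# default_qconfig:
import torch
import torch.nn as nn
from torch.quantization import QuantWrapper, default_qconfig, prepare, convert

m = QuantWrapper(nn.ConvTranspose2d(3, 32, 3)).eval()
m.qconfig = default_qconfig     # per-tensor weights (per-channel unsupported here)
m = prepare(m)
m(torch.rand(1, 3, 8, 8))       # calibration pass
m = convert(m)
print(type(m.module))           # quantized ConvTranspose2d, backend permitting
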
def __init__(self):
    super().__init__()
    self.qconfig = default_qconfig
    self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))

def __init__(self, qengine='fbgemm'):
    super().__init__()
    self.qconfig = torch.quantization.get_default_qconfig(qengine)
    self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))

def __init__(self):
    super().__init__()
    self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
    self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).to(dtype=torch.float))
    self.fc2.qconfig = torch.quantization.get_default_qconfig("fbgemm")

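# Because only fc2 carries a qconfig (and the QuantWrapper boundary),
# prepare/convert should leave fc1 as a float nn.Linear and swap only the
# wrapped layer. A sketch of checking that, assuming the fbgemm backend is
# available (x86 builds); `Mixed` is a hypothetical miniature of the model
# above.
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.quantization import QuantWrapper, prepare, convert, get_default_qconfig

class Mixed(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(5, 8)
        self.fc2 = QuantWrapper(nn.Linear(8, 5))
        self.fc2.qconfig = get_default_qconfig('fbgemm')

    def forward(self, x):
        return self.fc2(self.fc1(x))

m = prepare(Mixed().eval())
m(torch.rand(4, 5))                          # calibration pass
m = convert(m)
assert isinstance(m.fc1, nn.Linear)          # left in float
assert isinstance(m.fc2.module, nnq.Linear)  # swapped for the quantized kernel
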
def __init__(self):
    super().__init__()
    self.pool = QuantWrapper(nn.AdaptiveAvgPool2d((1, 1)))

def __init__(self):
    super().__init__()
    self.pool = QuantWrapper(nn.AvgPool2d(kernel_size=2))

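# Pooling layers have no weights, and the pooling kernels operate directly
# on quantized tensors; that is why a bare QuantWrapper is enough for these
# models. A sketch of the underlying behavior:
import torch
import torch.nn as nn

x = torch.rand(1, 3, 4, 4)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
pool = nn.AvgPool2d(kernel_size=2)
qy = pool(qx)            # stays quantized end to end
print(qy.is_quantized)   # True
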
def __init__(self):
    super(AnnotatedSingleLayerLinearModel, self).__init__()
    self.qconfig = default_qconfig
    self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))

def __init__(self):
    super(AnnotatedTwoLayerLinearModel, self).__init__()
    self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
    self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).to(dtype=torch.float))
    self.fc2.qconfig = default_qconfig

def __init__(self):
    super().__init__()
    self.relu = QuantWrapper(nn.ReLU())

def __init__(self):
    super().__init__()
    self.relu = QuantWrapper(nn.ReLU())
    self.quant = QuantStub()
    self.dequant = DeQuantStub()

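# The explicit QuantStub/DeQuantStub pair above allows a hand-written
# quantized region in forward(), which the snippet omits. A self-contained
# sketch of the manual-stub pattern; `StubbedReLU` is hypothetical.
import torch
import torch.nn as nn
from torch.quantization import (QuantStub, DeQuantStub, default_qconfig,
                                prepare, convert)

class StubbedReLU(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()      # replaced by a Quantize op at convert()
        self.relu = nn.ReLU()
        self.dequant = DeQuantStub()  # replaced by dequantize at convert()

    def forward(self, x):
        # everything between the stubs runs in the quantized domain
        return self.dequant(self.relu(self.quant(x)))

m = StubbedReLU().eval()
m.qconfig = default_qconfig
m = prepare(m)
m(torch.rand(2, 4))                 # calibrate the quant stub's observer
m = convert(m)
print(m(torch.rand(2, 4)).dtype)    # torch.float32 back at the dequant boundary
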
def __init__(self):
    super().__init__()
    self.leaky_relu = QuantWrapper(nn.LeakyReLU())