Code example #1
    def __init__(self, quantized=False):
        super(ModForWrapping, self).__init__()
        self.qconfig = default_qconfig
        if quantized:
            self.mycat = nnq.QFunctional()
            self.myadd = nnq.QFunctional()
        else:
            self.mycat = nnq.FloatFunctional()
            self.myadd = nnq.FloatFunctional()
            self.mycat.observer = DummyObserver()
            self.myadd.observer = DummyObserver()
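
Example #1 switches between nnq.QFunctional (for an already-quantized module) and nnq.FloatFunctional (the float version that can carry observers). Both classes expose the same method names (add, cat, mul, ...), which is what makes the swap transparent to the rest of the module. Below is a minimal, self-contained sketch of those calls on made-up tensors, not taken from the project above:

    import torch
    import torch.nn.quantized as nnq

    # Float path: methods simply call the corresponding torch ops and,
    # when observers are attached, record activation statistics.
    ff = nnq.FloatFunctional()
    x = torch.randn(2, 3)
    y = torch.randn(2, 3)
    print(ff.add(x, y).shape)
    print(ff.cat([x, y], dim=0).shape)

    # Quantized path: same method names, but the inputs must be quantized
    # tensors and the output is requantized with the module's scale/zero_point.
    qf = nnq.QFunctional()
    qx = torch.quantize_per_tensor(x, 0.1, 0, torch.quint8)
    qy = torch.quantize_per_tensor(y, 0.1, 0, torch.quint8)
    print(qf.add(qx, qy).shape)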
Code example #2
    def __init__(self,
                 embed_dim: int,
                 num_heads: int,
                 dropout: float = 0.,
                 bias: bool = True,
                 add_bias_kv: bool = False,
                 add_zero_attn: bool = False,
                 kdim: int = None,
                 vdim: int = None):
        super(MultiheadAttention,
              self).__init__(embed_dim, num_heads, dropout, bias, add_bias_kv,
                             add_zero_attn, kdim, vdim)
        self.linear_Q = nn.Linear(self.embed_dim, self.embed_dim, bias=bias)
        self.linear_K = nn.Linear(self.kdim, self.embed_dim, bias=bias)
        self.linear_V = nn.Linear(self.vdim, self.embed_dim, bias=bias)

        # TODO: The use of `_LinearWithBias` increases the quantization noise.
        # The `out_proj` in the parent is a `_LinearWithBias`, so we need to
        # ignore the type for mypy not to complain.
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim,
                                  bias=bias)  # type: ignore

        # Functionals
        self.q_scaling_product = nnq.FloatFunctional()

        # Quant/Dequant
        self.quant_attn_output = torch.quantization.QuantStub()
        self.quant_attn_output_weights = torch.quantization.QuantStub()
        self.dequant_q = torch.quantization.DeQuantStub()
        self.dequant_k = torch.quantization.DeQuantStub()
        self.dequant_v = torch.quantization.DeQuantStub()
Code example #3
File: resnets.py  Project: sergius-htm/nupic.research
    def __init__(self, in_planes, planes, stride, conv_layer, conv_args,
                 act_layer, act_args, norm_layer, norm_args):
        super(BasicBlock, self).__init__()

        self.regular_path = nn.Sequential(OrderedDict([
            ("conv1", conv_layer(in_planes, planes, kernel_size=3, stride=stride,
                                 padding=1, bias=False,
                                 **conv_args["conv3x3_1"])),
            ("bn1", norm_layer(planes, **norm_args["bn1"])),
            ("act1", act_layer(planes, **act_args["act1"])),
            ("conv2", conv_layer(planes, planes, kernel_size=3, padding=1,
                                 bias=False, **conv_args["conv3x3_2"])),
            ("bn2", norm_layer(planes, **norm_args["bn2"])),
        ]))

        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(OrderedDict([
                ("conv", conv_layer(in_planes, planes, kernel_size=1,
                                    stride=stride, bias=False,
                                    **conv_args["shortcut"])),
                ("bn", norm_layer(planes, **norm_args["shortcut"])),
            ]))
        else:
            self.shortcut = nn.Identity()

        self.post_activation = act_layer(planes, **act_args["act2"])
        self.quant_ops = nnq.FloatFunctional()
Code example #4
    def __init__(self):
        super(SimpleModel, self).__init__()
        self.quant = torch.quantization.QuantStub()
        self.dequant = torch.quantization.DeQuantStub()
        self.func_add = nnq.FloatFunctional()
        self.conv1 = nn.Conv2d(3, 2, 5, bias=None).to(dtype=torch.float)
        self.act1 = nn.Sigmoid()
        self.conv2 = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)
        self.fc = nn.Linear(72, 10).to(dtype=torch.float)
        self.fc.qconfig = None
Code example #5
    def __init__(self):
        super(ModelWithFunctionals, self).__init__()
        self.mycat = nnq.FloatFunctional()
        self.myadd = nnq.FloatFunctional()
        self.mymul = nnq.FloatFunctional()
        self.myadd_relu = nnq.FloatFunctional()
        self.my_scalar_add = nnq.FloatFunctional()
        self.my_scalar_mul = nnq.FloatFunctional()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Code example #6
File: test_adaptor_pytorch.py  Project: intel/lpot
    def __init__(self):
        super(ModelWithFunctionals, self).__init__()
        self.mycat = nnq.FloatFunctional()
        self.myadd = nnq.FloatFunctional()
        self.myadd_relu = nnq.FloatFunctional()
        # Tracing doesn't work yet for c10 ops with scalar inputs
        # https://github.com/pytorch/pytorch/issues/27097
        self.my_scalar_add = nnq.FloatFunctional()
        self.mymul = nnq.FloatFunctional()
        self.my_scalar_mul = nnq.FloatFunctional()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
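
Examples #5 and #6 also reserve FloatFunctional instances for the scalar variants (my_scalar_add, my_scalar_mul). A tiny, hypothetical illustration of those methods on their own, not taken from either project:

    import torch
    import torch.nn.quantized as nnq

    ff = nnq.FloatFunctional()
    x = torch.randn(2, 3)
    # The scalar variants take a Python number as the second operand; unlike
    # add/mul, their outputs are not observed because the quantized op does
    # not need an output scale of its own.
    print(ff.add_scalar(x, 3.0).shape)
    print(ff.mul_scalar(x, 0.5).shape)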
Code example #7
File: activation.py  Project: yzhuo1992/pytorch
    def __init__(self,
                 embed_dim: int,
                 num_heads: int,
                 dropout: float = 0.,
                 bias: bool = True,
                 add_bias_kv: bool = False,
                 add_zero_attn: bool = False,
                 kdim: int = None,
                 vdim: int = None,
                 batch_first: bool = False,
                 device=None,
                 dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super(MultiheadAttention,
              self).__init__(embed_dim, num_heads, dropout, bias, add_bias_kv,
                             add_zero_attn, kdim, vdim, batch_first,
                             **factory_kwargs)
        self.linear_Q = nn.Linear(self.embed_dim,
                                  self.embed_dim,
                                  bias=bias,
                                  **factory_kwargs)
        self.linear_K = nn.Linear(self.kdim,
                                  self.embed_dim,
                                  bias=bias,
                                  **factory_kwargs)
        self.linear_V = nn.Linear(self.vdim,
                                  self.embed_dim,
                                  bias=bias,
                                  **factory_kwargs)
        # for the type: ignore, see https://github.com/pytorch/pytorch/issues/58969
        self.out_proj = nn.Linear(self.embed_dim,
                                  self.embed_dim,
                                  bias=bias,
                                  **factory_kwargs)  # type: ignore[assignment]

        # Functionals
        self.q_scaling_product = nnq.FloatFunctional()

        # Quant/Dequant
        self.quant_attn_output = torch.ao.quantization.QuantStub()
        self.quant_attn_output_weights = torch.ao.quantization.QuantStub()
        self.dequant_q = torch.ao.quantization.DeQuantStub()
        self.dequant_k = torch.ao.quantization.DeQuantStub()
        self.dequant_v = torch.ao.quantization.DeQuantStub()
Code example #8
File: resnets.py  Project: sergius-htm/nupic.research
    def __init__(self, in_planes, planes, stride, conv_layer, conv_args,
                 act_layer, act_args, norm_layer, norm_args):
        super().__init__()

        self.regular_path = nn.Sequential(OrderedDict([
            # 1st layer
            ("conv1", conv_layer(in_planes, planes, kernel_size=1, bias=False,
                                 **conv_args["conv1x1_1"])),
            ("bn1", norm_layer(planes, **norm_args["bn1"])),
            ("act1", act_layer(planes, **act_args["act1"])),
            # 2nd layer
            ("conv2", conv_layer(planes, planes, stride=stride, kernel_size=3,
                                 padding=1, bias=False,
                                 **conv_args["conv3x3_2"])),
            ("bn2", norm_layer(planes, **norm_args["bn2"])),
            ("act2", act_layer(planes,
                               kernel_size=3,  # Deprecated, discarded by default
                               **act_args["act2"])),
            # 3rd layer
            ("conv3", conv_layer(planes, self.expansion * planes, kernel_size=1,
                                 bias=False, **conv_args["conv1x1_3"])),
            ("bn3", norm_layer(self.expansion * planes, **norm_args["bn3"])),
        ]))

        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(OrderedDict([
                ("conv", conv_layer(in_planes, self.expansion * planes,
                                    kernel_size=1, stride=stride, bias=False,
                                    **conv_args["shortcut"])),
                ("bn", norm_layer(self.expansion * planes,
                                  **norm_args["shortcut"])),
            ]))
        else:
            self.shortcut = nn.Identity()

        self.post_activation = act_layer(self.expansion * planes,
                                         **act_args["act3"])
        self.quant_ops = nnq.FloatFunctional()
Code example #9
File: common_quantization.py  Project: nutm/pytorch
    def __init__(self):
        super().__init__()
        self.mycat = nnq.FloatFunctional()
        self.myadd = nnq.FloatFunctional()
        self.myadd_relu = nnq.FloatFunctional()
Code example #10
    def __init__(self):
        super(ModelWithFunctionals, self).__init__()
        self.mycat = nnq.FloatFunctional()
        self.myadd = nnq.FloatFunctional()
        self.myadd_relu = nnq.FloatFunctional()
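
The __init__ snippets above only declare the functionals and stubs. For context, here is a minimal, self-contained sketch of the surrounding eager-mode post-training quantization workflow (the TinyAddModel class is hypothetical and not taken from any of the projects above; it assumes an x86 build where the fbgemm backend is available): FloatFunctional is called in forward, the model is prepared and calibrated, and convert() then replaces the FloatFunctional with its quantized counterpart.

    import torch
    import torch.nn as nn
    import torch.nn.quantized as nnq


    class TinyAddModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.quant = torch.ao.quantization.QuantStub()
            self.dequant = torch.ao.quantization.DeQuantStub()
            self.func_add = nnq.FloatFunctional()
            self.conv = nn.Conv2d(3, 3, 1)

        def forward(self, x):
            x = self.quant(x)
            # The functional's observer sees this add, so convert() knows
            # the output scale/zero_point for the quantized replacement.
            x = self.func_add.add(x, self.conv(x))
            return self.dequant(x)


    model = TinyAddModel().eval()
    model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
    prepared = torch.ao.quantization.prepare(model)
    prepared(torch.randn(1, 3, 8, 8))                    # calibration pass
    quantized = torch.ao.quantization.convert(prepared)  # FloatFunctional -> QFunctional
    print(quantized(torch.randn(1, 3, 8, 8)).shape)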