Esempio n. 1
0
    def forward(self, x):
        """Quantization-aware forward pass.

        Quantizes the input activations, runs the stem convolution with
        GEMM-quantized weights/bias (output rescaled by the weight scale),
        then the residual stages and the final linear classifier.

        Args:
            x: input image batch tensor.

        Returns:
            Classifier logits tensor.
        """
        x = quantize_activations_gemm(x)

        # Quantize conv1's weights; the returned scale is compensated on the
        # bias before the conv and re-applied to the conv output afterwards.
        # (The redundant float `self.conv1(x)` call was removed — its result
        # was computed and discarded.)
        conv1_weight, conv1_scale = quantize_weight_gemm(self.conv1.weight)
        conv1_bias = quantize_bias_gemm(self.conv1.bias) / conv1_scale
        x = F.conv2d(x, conv1_weight, conv1_bias, stride=1,
                     padding=1) * conv1_scale
        x = self.relu(x)
        x = quantize_activations_gemm(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = F.avg_pool2d(x, 4)
        x = x.view(x.size(0), -1)
        x = self.linear(x)

        return x
Esempio n. 2
0
    def forward(self, x):
        """Quantize the input activations, extract features, flatten, and
        classify.

        Args:
            x: input batch tensor.

        Returns:
            Classifier output tensor.
        """
        out = quantize_activations_gemm(x)
        out = self.features(out)
        # Flatten all non-batch dimensions before the classifier head.
        out = out.view(out.size(0), -1)
        return self.classifier(out)
Esempio n. 3
0
    def forward(self, x):
        """Quantized bottleneck-block forward pass.

        Runs three convolutions with GEMM-quantized weights and biases
        (each output rescaled by its weight-quantization scale and the
        intermediate activations re-quantized), then adds the residual —
        itself run through a quantized shortcut conv when downsampling —
        and applies the final ReLU.

        Args:
            x: input feature-map tensor.

        Returns:
            Output feature-map tensor of the block.

        Note: the original code also evaluated the float modules
        `self.conv1/2/3` and `self.shortcut` and discarded the results
        (dead `out1` assignments); those redundant computations were
        removed.
        """
        x = quantize_activations_gemm(x)
        residual = x

        conv1_weight, conv1_scale = quantize_weight_gemm(self.conv1.weight)
        conv1_bias = quantize_bias_gemm(self.conv1.bias) / conv1_scale
        out = F.conv2d(x, conv1_weight, conv1_bias) * conv1_scale
        out = self.relu(out)
        out = quantize_activations_gemm(out)

        conv2_weight, conv2_scale = quantize_weight_gemm(self.conv2.weight)
        conv2_bias = quantize_bias_gemm(self.conv2.bias) / conv2_scale
        out = F.conv2d(
            out, conv2_weight, conv2_bias, stride=self.stride,
            padding=1) * conv2_scale
        out = self.relu(out)
        out = quantize_activations_gemm(out)

        conv3_weight, conv3_scale = quantize_weight_gemm(self.conv3.weight)
        conv3_bias = quantize_bias_gemm(self.conv3.bias) / conv3_scale
        out = F.conv2d(out, conv3_weight, conv3_bias, padding=1) * conv3_scale
        out = quantize_activations_gemm(out)

        if self.downsample:
            # Quantized version of the shortcut conv replaces the identity
            # residual when spatial/channel dims change.
            short_weight, short_scale = quantize_weight_gemm(
                self.shortcut[0].weight)
            short_bias = quantize_bias_gemm(
                self.shortcut[0].bias) / short_scale
            residual = F.conv2d(
                residual, short_weight, short_bias,
                stride=self.stride) * short_scale

        out += residual
        out = self.relu(out)

        return out
 def forward(self, input):
     """Apply the linear layer with GEMM-quantized operands.

     Weights, bias (when present) and input activations are all quantized
     before the matmul.

     Args:
         input: input activation tensor.

     Returns:
         Output tensor of the quantized linear transform.
     """
     qweight = quantize_weight_gemm_C(self.weight)
     qbias = quantize_bias_gemm(self.bias) if self.bias is not None else None
     qinput = quantize_activations_gemm(input)
     return F.linear(qinput, qweight, qbias)
Esempio n. 5
0
    def forward(self, x):
        """Quantized conv + ReLU block.

        Quantizes the conv weights (output rescaled by the returned scale),
        quantizes the scale-compensated bias, applies the convolution and
        ReLU, then re-quantizes the output activations.

        Args:
            x: input feature-map tensor.

        Returns:
            Quantized output activations.

        Note: the original dead `out1 = self.conv(x)` — a full float conv
        whose result was discarded — has been removed.
        """
        conv_weight, conv_scale = quantize_weight_gemm(self.conv.weight)
        # Here the bias is divided by the scale BEFORE quantization
        # (unlike the other blocks which quantize first) — presumably
        # intentional, but worth confirming for consistency.
        conv_bias = quantize_bias_gemm(self.conv.bias / conv_scale)
        out = F.conv2d(x, conv_weight, conv_bias, stride=1,
                       padding=1) * conv_scale
        out = self.relu(out)
        out = quantize_activations_gemm(out)

        return out
    def forward(self, input):
        """Conv2d with GEMM-quantized weights, bias and activations.

        The weight quantization scale divides the bias before quantization
        and multiplies the convolution output afterwards.

        Args:
            input: input activation tensor.

        Returns:
            Rescaled output tensor of the quantized convolution.
        """
        qweight, scale = quantize_weight_gemm_S(self.weight)
        qbias = (quantize_bias_gemm(self.bias / scale)
                 if self.bias is not None else None)
        qinput = quantize_activations_gemm(input)
        conv_out = F.conv2d(qinput, qweight, qbias, self.stride, self.padding,
                            self.dilation, self.groups)
        return conv_out * scale
Esempio n. 7
0
 def forward(self, input, scale, bias):
     """Quantized convolution with externally supplied affine parameters.

     Runs a bias-free conv on quantized input/weights, then applies the
     caller-provided `scale` and `bias` to the result.

     Args:
         input: input activation tensor.
         scale: multiplicative factor applied to the conv output.
         bias: additive term applied after scaling.

     Returns:
         scale * conv2d(q(input), q(weight)) + bias
     """
     qinput = quantize_activations_gemm(input)
     qweight = quantize_weights_bias_gemm(self.weight)
     # No internal bias: the affine shift comes from the `bias` argument.
     conv_out = F.conv2d(qinput, qweight, None, self.stride,
                         self.padding, self.dilation, self.groups)
     return conv_out * scale + bias