Example #1
0
    def _make_layers(self, cfg):
        """Assemble the feature-extraction stack described by ``cfg``.

        Each ``cfg`` entry is one of:
          * ``'M'``            -- a 2x2 stride-2 average pool followed by Dropout2d(0.15);
          * ``int``            -- output channels of a 3x3 quantized conv (padding 1);
          * ``(channels, pad)``-- output channels plus an explicit padding value.

        Conv stages are conv -> BatchNorm -> Clamp -> Dropout. Input is
        assumed to have 3 channels (RGB images -- confirm against caller).
        """
        blocks = []
        prev_channels = 3

        for spec in cfg:
            if spec == 'M':
                blocks.append(nn.AvgPool2d(kernel_size=2, stride=2))
                blocks.append(nn.Dropout2d(0.15))
                continue

            # Tuple entries carry an explicit padding; plain ints default to 1.
            if isinstance(spec, tuple):
                width, pad = spec[0], spec[1]
            else:
                width, pad = spec, 1

            blocks.extend([
                catSNN.QuantizedConv2d(prev_channels,
                                       width,
                                       kernel_size=3,
                                       padding=pad,
                                       bias=self.bias),
                nn.BatchNorm2d(width),
                catSNN.Clamp(max=self.clamp_max),
                nn.Dropout2d(0.15),
            ])
            prev_channels = width

        # Trailing 1x1/stride-1 average pool (identity-sized stage).
        blocks.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*blocks)
Example #2
0
 def _make_layers(self, cfg, quantize_bit=32):
     """Build the VGG-style layer stack from ``cfg``.

     ``cfg`` is e.g. [64, 'M', 128, 'M', 256, 256, 'M', ...]: 'M' entries
     become AvgPool2d(2, 2) + Dropout2d(0.3) stages, integer entries become
     quantized 3x3 conv -> BatchNorm -> ReLU -> Dropout2d(0.2) stages,
     optionally followed by a Quantize layer when ``self.quantize_factor``
     is not -1. A final 1x1 average pool caps the stack.
     """
     modules = []
     channels = 3
     for entry in cfg:
         if entry == 'M':
             modules.extend([
                 nn.AvgPool2d(kernel_size=2, stride=2),
                 nn.Dropout2d(0.3),
             ])
         else:
             modules.extend([
                 catSNN.QuantizedConv2d(channels,
                                        entry,
                                        kernel_size=3,
                                        padding=1,
                                        bias=self.bias,
                                        quantize_bit=quantize_bit),
                 nn.BatchNorm2d(entry),
                 nn.ReLU(),
                 nn.Dropout2d(0.2),
             ])
             # Optional activation quantization stage.
             if self.quantize_factor != -1:
                 modules.append(catSNN.Quantize(self.quantize_factor))
             channels = entry
     modules.append(nn.AvgPool2d(kernel_size=1, stride=1))
     return nn.Sequential(*modules)
Example #3
0
 def _make_layers(self, cfg, quantize_bit=32):
     """Build the layer stack from ``cfg``.

     Entry handling:
       * ``(in_ch, out_ch)`` tuple -- a grouped 2x2 stride-2 conv used as a
         learned downsampling stage, followed by a Clamp;
       * ``int`` -- quantized 3x3 conv (padding 1) followed by a Clamp and,
         when ``self.quantize_factor`` != -1, a Quantize layer.

     Ends with a 1x1/stride-1 average pool. Note: tuple entries do not
     update the running channel count -- presumably out_ch matches the
     following entry; confirm against the cfg used by callers.
     """
     modules = []
     channels = 3
     for entry in cfg:
         if isinstance(entry, tuple):
             modules.extend([
                 nn.Conv2d(entry[0],
                           entry[1],
                           kernel_size=2,
                           stride=2,
                           bias=self.bias,
                           groups=entry[1]),
                 catSNN.Clamp(max=self.clamp_max),
             ])
         else:
             modules.extend([
                 catSNN.QuantizedConv2d(channels,
                                        entry,
                                        kernel_size=3,
                                        padding=1,
                                        bias=self.bias,
                                        quantize_bit=quantize_bit),
                 catSNN.Clamp(max=self.clamp_max),
             ])
             if self.quantize_factor != -1:
                 modules.append(catSNN.Quantize(self.quantize_factor))
             channels = entry
     modules.append(nn.AvgPool2d(kernel_size=1, stride=1))
     return nn.Sequential(*modules)
Example #4
0
 def _make_layers(self, cfg, quantize_bit=32):
     """Build the layer stack from ``cfg``.

     'M' entries become AvgPool2d(2, 2) stages; integer entries become a
     quantized 3x3 conv (padding 1) followed by a Clamp and, when
     ``self.quantize_factor`` != -1, a Quantize layer. The stack ends with
     a 1x1/stride-1 average pool.
     """
     modules = []
     channels = 3
     for entry in cfg:
         if entry == 'M':
             modules.append(nn.AvgPool2d(kernel_size=2, stride=2))
             continue
         modules.extend([
             catSNN.QuantizedConv2d(channels,
                                    entry,
                                    kernel_size=3,
                                    padding=1,
                                    bias=self.bias,
                                    quantize_bit=quantize_bit),
             catSNN.Clamp(max=self.clamp_max),
         ])
         if self.quantize_factor != -1:
             modules.append(catSNN.Quantize(self.quantize_factor))
         channels = entry
     modules.append(nn.AvgPool2d(kernel_size=1, stride=1))
     return nn.Sequential(*modules)