def _make_layers(self, cfg, quantize_bit=32):
    """Build the convolutional feature stack described by *cfg*.

    Each integer entry appends a quantized 3x3 conv -> BatchNorm -> ReLU ->
    Dropout2d block with that many output channels; each 'M' entry appends a
    2x2 average pool followed by Dropout2d (dropout used during training).

    Args:
        cfg: sequence mixing ints and 'M' markers, e.g.
            [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'].
        quantize_bit: weight bit-width forwarded to catSNN.QuantizedConv2d.

    Returns:
        nn.Sequential containing the assembled layers.
    """
    layers = []
    in_channels = 3  # RGB input
    for x in cfg:
        if x == 'M':
            # Downsample, then regularize with spatial dropout.
            layers += [
                nn.AvgPool2d(kernel_size=2, stride=2),
                nn.Dropout2d(0.3),
            ]
        else:
            layers += [
                catSNN.QuantizedConv2d(in_channels, x, kernel_size=3,
                                       padding=1, bias=self.bias,
                                       quantize_bit=quantize_bit),
                nn.BatchNorm2d(x),
                nn.ReLU(),
                nn.Dropout2d(0.2),
            ]
            if self.quantize_factor != -1:
                # Optional activation-quantization stage after each conv block.
                layers += [catSNN.Quantize(self.quantize_factor)]
            in_channels = x
    # Identity-sized pool; presumably kept for parity with the SNN mapping —
    # TODO confirm it is required by the catSNN conversion.
    layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
    return nn.Sequential(*layers)
def _make_layers(self, cfg, quantize_bit=32):
    """Build the convolutional feature stack described by *cfg*.

    Each tuple entry ``(cin, cout)`` appends a grouped 2x2 stride-2 conv
    (acting as a learned downsampling layer) followed by a clamp; each
    integer entry appends a quantized 3x3 conv followed by a clamp, an
    optional quantization stage, and updates the running channel count.

    Args:
        cfg: sequence mixing ints and (cin, cout) tuples, e.g. a VGG-style
            list such as [64, 'M', 128, ...] with pools replaced by tuples.
        quantize_bit: weight bit-width forwarded to catSNN.QuantizedConv2d.

    Returns:
        nn.Sequential containing the assembled layers.
    """
    layers = []
    in_channels = 3  # RGB input
    for x in cfg:
        if type(x) is tuple:
            # Learned 2x2/stride-2 downsampling; groups=x[1] makes it
            # depthwise over the output channels.
            # NOTE(review): this branch does not update in_channels, so it
            # assumes x[1] matches the preceding layer's width — confirm
            # against the cfg values used by callers.
            layers += [
                nn.Conv2d(x[0], x[1], kernel_size=2, stride=2,
                          bias=self.bias, groups=x[1]),
                catSNN.Clamp(max=self.clamp_max),
            ]
        else:
            layers += [
                catSNN.QuantizedConv2d(in_channels, x, kernel_size=3,
                                       padding=1, bias=self.bias,
                                       quantize_bit=quantize_bit),
                catSNN.Clamp(max=self.clamp_max),
            ]
            if self.quantize_factor != -1:
                # Optional activation-quantization stage after each conv.
                layers += [catSNN.Quantize(self.quantize_factor)]
            in_channels = x
    # Identity-sized pool; presumably kept for parity with the SNN mapping —
    # TODO confirm it is required by the catSNN conversion.
    layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
    return nn.Sequential(*layers)
def _make_layers(self, cfg, quantize_bit=32):
    """Assemble a quantized VGG-style feature extractor from *cfg*.

    'M' entries contribute a 2x2 average pool; integer entries contribute a
    quantized 3x3 convolution followed by a clamp (plus an optional
    quantization stage when ``self.quantize_factor != -1``).

    Args:
        cfg: sequence of ints and 'M' markers, e.g.
            [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'].
        quantize_bit: weight bit-width forwarded to catSNN.QuantizedConv2d.

    Returns:
        nn.Sequential with the assembled layers.
    """
    modules = []
    channels = 3  # RGB input
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.AvgPool2d(kernel_size=2, stride=2))
            continue
        modules.append(
            catSNN.QuantizedConv2d(channels, spec, kernel_size=3, padding=1,
                                   bias=self.bias, quantize_bit=quantize_bit))
        modules.append(catSNN.Clamp(max=self.clamp_max))
        if self.quantize_factor != -1:
            modules.append(catSNN.Quantize(self.quantize_factor))
        channels = spec
    modules.append(nn.AvgPool2d(kernel_size=1, stride=1))
    return nn.Sequential(*modules)