def __init__(
        self,
        num_classes=None,  # pylint: disable=unused-argument
        num_channels=3,
        dimensions=(160, 120),  # pylint: disable=unused-argument
        bias=True,
        **kwargs
):
    """Build the fixed convolution stack: a stem conv, four max-pooled
    conv stages, two plain 3x3 convs, a 1x1 channel-expansion layer and
    a final average pool.

    ``num_classes`` and ``dimensions`` are accepted only for interface
    compatibility with sibling models (note the pylint disables); they
    are not read here.
    """
    super().__init__()

    # Stem: 3x3 conv + ReLU, bias deliberately off on the first layer.
    self.conv1 = ai8x.FusedConv2dReLU(num_channels, 16, 3,
                                      padding=1, bias=False, **kwargs)

    # Four pooled stages, each 2x2 max-pool then 3x3 conv + ReLU.
    # Spec: (attribute, in_channels, out_channels, use_bias) -- conv2
    # keeps bias off like the stem, the remaining stages follow `bias`.
    pooled_stages = (
        ('conv2', 16, 32, False),
        ('conv3', 32, 32, bias),
        ('conv4', 32, 64, bias),
        ('conv5', 64, 64, bias),
    )
    for attr, in_ch, out_ch, use_bias in pooled_stages:
        setattr(self, attr,
                ai8x.FusedMaxPoolConv2dReLU(in_ch, out_ch, 3,
                                            pool_size=2, pool_stride=2,
                                            padding=1, bias=use_bias,
                                            **kwargs))

    # Two unpooled 3x3 conv layers at constant width.
    self.conv6 = ai8x.FusedConv2dReLU(64, 64, 3,
                                      padding=1, bias=bias, **kwargs)
    self.conv7 = ai8x.FusedConv2dReLU(64, 64, 3,
                                      padding=1, bias=bias, **kwargs)

    # Pooled 1x1 conv widening to 512 channels; no ReLU variant here,
    # presumably to feed a wide/linear output stage -- see forward().
    self.conv8 = ai8x.FusedMaxPoolConv2d(64, 512, 1,
                                         pool_size=2, pool_stride=2,
                                         padding=0, bias=False, **kwargs)

    # 5x3 average pool (matches the feature-map size implied by the
    # default 160x120 input after the poolings -- TODO confirm).
    self.avgpool = ai8x.AvgPool2d((5, 3))
def __init__(
        self,
        num_classes=100,
        num_channels=3,
        dimensions=(32, 32),  # pylint: disable=unused-argument
        bias=False,
        **kwargs
):
    """EfficientNet-style network: max-pooled stem with batch-norm,
    nine MBConv blocks, a 1x1 head conv, global average pooling and a
    wide linear classifier.

    ``dimensions`` is accepted for interface compatibility only (pylint
    disable) and is not read here.
    """
    super().__init__()

    # Stem: 2x2 max-pool, then 3x3 conv with affine batch-norm + ReLU.
    self.conv_stem = ai8x.FusedMaxPoolConv2dBNReLU(
        num_channels, 32, 3, pool_size=2, pool_stride=2, stride=1,
        batchnorm='Affine', padding=1, bias=bias,
        eps=1e-03, momentum=0.01, **kwargs)

    # MBConv stage table: (in_channels, out_channels, expand_ratio,
    # fused). The first block is unexpanded; early stages use the fused
    # variant, the deeper ones the non-fused variant (presumably
    # depthwise MBConv -- confirm in ai8x_blocks). All use a 3x3 kernel
    # and no squeeze-excite (se_ratio=None).
    mb_specs = (
        (32, 16, 1, True),
        (16, 32, 4, True),
        (32, 32, 4, True),
        (32, 48, 4, True),
        (48, 48, 4, True),
        (48, 96, 4, False),
        (96, 96, 4, False),
        (96, 128, 4, False),
        (128, 128, 4, False),
    )
    for idx, (in_ch, out_ch, expand, fused) in enumerate(mb_specs, start=1):
        setattr(self, f'mb_conv{idx}',
                ai8x_blocks.MBConvBlock(in_ch, out_ch, 3, bias=bias,
                                        se_ratio=None,
                                        expand_ratio=expand,
                                        fused=fused, **kwargs))

    # Head: 1x1 conv widening to 1024 channels, batch-norm + ReLU.
    self.conv_head = ai8x.FusedConv2dBNReLU(
        128, 1024, 1, stride=1, batchnorm='Affine', padding=0,
        bias=bias, eps=1e-03, momentum=0.01, **kwargs)

    # 16x16 average pool, then the classifier (wide output).
    self.avg_pooling = ai8x.AvgPool2d((16, 16))
    # self.dropout = nn.Dropout(0.2)
    self.fc = ai8x.Linear(1024, num_classes, bias=bias, wide=True, **kwargs)