def __init__(
        self,
        num_classes=4,
        num_channels=3,
        dimensions=(128, 128),  # pylint: disable=unused-argument
        bias=True,
        **kwargs
):
    """Build a small encoder/decoder segmentation network.

    Three max-pool encoder stages feed a bottleneck; three transpose-conv
    stages upsample back, and a 1x1 head emits per-pixel class scores.
    All fused conv layers use non-affine batch normalization.
    """
    super().__init__()
    # Keyword arguments shared by every fused conv+BN layer below.
    bn_args = dict(bias=bias, batchnorm='NoAffine', **kwargs)

    # Encoder: one plain conv, then three pooling stages down to the bottleneck.
    self.enc1 = ai8x.FusedConv2dBNReLU(num_channels, 4, 3, stride=1, padding=1, **bn_args)
    self.enc2 = ai8x.FusedMaxPoolConv2dBNReLU(4, 8, 3, stride=1, padding=1, **bn_args)
    self.enc3 = ai8x.FusedMaxPoolConv2dBNReLU(8, 32, 3, stride=1, padding=1, **bn_args)
    self.bneck = ai8x.FusedMaxPoolConv2dBNReLU(32, 64, 3, stride=1, padding=1, **bn_args)

    # Decoder: transpose convs upsample; the dec* input widths (64, 16, 8)
    # exceed the upconv outputs (32, 8, 4), which suggests encoder features
    # are concatenated in as skip connections — confirm in forward().
    self.upconv3 = ai8x.ConvTranspose2d(64, 32, 3, stride=2, padding=1)
    self.dec3 = ai8x.FusedConv2dBNReLU(64, 32, 3, stride=1, padding=1, **bn_args)
    self.upconv2 = ai8x.ConvTranspose2d(32, 8, 3, stride=2, padding=1)
    self.dec2 = ai8x.FusedConv2dBNReLU(16, 8, 3, stride=1, padding=1, **bn_args)
    self.upconv1 = ai8x.ConvTranspose2d(8, 4, 3, stride=2, padding=1)
    self.dec1 = ai8x.FusedConv2dBNReLU(8, 16, 3, stride=1, padding=1, **bn_args)

    # 1x1 classification head (no activation).
    self.conv = ai8x.FusedConv2dBN(16, num_classes, 1, stride=1, padding=0, **bn_args)
def __init__(
        self,
        num_classes=4,
        num_channels=48,
        dimensions=(88, 88),  # pylint: disable=unused-argument
        bias=True,
        fold_ratio=4,
        **kwargs
):
    """Build an encoder/decoder segmentation network with a 1x1 prep stack
    and a folded output.

    The final layer emits ``num_classes * fold_ratio**2`` channels so that
    the spatially folded input can be unfolded back to full resolution
    downstream. All fused conv layers use non-affine batch normalization.
    """
    super().__init__()
    self.fold_ratio = fold_ratio
    self.num_classes = num_classes
    # One output channel group per class per folded pixel position.
    self.num_final_channels = num_classes * fold_ratio * fold_ratio

    # Keyword arguments shared by every fused conv+BN layer below.
    bn_args = dict(bias=bias, batchnorm='NoAffine', **kwargs)

    # 1x1 "prep" stack: channel mixing before the spatial encoder.
    self.prep0 = ai8x.FusedConv2dBNReLU(num_channels, 64, 1, stride=1, padding=0, **bn_args)
    self.prep1 = ai8x.FusedConv2dBNReLU(64, 64, 1, stride=1, padding=0, **bn_args)
    self.prep2 = ai8x.FusedConv2dBNReLU(64, 32, 1, stride=1, padding=0, **bn_args)

    # Encoder: three pooling stages down to the bottleneck.
    self.enc1 = ai8x.FusedConv2dBNReLU(32, 8, 3, stride=1, padding=1, **bn_args)
    self.enc2 = ai8x.FusedMaxPoolConv2dBNReLU(8, 28, 3, stride=1, padding=1, **bn_args)
    self.enc3 = ai8x.FusedMaxPoolConv2dBNReLU(28, 56, 3, stride=1, padding=1, **bn_args)
    self.bneck = ai8x.FusedMaxPoolConv2dBNReLU(56, 112, 3, stride=1, padding=1, **bn_args)

    # Decoder: dec3/dec2/dec1 input widths are double the upconv outputs,
    # which suggests encoder features are concatenated in as skip
    # connections — confirm in forward().
    self.upconv3 = ai8x.ConvTranspose2d(112, 56, 3, stride=2, padding=1)
    self.dec3 = ai8x.FusedConv2dBNReLU(112, 56, 3, stride=1, padding=1, **bn_args)
    self.upconv2 = ai8x.ConvTranspose2d(56, 28, 3, stride=2, padding=1)
    self.dec2 = ai8x.FusedConv2dBNReLU(56, 28, 3, stride=1, padding=1, **bn_args)
    self.upconv1 = ai8x.ConvTranspose2d(28, 8, 3, stride=2, padding=1)
    self.dec1 = ai8x.FusedConv2dBNReLU(16, 48, 3, stride=1, padding=1, **bn_args)
    self.dec0 = ai8x.FusedConv2dBNReLU(48, 64, 3, stride=1, padding=1, **bn_args)

    # 1x1 post-processing stack and the folded classification head.
    self.conv_p1 = ai8x.FusedConv2dBNReLU(64, 64, 1, stride=1, padding=0, **bn_args)
    self.conv_p2 = ai8x.FusedConv2dBNReLU(64, 64, 1, stride=1, padding=0, **bn_args)
    self.conv_p3 = ai8x.FusedConv2dBN(64, 64, 1, stride=1, padding=0, **bn_args)
    self.conv = ai8x.FusedConv2dBN(64, self.num_final_channels, 1, stride=1, padding=0,
                                   **bn_args)
def __init__(
        self,
        num_classes=100,
        num_channels=3,
        dimensions=(32, 32),  # pylint: disable=unused-argument
        bias=False,
        **kwargs
):
    """Build a 14-layer convolutional classifier.

    Thirteen fused conv stages (five of them with 2x2 max pooling) feed a
    1x1 wide-output classification convolution.
    """
    super().__init__()
    # Keyword arguments shared by every layer below.
    cargs = dict(bias=bias, **kwargs)

    # Full-resolution feature extraction.
    self.conv1 = ai8x.FusedConv2dBNReLU(num_channels, 24, 3, stride=1, padding=1, **cargs)
    self.conv2 = ai8x.FusedConv2dBNReLU(24, 32, 3, stride=1, padding=1, **cargs)
    self.conv3 = ai8x.FusedConv2dBNReLU(32, 32, 3, stride=1, padding=1, **cargs)
    self.conv4 = ai8x.FusedConv2dBNReLU(32, 32, 3, stride=1, padding=1, **cargs)

    # First 2x downsample.
    self.conv5 = ai8x.FusedMaxPoolConv2dBNReLU(32, 32, 3, pool_size=2, pool_stride=2,
                                               stride=1, padding=1, **cargs)
    self.conv6 = ai8x.FusedConv2dBNReLU(32, 32, 3, stride=1, padding=1, **cargs)
    self.conv7 = ai8x.FusedConv2dBNReLU(32, 64, 3, stride=1, padding=1, **cargs)

    # Second 2x downsample.
    self.conv8 = ai8x.FusedMaxPoolConv2dBNReLU(64, 64, 3, pool_size=2, pool_stride=2,
                                               stride=1, padding=1, **cargs)
    self.conv9 = ai8x.FusedConv2dBNReLU(64, 64, 3, stride=1, padding=1, **cargs)

    # Third and fourth 2x downsamples; conv11 widens via a 1x1 kernel.
    self.conv10 = ai8x.FusedMaxPoolConv2dBNReLU(64, 128, 3, pool_size=2, pool_stride=2,
                                                stride=1, padding=1, **cargs)
    self.conv11 = ai8x.FusedMaxPoolConv2dBNReLU(128, 512, 1, pool_size=2, pool_stride=2,
                                                padding=0, **cargs)
    self.conv12 = ai8x.FusedConv2dBNReLU(512, 192, 1, stride=1, padding=0, **cargs)

    # Final downsample and wide 1x1 classification head.
    self.conv13 = ai8x.FusedMaxPoolConv2dBNReLU(192, 192, 3, pool_size=2, pool_stride=2,
                                                stride=1, padding=1, **cargs)
    self.conv14 = ai8x.Conv2d(192, num_classes, 1, stride=1, padding=0, wide=True,
                              **cargs)
def __init__(
        self,
        num_classes=10,
        num_channels=3,
        dimensions=(32, 32),  # pylint: disable=unused-argument
        bias=False,
        **kwargs
):
    """Build a compact conv classifier with alternating 3x3/1x1 stages.

    Four 2x2 max-pool stages reduce the spatial size; a wide linear layer
    produces the class scores. All conv layers use non-affine batch
    normalization.
    """
    super().__init__()
    # Keyword arguments shared by every fused conv+BN layer below.
    bn_args = dict(bias=bias, batchnorm='NoAffine', **kwargs)

    # Stage 1: full resolution.
    self.conv1_1 = ai8x.FusedConv2dBNReLU(num_channels, 64, 3, stride=1, padding=1,
                                          **bn_args)
    self.conv1_2 = ai8x.FusedConv2dBNReLU(64, 32, 1, stride=1, padding=0, **bn_args)
    self.conv1_3 = ai8x.FusedConv2dBNReLU(32, 64, 3, stride=1, padding=1, **bn_args)

    # Stage 2: first downsample.
    self.conv2_1 = ai8x.FusedMaxPoolConv2dBNReLU(64, 32, 3, stride=1, padding=1,
                                                 **bn_args)
    self.conv2_2 = ai8x.FusedConv2dBNReLU(32, 64, 1, stride=1, padding=0, **bn_args)

    # Stage 3: second downsample.
    self.conv3_1 = ai8x.FusedMaxPoolConv2dBNReLU(64, 128, 3, stride=1, padding=1,
                                                 **bn_args)
    self.conv3_2 = ai8x.FusedConv2dBNReLU(128, 128, 1, stride=1, padding=0, **bn_args)

    # Stage 4: third downsample.
    self.conv4_1 = ai8x.FusedMaxPoolConv2dBNReLU(128, 64, 3, stride=1, padding=1,
                                                 **bn_args)
    self.conv4_2 = ai8x.FusedConv2dBNReLU(64, 128, 3, stride=1, padding=1, **bn_args)

    # Stage 5: final downsample, then a wide linear head over the
    # flattened features (512 inputs — presumably 128 channels x 2x2
    # spatial for the default 32x32 input; confirm in forward()).
    self.conv5_1 = ai8x.FusedMaxPoolConv2dBNReLU(128, 128, 1, stride=1, padding=0,
                                                 **bn_args)
    self.fc = ai8x.Linear(512, num_classes, bias=bias, wide=True, **kwargs)
def __init__(  # pylint: disable=too-many-arguments
        self,
        pre_layer_stride,
        bottleneck_settings,
        last_layer_width,
        avg_pool_size=4,
        num_classes=100,
        num_channels=3,
        dimensions=(32, 32),  # pylint: disable=unused-argument
        bias=False,
        depthwise_bias=False,
        **kwargs):
    """Build a bottleneck-block classifier from a settings table.

    ``bottleneck_settings`` is a sequence of per-stage configurations; each
    entry's index-1 and index-2 fields supply the stage's input and output
    widths (the first feeds the pre-stage, the last feeds the post-stage).
    """
    super().__init__()

    # Pre-stage: 3x3 conv mapping the input channels onto the first
    # bottleneck stage's input width.
    self.pre_stage = ai8x.FusedConv2dBNReLU(num_channels, bottleneck_settings[0][1], 3,
                                            padding=1, stride=pre_layer_stride,
                                            bias=bias, **kwargs)

    # Feature stage: one bottleneck stage per settings row, appended by the
    # helper into self.feature_stage.
    self.feature_stage = nn.ModuleList([])
    for stage_cfg in bottleneck_settings:
        self._create_bottleneck_stage(stage_cfg, bias, depthwise_bias, **kwargs)

    # Post-stage 1x1 expansion (deliberately bias-free), then the
    # average-pooled wide classification head.
    self.post_stage = ai8x.FusedConv2dReLU(bottleneck_settings[-1][2], last_layer_width,
                                           1, padding=0, stride=1, bias=False, **kwargs)
    self.classifier = ai8x.FusedAvgPoolConv2d(last_layer_width, num_classes, 1,
                                              padding=0, stride=1,
                                              pool_size=avg_pool_size,
                                              pool_stride=avg_pool_size,
                                              bias=False, wide=True, **kwargs)
def __init__(self, in_channels, out_channels, expansion_factor, stride=1, bias=False,
             depthwise_bias=False, **kwargs):
    """Build an inverted-residual bottleneck unit.

    A 1x1 expansion, a 3x3 depthwise convolution (max-pooled when
    ``stride != 1``), and a 1x1 linear projection, with an additive
    residual only when input and output shapes match.
    """
    super().__init__()
    self.stride = stride
    hidden_channels = int(round(in_channels * expansion_factor))

    # 1x1 expansion — skipped entirely when it would be a no-op width-wise.
    if hidden_channels == in_channels:
        self.conv1 = ai8x.Empty()
    else:
        self.conv1 = ai8x.FusedConv2dBNReLU(in_channels, hidden_channels, 1,
                                            padding=0, bias=bias, **kwargs)

    # Depthwise 3x3: the layer class is selected by depthwise_bias
    # (BN variant when biased), and the max-pool variant implements the
    # stride via pooling.
    if stride == 1:
        dw_op = (ai8x.FusedDepthwiseConv2dBNReLU if depthwise_bias
                 else ai8x.FusedDepthwiseConv2dReLU)
        self.conv2 = dw_op(hidden_channels, hidden_channels, 3, padding=1,
                           stride=stride, bias=depthwise_bias, **kwargs)
    else:
        dw_op = (ai8x.FusedMaxPoolDepthwiseConv2dBNReLU if depthwise_bias
                 else ai8x.FusedMaxPoolDepthwiseConv2dReLU)
        self.conv2 = dw_op(hidden_channels, hidden_channels, 3, padding=1,
                           pool_size=stride, pool_stride=stride,
                           bias=depthwise_bias, **kwargs)

    # 1x1 linear projection (no activation).
    self.conv3 = ai8x.FusedConv2dBN(hidden_channels, out_channels, 1, bias=bias,
                                    **kwargs)

    # Residual add only when the shapes line up; otherwise a pass-through.
    if stride == 1 and in_channels == out_channels:
        self.resid = ai8x.Add()
    else:
        self.resid = self.NoResidual()
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, bias=False,
             se_ratio=None, expand_ratio=1, fused=False, **kwargs):
    """Build an MBConv (inverted bottleneck) block.

    Phases: optional expansion conv, depthwise conv (folded into the
    expansion conv when ``fused``), optional squeeze-and-excitation, and a
    1x1 linear projection. ``self.resid`` provides the additive skip
    connection — whether forward() applies it presumably depends on the
    stride/shape; confirm there.

    Fix: replaced the non-idiomatic ``fused is True`` / ``fused is not
    True`` identity comparisons with direct boolean tests (PEP 8 forbids
    comparing booleans with ``is``); behavior is unchanged for the boolean
    values callers pass.
    """
    super().__init__()
    # SE is active only for a ratio in (0, 1].
    self.has_se = (se_ratio is not None) and (0 < se_ratio <= 1)
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.stride = stride
    self.expand_ratio = expand_ratio
    self.fused = fused

    # Expansion phase (inverted bottleneck).
    inp = in_channels                # number of input channels
    out = in_channels * expand_ratio  # number of expanded channels
    if expand_ratio != 1:
        if fused:
            # Fused variant: the expansion conv uses the full kernel size
            # and stands in for the depthwise conv below.
            self.expand_conv = ai8x.FusedConv2dBNReLU(
                inp, out, kernel_size=kernel_size, padding=1, batchnorm='Affine',
                bias=bias, eps=1e-03, momentum=0.01, **kwargs)
        else:
            self.expand_conv = ai8x.FusedConv2dBNReLU(inp, out, 1, batchnorm='Affine',
                                                      bias=bias, eps=1e-03,
                                                      momentum=0.01, **kwargs)

    # Depthwise convolution phase (omitted when fused — see above).
    if not fused:
        self.depthwise_conv = ai8x.FusedConv2dBNReLU(
            in_channels=out, out_channels=out, groups=out,  # groups makes it depthwise
            padding=1, kernel_size=kernel_size, stride=stride, batchnorm='Affine',
            bias=bias, eps=1e-03, momentum=0.01, **kwargs)

    # Squeeze-and-excitation phase: bottleneck to max(1, in*se_ratio)
    # channels, then expand back.
    if self.has_se:
        num_squeezed_channels = max(1, int(in_channels * se_ratio))
        self.se_reduce = ai8x.FusedConv2dReLU(
            in_channels=out, out_channels=num_squeezed_channels, kernel_size=1,
            stride=1, bias=bias, **kwargs)
        self.se_expand = ai8x.Conv2d(in_channels=num_squeezed_channels,
                                     out_channels=out, kernel_size=1, stride=1,
                                     bias=bias, **kwargs)

    # Output projection phase: 1x1 linear (no activation).
    final_out = out_channels
    self.project_conv = ai8x.FusedConv2dBN(in_channels=out, out_channels=final_out,
                                           kernel_size=1, batchnorm='Affine',
                                           bias=bias, eps=1e-03, momentum=0.01,
                                           **kwargs)

    # Skip connection.
    self.resid = ai8x.Add()
def __init__(
        self,
        num_classes=100,
        num_channels=3,
        dimensions=(32, 32),  # pylint: disable=unused-argument
        bias=False,
        **kwargs):
    """Build an MBConv-based classifier.

    A pooled stem feeds nine MBConv blocks (the first five fused), a wide
    1x1 head conv, global average pooling, and a wide linear classifier.
    """
    super().__init__()

    # Stem layer: 2x2 max pool + 3x3 conv with affine BN.
    self.conv_stem = ai8x.FusedMaxPoolConv2dBNReLU(num_channels, 32, 3, pool_size=2,
                                                   pool_stride=2, stride=1,
                                                   batchnorm='Affine', padding=1,
                                                   bias=bias, eps=1e-03,
                                                   momentum=0.01, **kwargs)

    # MBConv blocks as (in_channels, out_channels, expand_ratio, fused);
    # kernel size 3 and no squeeze-excitation throughout. setattr keeps the
    # original attribute names mb_conv1..mb_conv9 and registration order.
    mb_cfgs = (
        (32, 16, 1, True),
        (16, 32, 4, True),
        (32, 32, 4, True),
        (32, 48, 4, True),
        (48, 48, 4, True),
        (48, 96, 4, False),
        (96, 96, 4, False),
        (96, 128, 4, False),
        (128, 128, 4, False),
    )
    for idx, (cin, cout, expand, fused) in enumerate(mb_cfgs, start=1):
        setattr(self, f'mb_conv{idx}',
                ai8x_blocks.MBConvBlock(cin, cout, 3, bias=bias, se_ratio=None,
                                        expand_ratio=expand, fused=fused, **kwargs))

    # Head layer: wide 1x1 conv with affine BN.
    self.conv_head = ai8x.FusedConv2dBNReLU(128, 1024, 1, stride=1,
                                            batchnorm='Affine', padding=0, bias=bias,
                                            eps=1e-03, momentum=0.01, **kwargs)

    # Global average pooling over the 16x16 feature map, then the linear
    # classifier.
    self.avg_pooling = ai8x.AvgPool2d((16, 16))
    # self.dropout = nn.Dropout(0.2)
    self.fc = ai8x.Linear(1024, num_classes, bias=bias, wide=True, **kwargs)