def __init__(
        self,
        num_classes=21,
        num_channels=128,
        dimensions=(128, 1),  # pylint: disable=unused-argument
        bias=False,
        **kwargs
):
    """
    1D keyword-spotting backbone: eight fused Conv1d+BN(Affine)+ReLU stages
    with interleaved max/avg pooling, followed by a wide linear classifier
    over the flattened 32-channel x 12-sample feature map.
    """
    super().__init__()

    # Input: T=128 samples, 128 channels
    self.conv1 = ai8x.FusedConv1dBNReLU(num_channels, 100, 1, stride=1, padding=0,
                                        bias=bias, batchnorm='Affine', **kwargs)
    # T: 128, C: 100
    self.conv2 = ai8x.FusedConv1dBNReLU(100, 48, 3, stride=1, padding=0,
                                        bias=bias, batchnorm='Affine', **kwargs)
    # T: 126, C: 48
    self.conv3 = ai8x.FusedMaxPoolConv1dBNReLU(48, 96, 3, stride=1, padding=1,
                                               bias=bias, batchnorm='Affine', **kwargs)
    # T: 62, C: 96 (per original layout notes -- TODO confirm exact T)
    self.conv4 = ai8x.FusedConv1dBNReLU(96, 128, 3, stride=1, padding=0,
                                        bias=bias, batchnorm='Affine', **kwargs)
    # T: 60, C: 128
    self.conv5 = ai8x.FusedMaxPoolConv1dBNReLU(128, 160, 3, stride=1, padding=1,
                                               bias=bias, batchnorm='Affine', **kwargs)
    # T: 30, C: 160
    self.conv6 = ai8x.FusedConv1dBNReLU(160, 192, 3, stride=1, padding=0,
                                        bias=bias, batchnorm='Affine', **kwargs)
    # T: 28, C: 192
    self.conv7 = ai8x.FusedAvgPoolConv1dBNReLU(192, 192, 3, stride=1, padding=1,
                                               bias=bias, batchnorm='Affine', **kwargs)
    # T: 14, C: 192
    self.conv8 = ai8x.FusedConv1dBNReLU(192, 32, 3, stride=1, padding=0,
                                        bias=bias, batchnorm='Affine', **kwargs)
    # T: 12, C: 32 -> classifier expects 32 * 12 = 384 flattened inputs
    self.fc = ai8x.Linear(32 * 12, num_classes, bias=bias, wide=True, **kwargs)
def __init__(self, num_classes=2, num_channels=1, dimensions=(80, 80),  # pylint: disable=unused-argument
             planes=8, pool=2, fc_inputs=2, bias=False, **kwargs):
    """
    Transfer-learning head: loads a pretrained MiniVggNet feature extractor
    from a checkpoint, freezes its weights, and attaches a new 4-output
    linear layer (bounding-box regression) on the 64x3x3 conv features.
    """
    super().__init__()

    # Load the pretrained model; only the model object is needed here, so the
    # checkpoint's scheduler/optimizer/epoch entries are deliberately discarded
    # (the original code bound them to unused local names).
    self.feature_extractor = MiniVggNet(**kwargs)
    model, _, _, _ = apputils.load_checkpoint(
        self.feature_extractor, "../ai8x-synthesis/trained/mini_vgg_net.pth.tar")
    self.feature_extractor = model

    # Freeze all pretrained weights so only the new head is trained.
    for param in self.feature_extractor.parameters():
        param.requires_grad = False

    # New fully connected layer: 64*3*3 conv features -> 4 bounding-box values.
    self.fc3 = ai8x.Linear(64 * 3 * 3, 4, bias=False, wide=True, **kwargs)

    # NOTE(review): this loop re-initializes EVERY nn.Linear in the module
    # tree, including linear layers inside the just-loaded, frozen feature
    # extractor -- that clobbers pretrained weights. Confirm whether it should
    # target self.fc3 only. Behavior intentionally left unchanged here.
    for m in self.modules():
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
def __init__(self, num_classes=10, num_channels=3, dimensions=(28, 28),
             fc_inputs=8, bias=False):
    """
    Compact AI84 CNN: three fused conv stages (two with max pooling) plus a
    wide linear classifier. Supports any square input; 28x28 inputs get extra
    padding so the pooled map comes out 16x16.
    """
    super().__init__()

    # AI84 limit: only square inputs are supported.
    assert dimensions[0] == dimensions[1]

    # Track the spatial side length so one constructor works for all sizes.
    side = dimensions[0]

    # padding 1 keeps the side length unchanged -> 8 x side x side
    self.conv1 = ai8x.FusedConv2dReLU(num_channels, 8, 3, padding=1, bias=bias)

    # For 28x28 inputs pad by 2 so the pooled map grows from 14x14 to 16x16.
    pad = 2 if side == 28 else 1
    self.conv2 = ai8x.FusedMaxPoolConv2dReLU(8, 8, 3, pool_size=2, pool_stride=2,
                                             padding=pad, bias=bias)
    side //= 2          # 2x2 max pool halves the side
    if pad == 2:
        side += 2       # extra padding: 14 -> 16

    self.conv3 = ai8x.FusedMaxPoolConv2dReLU(8, fc_inputs, 3, pool_size=4,
                                             pool_stride=4, padding=1, bias=bias)
    side //= 4          # 4x4 max pool

    # Classifier uses bias=True regardless of the `bias` argument used for the
    # conv layers (matches the original behavior -- confirm intent).
    self.fc = ai8x.Linear(fc_inputs * side * side, num_classes, bias=True, wide=True)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def __init__(
        self,
        num_classes=10,
        num_channels=3,
        dimensions=(32, 32),  # pylint: disable=unused-argument
        bias=False,
        **kwargs
):
    """
    Darknet-style CIFAR classifier: alternating 3x3 and 1x1 fused
    Conv2d+BN(NoAffine)+ReLU layers with periodic max pooling, ending in a
    wide linear layer over 512 flattened features.
    """
    super().__init__()

    # Stage 1: full resolution
    self.conv1_1 = ai8x.FusedConv2dBNReLU(num_channels, 64, 3, stride=1, padding=1,
                                          bias=bias, batchnorm='NoAffine', **kwargs)
    self.conv1_2 = ai8x.FusedConv2dBNReLU(64, 32, 1, stride=1, padding=0,
                                          bias=bias, batchnorm='NoAffine', **kwargs)
    self.conv1_3 = ai8x.FusedConv2dBNReLU(32, 64, 3, stride=1, padding=1,
                                          bias=bias, batchnorm='NoAffine', **kwargs)

    # Stage 2: first 2x2 max pool
    self.conv2_1 = ai8x.FusedMaxPoolConv2dBNReLU(64, 32, 3, stride=1, padding=1,
                                                 bias=bias, batchnorm='NoAffine', **kwargs)
    self.conv2_2 = ai8x.FusedConv2dBNReLU(32, 64, 1, stride=1, padding=0,
                                          bias=bias, batchnorm='NoAffine', **kwargs)

    # Stage 3
    self.conv3_1 = ai8x.FusedMaxPoolConv2dBNReLU(64, 128, 3, stride=1, padding=1,
                                                 bias=bias, batchnorm='NoAffine', **kwargs)
    self.conv3_2 = ai8x.FusedConv2dBNReLU(128, 128, 1, stride=1, padding=0,
                                          bias=bias, batchnorm='NoAffine', **kwargs)

    # Stage 4
    self.conv4_1 = ai8x.FusedMaxPoolConv2dBNReLU(128, 64, 3, stride=1, padding=1,
                                                 bias=bias, batchnorm='NoAffine', **kwargs)
    self.conv4_2 = ai8x.FusedConv2dBNReLU(64, 128, 3, stride=1, padding=1,
                                          bias=bias, batchnorm='NoAffine', **kwargs)

    # Stage 5: final pool + 1x1 conv
    self.conv5_1 = ai8x.FusedMaxPoolConv2dBNReLU(128, 128, 1, stride=1, padding=0,
                                                 bias=bias, batchnorm='NoAffine', **kwargs)

    # Classifier expects 512 flattened features from the final feature map.
    self.fc = ai8x.Linear(512, num_classes, bias=bias, wide=True, **kwargs)
def __init__(self, num_classes=2, num_channels=1, dimensions=(22, 1),
             fc_inputs=16, bias=False):
    """
    1D MFCC classifier: three fused Conv1d+ReLU layers (kernel 5, padding 2,
    so the sequence length is preserved) followed by a linear head over
    fc_inputs * dimensions[0] features.
    """
    super().__init__()

    dim1 = dimensions[0]  # sequence length, unchanged by the padded convs

    self.mfcc_conv1 = ai8x.FusedConv1dReLU(num_channels, 64, 5, stride=1,
                                           padding=2, bias=bias)
    self.dropout1 = nn.Dropout(0.2)
    self.mfcc_conv2 = ai8x.FusedConv1dReLU(64, 32, 5, stride=1,
                                           padding=2, bias=bias)
    self.mfcc_conv4 = ai8x.FusedConv1dReLU(32, fc_inputs, 5, stride=1,
                                           padding=2, bias=bias)

    self.fc = ai8x.Linear(fc_inputs * dim1, num_classes, bias=bias)

    # Bug fix: this model is built entirely from 1D convolutions, but the
    # original loop tested isinstance(m, nn.Conv2d), so the Kaiming
    # initialization never ran (dead code). Accept Conv1d (and Conv2d,
    # harmlessly) so the intended initialization actually applies.
    for m in self.modules():
        if isinstance(m, (nn.Conv1d, nn.Conv2d)):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def __init__(
        self,
        num_classes=21,
        num_channels=128,
        dimensions=(128, 1),  # pylint: disable=unused-argument
        fc_inputs=7,
        bias=False,
        **kwargs
):
    """
    Keyword-spotting network with a 1D front end and a 2D back end: four
    1x1 Conv1d+ReLU layers compress the 128 input channels down to 16,
    then five 3x3 Conv2d+ReLU layers feed a wide linear classifier.
    """
    super().__init__()

    # 1D front end: 1x1 convs reduce channel count 128 -> 100 -> 100 -> 50 -> 16.
    self.voice_conv1 = ai8x.FusedConv1dReLU(num_channels, 100, 1, stride=1,
                                            padding=0, bias=bias, **kwargs)
    self.voice_conv2 = ai8x.FusedConv1dReLU(100, 100, 1, stride=1,
                                            padding=0, bias=bias, **kwargs)
    self.voice_conv3 = ai8x.FusedConv1dReLU(100, 50, 1, stride=1,
                                            padding=0, bias=bias, **kwargs)
    self.voice_conv4 = ai8x.FusedConv1dReLU(50, 16, 1, stride=1,
                                            padding=0, bias=bias, **kwargs)

    # 2D back end: padding 1 preserves spatial dimensions throughout.
    self.kws_conv1 = ai8x.FusedConv2dReLU(16, 32, 3, stride=1, padding=1,
                                          bias=bias, **kwargs)
    self.kws_conv2 = ai8x.FusedConv2dReLU(32, 64, 3, stride=1, padding=1,
                                          bias=bias, **kwargs)
    self.kws_conv3 = ai8x.FusedConv2dReLU(64, 64, 3, stride=1, padding=1,
                                          bias=bias, **kwargs)
    self.kws_conv4 = ai8x.FusedConv2dReLU(64, 30, 3, stride=1, padding=1,
                                          bias=bias, **kwargs)
    self.kws_conv5 = ai8x.FusedConv2dReLU(30, fc_inputs, 3, stride=1, padding=1,
                                          bias=bias, **kwargs)

    self.fc = ai8x.Linear(fc_inputs * 128, num_classes, bias=bias, wide=True, **kwargs)

    # Kaiming-initialize the 2D conv weights (the 1D layers are not touched).
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def __init__(self, num_classes, num_channels, dimensions, bias, n_units,
             depth_list, width_list, kernel_list, bn, unit, **kwargs):
    """
    Generic stacked-unit network: builds `n_units` instances of `unit`
    (every unit after the first pools, halving each spatial dimension) and
    finishes with a wide linear classifier over the flattened features.
    """
    super().__init__()

    # Keep the full configuration around for introspection/serialization.
    self.num_classes = num_classes
    self.num_channels = num_channels
    self.dimensions = dimensions
    self.bias = bias
    self.n_units = n_units
    self.depth_list = depth_list
    self.width_list = width_list
    self.kernel_list = kernel_list
    self.bn = bn
    self.unit = unit

    # A one-element tuple or a (dim, 1) tuple means the input is 1D.
    inp_2d = len(dimensions) > 1 and dimensions[1] != 1

    dim1 = dimensions[0]
    dim2 = dimensions[1] if inp_2d else 1

    self.units = nn.ModuleList([])
    last_width = num_channels
    for idx in range(n_units):
        pooling = idx > 0  # every unit after the first pools
        if pooling:
            dim1 //= 2
            if inp_2d:
                dim2 //= 2
        self.units.append(
            unit(depth_list[idx], kernel_list[idx], width_list[idx],
                 last_width, bias, pooling, bn, **kwargs))
        last_width = width_list[idx]

    self.classifier = ai8x.Linear(dim1 * dim2 * last_width, num_classes,
                                  bias=bias, wide=True, **kwargs)
def __init__(self, num_classes=2, num_channels=3, dimensions=(64, 64),
             fc_inputs=30, bias=False, **kwargs):
    """
    AI85 two-class image classifier for square inputs: six fused conv stages
    (four with 2x2 max pooling) followed by a linear head over the flattened
    fc_inputs x side x side feature map.
    """
    super().__init__()

    # AI85 limit: only square inputs are supported.
    assert dimensions[0] == dimensions[1]

    # Track the spatial side length so one constructor works for all sizes.
    side = dimensions[0]

    # padding 1 keeps the side unchanged -> 15 x side x side
    self.conv1 = ai8x.FusedConv2dReLU(num_channels, 15, 3, padding=1,
                                      bias=bias, **kwargs)

    # 28x28 inputs get extra padding so the pooled map grows by 2 per side.
    pad = 2 if side == 28 else 1
    self.conv2 = ai8x.FusedMaxPoolConv2dReLU(15, 30, 3, pool_size=2, pool_stride=2,
                                             padding=pad, bias=bias, **kwargs)
    side //= 2          # 2x2 max pool halves the side
    if pad == 2:
        side += 2       # extra padding case

    self.conv3 = ai8x.FusedMaxPoolConv2dReLU(30, 60, 3, pool_size=2, pool_stride=2,
                                             padding=1, bias=bias, **kwargs)
    side //= 2

    self.conv4 = ai8x.FusedMaxPoolConv2dReLU(60, 30, 3, pool_size=2, pool_stride=2,
                                             padding=1, bias=bias, **kwargs)
    side //= 2

    self.conv5 = ai8x.FusedMaxPoolConv2dReLU(30, 30, 3, pool_size=2, pool_stride=2,
                                             padding=1, bias=bias, **kwargs)
    side //= 2

    self.conv6 = ai8x.FusedConv2dReLU(30, fc_inputs, 3, padding=1,
                                      bias=bias, **kwargs)

    # Classifier uses bias=True regardless of the conv `bias` argument
    # (matches the original behavior -- confirm intent).
    self.fc = ai8x.Linear(fc_inputs * side * side, num_classes, bias=True, **kwargs)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def __init__(self, num_classes=10, num_channels=3, dimensions=(28, 28),
             planes=60, pool=2, fc_inputs=12, bias=False):
    """
    Five-layer AI85 classifier for MNIST/CIFAR-sized inputs: four fused conv
    stages sized so the total channel usage stays within the device weight
    limits, plus a wide linear head.
    """
    super().__init__()

    # Device limits: channel counts must fit the weight memory layout.
    assert planes + num_channels <= ai8x.dev.WEIGHT_INPUTS
    assert planes + fc_inputs <= ai8x.dev.WEIGHT_DEPTH - 1

    # Track the spatial side length so one constructor works for all sizes.
    side = dimensions[0]

    # padding 1 -> unchanged side (MNIST: 28x28, CIFAR: 32x32)
    self.conv1 = ai8x.FusedConv2dReLU(num_channels, planes, 3, padding=1, bias=bias)

    # MNIST inputs get padding 2 so both datasets pool down to 16x16.
    pad = 2 if side == 28 else 1
    self.conv2 = ai8x.FusedMaxPoolConv2dReLU(planes, planes, 3, pool_size=2,
                                             pool_stride=2, padding=pad, bias=bias)
    side //= 2          # 2x2 max pool: MNIST 14x14 | CIFAR 16x16
    if pad == 2:
        side += 2       # MNIST: 14 -> 16

    # Third stage sized so planes + conv3-out + fc_inputs == 128 channels total.
    self.conv3 = ai8x.FusedMaxPoolConv2dReLU(planes, 128 - planes - fc_inputs, 3,
                                             pool_size=2, pool_stride=2,
                                             padding=1, bias=bias)
    side //= 2          # -> 8x8

    self.conv4 = ai8x.FusedAvgPoolConv2dReLU(128 - planes - fc_inputs, fc_inputs, 3,
                                             pool_size=pool, pool_stride=2,
                                             padding=1, bias=bias)
    side //= pool       # -> 4x4 for the default pool=2

    # Classifier uses bias=True regardless of the conv `bias` argument
    # (matches the original behavior -- confirm intent).
    self.fc = ai8x.Linear(fc_inputs * side * side, num_classes, bias=True, wide=True)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def __init__(
        self,
        num_classes=21,
        num_channels=128,
        dimensions=(128, 1),  # pylint: disable=unused-argument
        bias=False,
        **kwargs):
    """
    KWS20-style 1D keyword-spotting network: eight fused Conv1d+ReLU stages
    with interleaved max/avg pooling and a dropout layer, ending in a wide
    linear classifier over 256 flattened features.
    """
    super().__init__()

    self.drop = nn.Dropout(p=0.2)

    # Front end: 1x1 conv maps the 128 input channels to 100 (T unchanged).
    self.voice_conv1 = ai8x.FusedConv1dReLU(num_channels, 100, 1, stride=1,
                                            padding=0, bias=bias, **kwargs)
    # kernel 3, no padding: T shrinks by 2
    self.voice_conv2 = ai8x.FusedConv1dReLU(100, 96, 3, stride=1,
                                            padding=0, bias=bias, **kwargs)
    # max pool halves T; padded conv keeps it
    self.voice_conv3 = ai8x.FusedMaxPoolConv1dReLU(96, 64, 3, stride=1,
                                                   padding=1, bias=bias, **kwargs)
    self.voice_conv4 = ai8x.FusedConv1dReLU(64, 48, 3, stride=1,
                                            padding=0, bias=bias, **kwargs)

    # Back end
    self.kws_conv1 = ai8x.FusedMaxPoolConv1dReLU(48, 64, 3, stride=1,
                                                 padding=1, bias=bias, **kwargs)
    self.kws_conv2 = ai8x.FusedConv1dReLU(64, 96, 3, stride=1,
                                          padding=0, bias=bias, **kwargs)
    self.kws_conv3 = ai8x.FusedAvgPoolConv1dReLU(96, 100, 3, stride=1,
                                                 padding=1, bias=bias, **kwargs)
    # kernel 6 with padding 1 after a max pool shrinks T further
    self.kws_conv4 = ai8x.FusedMaxPoolConv1dReLU(100, 64, 6, stride=1,
                                                 padding=1, bias=bias, **kwargs)

    # Classifier: the flattened conv output must total 256 features
    # (presumably 64 channels x 4 samples -- TODO confirm against forward()).
    self.fc = ai8x.Linear(256, num_classes, bias=bias, wide=True, **kwargs)
def __init__(
        self,
        num_classes=100,
        num_channels=3,
        dimensions=(32, 32),  # pylint: disable=unused-argument
        bias=False,
        **kwargs):
    """
    EfficientNet-style classifier: a pooled conv stem, nine MBConv blocks
    (the first five fused), a 1x1 conv head expanding to 1024 channels,
    16x16 average pooling, and a wide linear classifier.
    """
    super().__init__()

    # Stem: 2x2 max pool then 3x3 conv+BN+ReLU to 32 channels.
    self.conv_stem = ai8x.FusedMaxPoolConv2dBNReLU(num_channels, 32, 3,
                                                   pool_size=2, pool_stride=2,
                                                   stride=1, batchnorm='Affine',
                                                   padding=1, bias=bias,
                                                   eps=1e-03, momentum=0.01, **kwargs)

    # MBConv stack: fused variants first, standard (depthwise) variants later.
    self.mb_conv1 = ai8x_blocks.MBConvBlock(32, 16, 3, bias=bias, se_ratio=None,
                                            expand_ratio=1, fused=True, **kwargs)
    self.mb_conv2 = ai8x_blocks.MBConvBlock(16, 32, 3, bias=bias, se_ratio=None,
                                            expand_ratio=4, fused=True, **kwargs)
    self.mb_conv3 = ai8x_blocks.MBConvBlock(32, 32, 3, bias=bias, se_ratio=None,
                                            expand_ratio=4, fused=True, **kwargs)
    self.mb_conv4 = ai8x_blocks.MBConvBlock(32, 48, 3, bias=bias, se_ratio=None,
                                            expand_ratio=4, fused=True, **kwargs)
    self.mb_conv5 = ai8x_blocks.MBConvBlock(48, 48, 3, bias=bias, se_ratio=None,
                                            expand_ratio=4, fused=True, **kwargs)
    self.mb_conv6 = ai8x_blocks.MBConvBlock(48, 96, 3, bias=bias, se_ratio=None,
                                            expand_ratio=4, fused=False, **kwargs)
    self.mb_conv7 = ai8x_blocks.MBConvBlock(96, 96, 3, bias=bias, se_ratio=None,
                                            expand_ratio=4, fused=False, **kwargs)
    self.mb_conv8 = ai8x_blocks.MBConvBlock(96, 128, 3, bias=bias, se_ratio=None,
                                            expand_ratio=4, fused=False, **kwargs)
    self.mb_conv9 = ai8x_blocks.MBConvBlock(128, 128, 3, bias=bias, se_ratio=None,
                                            expand_ratio=4, fused=False, **kwargs)

    # Head: 1x1 conv expands to 1024 channels before pooling.
    self.conv_head = ai8x.FusedConv2dBNReLU(128, 1024, 1, stride=1,
                                            batchnorm='Affine', padding=0,
                                            bias=bias, eps=1e-03,
                                            momentum=0.01, **kwargs)

    # Global 16x16 average pool then the wide linear classifier.
    self.avg_pooling = ai8x.AvgPool2d((16, 16))
    # self.dropout = nn.Dropout(0.2)
    self.fc = ai8x.Linear(1024, num_classes, bias=bias, wide=True, **kwargs)
def __init__(self, num_classes=2, num_channels=1, dimensions=(80, 80),  # pylint: disable=unused-argument
             planes=8, pool=2, fc_inputs=2, bias=False, **kwargs):
    """
    VGG-style classifier for 80x80 inputs: paired 3x3 conv layers with 2x2
    max pooling down to a 64x3x3 feature map, then two linear layers.
    """
    super().__init__()

    # Device limits: channel counts must fit the weight memory layout.
    assert planes + num_channels <= ai8x.dev.WEIGHT_INPUTS
    assert planes + fc_inputs <= ai8x.dev.WEIGHT_DEPTH - 1

    # Fix: honor num_channels instead of the hard-coded 1 input channel
    # (the default num_channels=1 keeps existing behavior).
    # num_channels x 80x80 -> 8x80x80 (padding 1 preserves dimensions)
    self.conv1 = ai8x.FusedConv2dReLU(num_channels, 8, 3, padding=1,
                                      bias=False, **kwargs)
    self.conv2 = ai8x.FusedConv2dReLU(8, 8, 3, padding=1, bias=False, **kwargs)

    # 8x80x80 -> 16x40x40
    self.conv3 = ai8x.FusedMaxPoolConv2dReLU(8, 16, 3, padding=1, bias=False,
                                             pool_size=2, pool_stride=2, **kwargs)
    self.conv4 = ai8x.FusedConv2dReLU(16, 16, 3, padding=1, bias=False, **kwargs)

    # 16x40x40 -> 32x20x20
    self.conv5 = ai8x.FusedMaxPoolConv2dReLU(16, 32, 3, padding=1, bias=False,
                                             pool_size=2, pool_stride=2, **kwargs)
    self.conv6 = ai8x.FusedConv2dReLU(32, 32, 3, padding=1, bias=False, **kwargs)

    # 32x20x20 -> 64x12x12 (padding 2 grows the map before the next pools)
    self.conv7 = ai8x.FusedMaxPoolConv2dReLU(32, 64, 3, padding=2, bias=False,
                                             pool_size=2, pool_stride=2, **kwargs)
    self.conv8 = ai8x.FusedConv2dReLU(64, 64, 3, padding=1, bias=False, **kwargs)

    # 64x12x12 -> 64x6x6
    self.conv9 = ai8x.FusedMaxPoolConv2dReLU(64, 64, 3, padding=1, bias=False,
                                             pool_size=2, pool_stride=2, **kwargs)
    # 64x6x6 -> 64x3x3
    self.conv10 = ai8x.FusedMaxPoolConv2dReLU(64, 64, 3, padding=1, bias=False,
                                              pool_size=2, pool_stride=2, **kwargs)

    # Flatten into the fully connected classifier.
    self.fc1 = ai8x.FusedLinearReLU(64 * 3 * 3, 10, bias=True, **kwargs)
    # Fix: honor num_classes instead of the hard-coded 2 outputs
    # (the default num_classes=2 keeps existing behavior).
    self.fc2 = ai8x.Linear(10, num_classes, bias=True, wide=True, **kwargs)

    # NOTE(review): planes, pool, fc_inputs and the `bias` argument are not
    # used by this constructor (all conv layers hard-code bias=False) --
    # confirm whether they should be wired in or removed from the signature.
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')