def __init__(
        self,
        num_classes=21,
        num_channels=128,
        dimensions=(128, 1),  # pylint: disable=unused-argument
        bias=False,
        **kwargs
):
    """Build an eight-stage fused 1D conv stack with a wide linear classifier.

    Args:
        num_classes: size of the classifier output (default 21).
        num_channels: number of input channels (default 128).
        dimensions: nominal input shape; accepted for API symmetry, unused.
        bias: forwarded to every ai8x layer.
        **kwargs: forwarded verbatim to every ai8x layer.
    """
    super().__init__()

    # Channel counts below are taken from the layer arguments; the time-axis
    # lengths follow the original author's annotations (pool stages halve the
    # time axis -- assumes ai8x pool defaults, confirm in ai8x).
    # In: 128 channels, T=128.
    self.conv1 = ai8x.FusedConv1dReLU(num_channels, 100, 1, stride=1,
                                      padding=0, bias=bias, **kwargs)
    # k=3, pad=0: T 128 -> 126; channels 100 -> 48.
    self.conv2 = ai8x.FusedConv1dReLU(100, 48, 3, stride=1, padding=0,
                                      bias=bias, **kwargs)
    # Max-pool then conv: T 126 -> 62 (per original notes); channels 48 -> 96.
    self.conv3 = ai8x.FusedMaxPoolConv1dReLU(48, 96, 3, stride=1, padding=1,
                                             bias=bias, **kwargs)
    # k=3, pad=0: T 62 -> 60; channels 96 -> 128.
    self.conv4 = ai8x.FusedConv1dReLU(96, 128, 3, stride=1, padding=0,
                                      bias=bias, **kwargs)
    # Max-pool then conv: T 60 -> 30; channels 128 -> 160.
    self.conv5 = ai8x.FusedMaxPoolConv1dReLU(128, 160, 3, stride=1, padding=1,
                                             bias=bias, **kwargs)
    # k=3, pad=0: T 30 -> 28; channels 160 -> 192.
    self.conv6 = ai8x.FusedConv1dReLU(160, 192, 3, stride=1, padding=0,
                                      bias=bias, **kwargs)
    # Avg-pool then conv: T 28 -> 14; channels stay at 192.
    # (The original inline note said "F: 256" here, which does not match the
    # 192 out_channels in the code.)
    self.conv7 = ai8x.FusedAvgPoolConv1dReLU(192, 192, 3, stride=1, padding=1,
                                             bias=bias, **kwargs)
    # k=3, pad=0: T 14 -> 12; channels 192 -> 32.
    self.conv8 = ai8x.FusedConv1dReLU(192, 32, 3, stride=1, padding=0,
                                      bias=bias, **kwargs)
    # Classifier over the flattened 32 channels x 12 time steps; wide=True
    # keeps the full-precision output range for the final layer.
    self.fc = ai8x.Linear(32 * 12, num_classes, bias=bias, wide=True, **kwargs)
def __init__(self, num_classes=2, num_channels=1, dimensions=(22, 1),
             fc_inputs=16, bias=False):
    """Build a three-layer length-preserving 1D conv net with a linear head.

    Args:
        num_classes: classifier output size (default 2).
        num_channels: number of input channels (default 1).
        dimensions: input shape; only dimensions[0] (time axis) is used.
        fc_inputs: channel count fed into the final linear layer.
        bias: forwarded to every ai8x layer.
    """
    super().__init__()

    # k=5 with padding=2 preserves the time axis, so the flattened size for
    # the classifier is simply fc_inputs * dimensions[0].
    time_steps = dimensions[0]

    self.mfcc_conv1 = ai8x.FusedConv1dReLU(num_channels, 64, 5, stride=1,
                                           padding=2, bias=bias)
    self.dropout1 = nn.Dropout(0.2)
    self.mfcc_conv2 = ai8x.FusedConv1dReLU(64, 32, 5, stride=1, padding=2,
                                           bias=bias)
    self.mfcc_conv4 = ai8x.FusedConv1dReLU(32, fc_inputs, 5, stride=1,
                                           padding=2, bias=bias)
    self.fc = ai8x.Linear(fc_inputs * time_steps, num_classes, bias=bias)

    # NOTE(review): this model defines only Conv1d-based layers, so the
    # Conv2d isinstance check below appears to never match -- verify whether
    # Conv1d weights were also meant to get Kaiming initialization.
    for layer in self.modules():
        if isinstance(layer, nn.Conv2d):
            nn.init.kaiming_normal_(layer.weight, mode='fan_out',
                                    nonlinearity='relu')
def __init__(
        self,
        num_classes=21,
        num_channels=128,
        dimensions=(128, 1),  # pylint: disable=unused-argument
        fc_inputs=7,
        bias=False,
        **kwargs
):
    """Build a 1x1-Conv1d front end followed by a 3x3-Conv2d back end.

    Args:
        num_classes: classifier output size (default 21).
        num_channels: number of input channels (default 128).
        dimensions: nominal input shape; accepted for API symmetry, unused.
        fc_inputs: channel count produced by the last 2D conv stage.
        bias: forwarded to every ai8x layer.
        **kwargs: forwarded verbatim to every ai8x layer.
    """
    super().__init__()

    # Front end: 1x1 1D convs squeeze the channel axis 128 -> 100 -> 100
    # -> 50 -> 16 without touching the time axis.
    self.voice_conv1 = ai8x.FusedConv1dReLU(num_channels, 100, 1, stride=1,
                                            padding=0, bias=bias, **kwargs)
    self.voice_conv2 = ai8x.FusedConv1dReLU(100, 100, 1, stride=1,
                                            padding=0, bias=bias, **kwargs)
    self.voice_conv3 = ai8x.FusedConv1dReLU(100, 50, 1, stride=1,
                                            padding=0, bias=bias, **kwargs)
    self.voice_conv4 = ai8x.FusedConv1dReLU(50, 16, 1, stride=1,
                                            padding=0, bias=bias, **kwargs)

    # Back end: padding=1 with k=3 keeps the spatial size; channels run
    # 16 -> 32 -> 64 -> 64 -> 30 -> fc_inputs. The 1D-to-2D handoff is
    # presumably a reshape in forward() -- not visible here, confirm there.
    self.kws_conv1 = ai8x.FusedConv2dReLU(16, 32, 3, stride=1, padding=1,
                                          bias=bias, **kwargs)
    self.kws_conv2 = ai8x.FusedConv2dReLU(32, 64, 3, stride=1, padding=1,
                                          bias=bias, **kwargs)
    self.kws_conv3 = ai8x.FusedConv2dReLU(64, 64, 3, stride=1, padding=1,
                                          bias=bias, **kwargs)
    self.kws_conv4 = ai8x.FusedConv2dReLU(64, 30, 3, stride=1, padding=1,
                                          bias=bias, **kwargs)
    self.kws_conv5 = ai8x.FusedConv2dReLU(30, fc_inputs, 3, stride=1,
                                          padding=1, bias=bias, **kwargs)

    # Classifier over fc_inputs channels x 128 flattened positions.
    self.fc = ai8x.Linear(fc_inputs * 128, num_classes, bias=bias, wide=True,
                          **kwargs)

    # Kaiming-initialize the 2D conv weights wrapped by the ai8x modules.
    for layer in self.modules():
        if isinstance(layer, nn.Conv2d):
            nn.init.kaiming_normal_(layer.weight, mode='fan_out',
                                    nonlinearity='relu')
def __init__(
        self,
        num_classes=21,
        num_channels=128,
        dimensions=(128, 1),  # pylint: disable=unused-argument
        bias=False,
        **kwargs):
    """Build an eight-stage fused 1D conv net with dropout and a wide head.

    Args:
        num_classes: classifier output size (default 21).
        num_channels: number of input channels (default 128).
        dimensions: nominal input shape; accepted for API symmetry, unused.
        bias: forwarded to every ai8x layer.
        **kwargs: forwarded verbatim to every ai8x layer.
    """
    super().__init__()

    # Dropout module; applied wherever forward() chooses to use it.
    self.drop = nn.Dropout(p=0.2)

    # Channel counts come from the layer arguments; time-axis lengths follow
    # the original author's annotations (pool stages halve the time axis --
    # assumes ai8x pool defaults, confirm in ai8x).
    # In: 128 channels, T=128.
    self.voice_conv1 = ai8x.FusedConv1dReLU(num_channels, 100, 1, stride=1,
                                            padding=0, bias=bias, **kwargs)
    # k=3, pad=0: T 128 -> 126; channels 100 -> 96.
    self.voice_conv2 = ai8x.FusedConv1dReLU(100, 96, 3, stride=1, padding=0,
                                            bias=bias, **kwargs)
    # Max-pool then conv: T 126 -> 62 (per original notes); channels 96 -> 64.
    self.voice_conv3 = ai8x.FusedMaxPoolConv1dReLU(96, 64, 3, stride=1,
                                                   padding=1, bias=bias,
                                                   **kwargs)
    # k=3, pad=0: T 62 -> 60; channels 64 -> 48.
    self.voice_conv4 = ai8x.FusedConv1dReLU(64, 48, 3, stride=1, padding=0,
                                            bias=bias, **kwargs)
    # Max-pool then conv: T 60 -> 30; channels 48 -> 64.
    self.kws_conv1 = ai8x.FusedMaxPoolConv1dReLU(48, 64, 3, stride=1,
                                                 padding=1, bias=bias,
                                                 **kwargs)
    # k=3, pad=0: T 30 -> 28; channels 64 -> 96.
    self.kws_conv2 = ai8x.FusedConv1dReLU(64, 96, 3, stride=1, padding=0,
                                          bias=bias, **kwargs)
    # Avg-pool then conv: T 28 -> 14; channels 96 -> 100.
    self.kws_conv3 = ai8x.FusedAvgPoolConv1dReLU(96, 100, 3, stride=1,
                                                 padding=1, bias=bias,
                                                 **kwargs)
    # Max-pool then k=6 conv: channels 100 -> 64; the original note "T: 2,
    # F: 128" does not match the 64 out_channels here, but 64 channels x 4
    # remaining time steps would give the 256 inputs the classifier expects
    # -- confirm against forward().
    self.kws_conv4 = ai8x.FusedMaxPoolConv1dReLU(100, 64, 6, stride=1,
                                                 padding=1, bias=bias,
                                                 **kwargs)
    # Classifier over the 256 flattened features; wide=True keeps the
    # full-precision output range for the final layer.
    self.fc = ai8x.Linear(256, num_classes, bias=bias, wide=True, **kwargs)