Example #1
0
    def __init__(
            self,
            num_classes=None,  # pylint: disable=unused-argument
            num_channels=3,
            dimensions=(160, 120),  # pylint: disable=unused-argument
            bias=True,
            **kwargs
    ):
        """Build the fixed conv stack; `bias` is honored from conv3 onward only."""
        super().__init__()

        # Shared settings for the 2x2-max-pool + 3x3 same-padding conv stages.
        pool3x3 = dict(pool_size=2, pool_stride=2, padding=1)

        self.conv1 = ai8x.FusedConv2dReLU(num_channels, 16, 3, padding=1, bias=False,
                                          **kwargs)
        self.conv2 = ai8x.FusedMaxPoolConv2dReLU(16, 32, 3, bias=False, **pool3x3,
                                                 **kwargs)
        self.conv3 = ai8x.FusedMaxPoolConv2dReLU(32, 32, 3, bias=bias, **pool3x3,
                                                 **kwargs)
        self.conv4 = ai8x.FusedMaxPoolConv2dReLU(32, 64, 3, bias=bias, **pool3x3,
                                                 **kwargs)
        self.conv5 = ai8x.FusedMaxPoolConv2dReLU(64, 64, 3, bias=bias, **pool3x3,
                                                 **kwargs)
        self.conv6 = ai8x.FusedConv2dReLU(64, 64, 3, padding=1, bias=bias, **kwargs)
        self.conv7 = ai8x.FusedConv2dReLU(64, 64, 3, padding=1, bias=bias, **kwargs)
        # Pointwise widening stage (no ReLU fused in), then spatial averaging.
        self.conv8 = ai8x.FusedMaxPoolConv2d(64, 512, 1, pool_size=2, pool_stride=2,
                                             padding=0, bias=False, **kwargs)
        self.avgpool = ai8x.AvgPool2d((5, 3))
Example #2
0
 def __init__(self, in_planes, squeeze_planes, expand1x1_planes,
              expand3x3_planes, bias=True, **kwargs):
     """Fire-style block: a 1x1 squeeze feeding parallel 1x1 and 3x3 expands."""
     super().__init__()

     # 1x1 squeeze reduces the channel count before the expand branches.
     self.squeeze_layer = ai8x.FusedConv2dReLU(
         in_channels=in_planes, out_channels=squeeze_planes,
         kernel_size=1, bias=bias, **kwargs)

     # Two parallel expand branches over the squeezed features.
     self.expand1x1_layer = ai8x.FusedConv2dReLU(
         in_channels=squeeze_planes, out_channels=expand1x1_planes,
         kernel_size=1, bias=bias, **kwargs)
     self.expand3x3_layer = ai8x.FusedConv2dReLU(
         in_channels=squeeze_planes, out_channels=expand3x3_planes,
         kernel_size=3, padding=1, bias=bias, **kwargs)
Example #3
0
    def __init__(
            self,
            num_classes=100,
            num_channels=3,
            dimensions=(32, 32),  # pylint: disable=unused-argument
            bias=False,
            **kwargs
    ):
        """14-conv net ending in a wide 1x1 classifier convolution."""
        super().__init__()

        # Shared settings for all 3x3, stride-1, same-padding conv stages.
        c3 = dict(stride=1, padding=1, bias=bias, **kwargs)

        self.conv1 = ai8x.FusedConv2dReLU(num_channels, 16, 3, **c3)
        self.conv2 = ai8x.FusedConv2dReLU(16, 20, 3, **c3)
        self.conv3 = ai8x.FusedConv2dReLU(20, 20, 3, **c3)
        self.conv4 = ai8x.FusedConv2dReLU(20, 20, 3, **c3)
        self.conv5 = ai8x.FusedMaxPoolConv2dReLU(20, 20, 3, pool_size=2, pool_stride=2,
                                                 **c3)
        self.conv6 = ai8x.FusedConv2dReLU(20, 20, 3, **c3)
        self.conv7 = ai8x.FusedConv2dReLU(20, 44, 3, **c3)
        self.conv8 = ai8x.FusedMaxPoolConv2dReLU(44, 48, 3, pool_size=2, pool_stride=2,
                                                 **c3)
        self.conv9 = ai8x.FusedConv2dReLU(48, 48, 3, **c3)
        self.conv10 = ai8x.FusedMaxPoolConv2dReLU(48, 96, 3, pool_size=2, pool_stride=2,
                                                  **c3)
        # Pointwise stage: pool down, widen to 512, then compress to 128.
        self.conv11 = ai8x.FusedMaxPoolConv2dReLU(96, 512, 1, pool_size=2, pool_stride=2,
                                                  padding=0, bias=bias, **kwargs)
        self.conv12 = ai8x.FusedConv2dReLU(512, 128, 1, stride=1, padding=0, bias=bias,
                                           **kwargs)
        self.conv13 = ai8x.FusedMaxPoolConv2dReLU(128, 128, 3, pool_size=2, pool_stride=2,
                                                  **c3)
        # Final 1x1 conv produces the wide classifier output.
        self.conv14 = ai8x.Conv2d(128, num_classes, 1, stride=1, padding=0, bias=bias,
                                  wide=True, **kwargs)
Example #4
0
    def __init__(self, num_classes=10, num_channels=3, dimensions=(28, 28),
                 fc_inputs=8, bias=False):
        """Small AI84 demo net: conv -> pool-conv -> pool-conv -> linear."""
        super().__init__()

        # AI84 limitation: only square inputs are supported.
        assert dimensions[0] == dimensions[1]

        # Track the spatial side length so one constructor works for any size.
        side = dimensions[0]

        # Same-padding 3x3 keeps the resolution (e.g. 8x28x28 on MNIST).
        self.conv1 = ai8x.FusedConv2dReLU(num_channels, 8, 3, padding=1, bias=bias)

        # MNIST-sized (28x28) inputs get extra padding.
        pad = 2 if side == 28 else 1
        self.conv2 = ai8x.FusedMaxPoolConv2dReLU(8, 8, 3, pool_size=2, pool_stride=2,
                                                 padding=pad, bias=bias)
        side //= 2   # the 2x2 max-pool halves the resolution
        if pad == 2:
            side += 2    # padding 2 grows each side by 2 (14 -> 16)

        self.conv3 = ai8x.FusedMaxPoolConv2dReLU(8, fc_inputs, 3, pool_size=4,
                                                 pool_stride=4, padding=1, bias=bias)
        side //= 4   # 4x4 pool; padding 1 leaves the conv output size unchanged

        self.fc = ai8x.Linear(fc_inputs * side * side, num_classes, bias=True, wide=True)

        # He (Kaiming) initialization for the underlying conv weights.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                nn.init.kaiming_normal_(mod.weight, mode='fan_out', nonlinearity='relu')
Example #5
0
    def __init__(
            self,
            num_classes=21,
            num_channels=128,
            dimensions=(128, 1),  # pylint: disable=unused-argument
            fc_inputs=7,
            bias=False,
            **kwargs
    ):
        """Keyword-spotting net: pointwise 1D convs, then 3x3 2D convs, then linear."""
        super().__init__()

        # All 1D stages: pointwise (kernel 1), stride 1, no padding.
        c1d = dict(stride=1, padding=0, bias=bias, **kwargs)
        # All 2D stages: 3x3, stride 1, same-padding.
        c2d = dict(stride=1, padding=1, bias=bias, **kwargs)

        self.voice_conv1 = ai8x.FusedConv1dReLU(num_channels, 100, 1, **c1d)
        self.voice_conv2 = ai8x.FusedConv1dReLU(100, 100, 1, **c1d)
        self.voice_conv3 = ai8x.FusedConv1dReLU(100, 50, 1, **c1d)
        self.voice_conv4 = ai8x.FusedConv1dReLU(50, 16, 1, **c1d)

        self.kws_conv1 = ai8x.FusedConv2dReLU(16, 32, 3, **c2d)
        self.kws_conv2 = ai8x.FusedConv2dReLU(32, 64, 3, **c2d)
        self.kws_conv3 = ai8x.FusedConv2dReLU(64, 64, 3, **c2d)
        self.kws_conv4 = ai8x.FusedConv2dReLU(64, 30, 3, **c2d)
        self.kws_conv5 = ai8x.FusedConv2dReLU(30, fc_inputs, 3, **c2d)

        # Wide classifier head; the 128 factor matches the input feature length.
        self.fc = ai8x.Linear(fc_inputs * 128, num_classes, bias=bias, wide=True,
                              **kwargs)

        # He (Kaiming) initialization for the underlying conv weights.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                nn.init.kaiming_normal_(mod.weight, mode='fan_out', nonlinearity='relu')
Example #6
0
    def __init__(self, num_classes=2, num_channels=3, dimensions=(64, 64),
                 fc_inputs=30, bias=False, **kwargs):
        """Five pool/conv stages plus a final conv feeding a linear classifier."""
        super().__init__()

        # AI85 limitation: only square inputs are supported.
        assert dimensions[0] == dimensions[1]

        # Track the spatial side length so one constructor works for any size.
        side = dimensions[0]

        # Same-padding 3x3 keeps the resolution (15x64x64 at the default size).
        self.conv1 = ai8x.FusedConv2dReLU(num_channels, 15, 3, padding=1, bias=bias,
                                          **kwargs)

        # 28x28 inputs get extra padding (leftover of the MNIST sizing rule).
        pad = 2 if side == 28 else 1
        self.conv2 = ai8x.FusedMaxPoolConv2dReLU(15, 30, 3, pool_size=2, pool_stride=2,
                                                 padding=pad, bias=bias, **kwargs)
        side //= 2   # 2x2 max-pool halves the resolution -> 30x32x32
        if pad == 2:
            side += 2    # padding 2 grows each side by 2

        self.conv3 = ai8x.FusedMaxPoolConv2dReLU(30, 60, 3, pool_size=2, pool_stride=2,
                                                 padding=1, bias=bias, **kwargs)
        side //= 2   # -> 60x16x16

        self.conv4 = ai8x.FusedMaxPoolConv2dReLU(60, 30, 3, pool_size=2, pool_stride=2,
                                                 padding=1, bias=bias, **kwargs)
        side //= 2   # -> 30x8x8

        self.conv5 = ai8x.FusedMaxPoolConv2dReLU(30, 30, 3, pool_size=2, pool_stride=2,
                                                 padding=1, bias=bias, **kwargs)
        side //= 2   # -> 30x4x4

        self.conv6 = ai8x.FusedConv2dReLU(30, fc_inputs, 3, padding=1, bias=bias,
                                          **kwargs)

        self.fc = ai8x.Linear(fc_inputs * side * side, num_classes, bias=True, **kwargs)

        # He (Kaiming) initialization for the underlying conv weights.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                nn.init.kaiming_normal_(mod.weight, mode='fan_out', nonlinearity='relu')
Example #7
0
    def __init__(  # pylint: disable=too-many-arguments
            self,
            pre_layer_stride,
            bottleneck_settings,
            last_layer_width,
            avg_pool_size=4,
            num_classes=100,
            num_channels=3,
            dimensions=(32, 32),  # pylint: disable=unused-argument
            bias=False,
            depthwise_bias=False,
            **kwargs):
        """MobileNet-style net: stem conv, bottleneck stages, 1x1 post-conv, pooled head."""
        super().__init__()

        # Stem: 3x3 conv up to the input width of the first bottleneck stage.
        self.pre_stage = ai8x.FusedConv2dBNReLU(
            num_channels, bottleneck_settings[0][1], 3,
            padding=1, stride=pre_layer_stride, bias=bias, **kwargs)

        # Bottleneck stages; the helper (defined elsewhere) presumably fills
        # self.feature_stage from each settings row.
        self.feature_stage = nn.ModuleList([])
        for stage_setting in bottleneck_settings:
            self._create_bottleneck_stage(stage_setting, bias, depthwise_bias, **kwargs)

        # 1x1 expansion to the final feature width (bias deliberately off).
        self.post_stage = ai8x.FusedConv2dReLU(
            bottleneck_settings[-1][2], last_layer_width, 1,
            padding=0, stride=1, bias=False, **kwargs)

        # Average pool + 1x1 conv serves as the wide-output classifier head.
        self.classifier = ai8x.FusedAvgPoolConv2d(
            last_layer_width, num_classes, 1,
            padding=0, stride=1,
            pool_size=avg_pool_size, pool_stride=avg_pool_size,
            bias=False, wide=True, **kwargs)
Example #8
0
    def __init__(self, num_classes=10, num_channels=3, dimensions=(28, 28),
                 planes=60, pool=2, fc_inputs=12, bias=False):
        """Width-configurable 4-conv net; `planes` splits a 128-channel budget
        between conv3's input and output (`128 - planes - fc_inputs`)."""
        super().__init__()

        # Device limits on channel counts per layer.
        assert planes + num_channels <= ai8x.dev.WEIGHT_INPUTS
        assert planes + fc_inputs <= ai8x.dev.WEIGHT_DEPTH-1

        # Keep track of image dimensions so one constructor works for all image sizes
        dim = dimensions[0]

        self.conv1 = ai8x.FusedConv2dReLU(num_channels, planes, 3,
                                          padding=1, bias=bias)
        # padding 1 -> no change in dimensions -> MNIST: 28x28 | CIFAR: 32x32

        # MNIST-sized (28x28) input gets padding 2 so the pooled map ends at 16x16.
        pad = 2 if dim == 28 else 1
        self.conv2 = ai8x.FusedMaxPoolConv2dReLU(planes, planes, 3, pool_size=2, pool_stride=2,
                                                 padding=pad, bias=bias)
        dim //= 2  # pooling, padding 0 -> MNIST: 14x14 | CIFAR: 16x16
        if pad == 2:
            dim += 2  # MNIST: padding 2 -> 16x16 | CIFAR: padding 1 -> 16x16

        self.conv3 = ai8x.FusedMaxPoolConv2dReLU(planes, 128-planes-fc_inputs, 3,
                                                 pool_size=2, pool_stride=2, padding=1,
                                                 bias=bias)
        dim //= 2  # pooling, padding 0 -> 8x8
        # padding 1 -> no change in dimensions

        # NOTE(review): pool_size=pool but pool_stride is fixed at 2, while the
        # bookkeeping below divides by `pool` — for pool != 2 these disagree;
        # confirm intended behavior before relying on a non-default `pool`.
        self.conv4 = ai8x.FusedAvgPoolConv2dReLU(128-planes-fc_inputs,
                                                 fc_inputs, 3,
                                                 pool_size=pool, pool_stride=2, padding=1,
                                                 bias=bias)
        dim //= pool  # pooling, padding 0 -> 4x4
        # padding 1 -> no change in dimensions

        # Wide-output linear classifier over the flattened feature map.
        self.fc = ai8x.Linear(fc_inputs*dim*dim, num_classes, bias=True, wide=True)

        # He (Kaiming) initialization for the underlying conv weights.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
Example #9
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 bias=False,
                 se_ratio=None,
                 expand_ratio=1,
                 fused=False,
                 **kwargs):
        """Inverted-bottleneck (MBConv-style) block.

        Stages built here: optional expansion conv (only when
        ``expand_ratio != 1``), a depthwise conv (skipped when ``fused`` is
        exactly ``True``), an optional squeeze-and-excitation pair (enabled by
        ``se_ratio`` in (0, 1]), a 1x1 batch-norm projection, and a residual
        add.  Note the attributes ``expand_conv``, ``depthwise_conv``,
        ``se_reduce``/``se_expand`` only exist when their condition holds.
        """
        super().__init__()

        # SE branch is active only for a valid ratio in (0, 1].
        self.has_se = (se_ratio is not None) and (0 < se_ratio <= 1)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.fused = fused

        # Expansion phase (Inverted Bottleneck)
        inp = in_channels  # number of input channels
        out = in_channels * expand_ratio  # number of expanded (inner) channels
        if expand_ratio != 1:
            if fused is True:
                # Fused variant: the expansion already uses the full spatial
                # kernel, so no separate depthwise conv is built below.
                self.expand_conv = ai8x.FusedConv2dBNReLU(
                    inp,
                    out,
                    kernel_size=kernel_size,
                    padding=1,
                    batchnorm='Affine',
                    bias=bias,
                    eps=1e-03,
                    momentum=0.01,
                    **kwargs)
            else:
                # Non-fused: pointwise (1x1) expansion only.
                self.expand_conv = ai8x.FusedConv2dBNReLU(inp,
                                                          out,
                                                          1,
                                                          batchnorm='Affine',
                                                          bias=bias,
                                                          eps=1e-03,
                                                          momentum=0.01,
                                                          **kwargs)
        # Depthwise Convolution phase — identity comparison: anything other
        # than the literal True (including truthy values) still builds this.
        if fused is not True:
            self.depthwise_conv = ai8x.FusedConv2dBNReLU(
                in_channels=out,
                out_channels=out,
                groups=out,  # groups == channels makes it depthwise
                padding=1,
                kernel_size=kernel_size,
                stride=stride,  # stride is applied here, not in the projection
                batchnorm='Affine',
                bias=bias,
                eps=1e-03,
                momentum=0.01,
                **kwargs)
        # Squeeze and Excitation phase: bottleneck sized from the *input*
        # channel count, expanded back to the inner width.
        if self.has_se:
            num_squeezed_channels = max(1, int(in_channels * se_ratio))
            self.se_reduce = ai8x.FusedConv2dReLU(
                in_channels=out,
                out_channels=num_squeezed_channels,
                kernel_size=1,
                stride=1,
                bias=bias,
                **kwargs)
            self.se_expand = ai8x.Conv2d(in_channels=num_squeezed_channels,
                                         out_channels=out,
                                         kernel_size=1,
                                         stride=1,
                                         bias=bias,
                                         **kwargs)
        # Output Convolution phase: linear (no ReLU) 1x1 projection with BN.
        final_out = out_channels
        self.project_conv = ai8x.FusedConv2dBN(in_channels=out,
                                               out_channels=final_out,
                                               kernel_size=1,
                                               batchnorm='Affine',
                                               bias=bias,
                                               eps=1e-03,
                                               momentum=0.01,
                                               **kwargs)
        # Residual add — presumably applied in forward() when shapes allow;
        # that logic is defined elsewhere.
        self.resid = ai8x.Add()
Example #10
0
    def __init__(self,
                 num_classes=2,
                 num_channels=1,
                 dimensions=(80, 80),
                 planes=8,
                 pool=2,
                 fc_inputs=2,
                 bias=False,
                 **kwargs):
        """10-conv CNN for 80x80 inputs with a two-layer linear classifier.

        Fix over the previous revision: ``num_channels``, ``num_classes`` and
        ``bias`` were accepted but silently ignored (the layer stack hard-coded
        1, 2 and False).  They are now honored; the defaults reproduce the old
        behavior exactly.  ``dimensions``, ``planes``, ``pool`` and
        ``fc_inputs`` are still only used by the assertions / kept for
        interface compatibility — the spatial math below assumes 80x80 input.
        """
        super().__init__()

        # Device limits on channel counts per layer.
        assert planes + num_channels <= ai8x.dev.WEIGHT_INPUTS
        assert planes + fc_inputs <= ai8x.dev.WEIGHT_DEPTH - 1

        # num_channels x 80x80 --> 8x80x80 (padding 1 keeps the dimension)
        self.conv1 = ai8x.FusedConv2dReLU(num_channels, 8, 3, padding=1, bias=bias,
                                          **kwargs)
        self.conv2 = ai8x.FusedConv2dReLU(8, 8, 3, padding=1, bias=bias, **kwargs)

        # 8x80x80 --> 16x40x40 (2x2 max-pool, then same-padding conv)
        self.conv3 = ai8x.FusedMaxPoolConv2dReLU(8, 16, 3, padding=1, bias=bias,
                                                 pool_size=2, pool_stride=2, **kwargs)
        self.conv4 = ai8x.FusedConv2dReLU(16, 16, 3, padding=1, bias=bias, **kwargs)

        # 16x40x40 --> 32x20x20
        self.conv5 = ai8x.FusedMaxPoolConv2dReLU(16, 32, 3, padding=1, bias=bias,
                                                 pool_size=2, pool_stride=2, **kwargs)
        self.conv6 = ai8x.FusedConv2dReLU(32, 32, 3, padding=1, bias=bias, **kwargs)

        # 32x20x20 --> 64x12x12 (padding 2 grows the conv output: 10+2 per side)
        self.conv7 = ai8x.FusedMaxPoolConv2dReLU(32, 64, 3, padding=2, bias=bias,
                                                 pool_size=2, pool_stride=2, **kwargs)
        self.conv8 = ai8x.FusedConv2dReLU(64, 64, 3, padding=1, bias=bias, **kwargs)

        # 64x12x12 --> 64x6x6 (padding 1 keeps the conv output size)
        self.conv9 = ai8x.FusedMaxPoolConv2dReLU(64, 64, 3, padding=1, bias=bias,
                                                 pool_size=2, pool_stride=2, **kwargs)

        # 64x6x6 --> 64x3x3 (padding 1 keeps the conv output size)
        self.conv10 = ai8x.FusedMaxPoolConv2dReLU(64, 64, 3, padding=1, bias=bias,
                                                  pool_size=2, pool_stride=2, **kwargs)

        # Flatten into the two-layer fully connected classifier head.
        self.fc1 = ai8x.FusedLinearReLU(64 * 3 * 3, 10, bias=True, **kwargs)
        self.fc2 = ai8x.Linear(10, num_classes, bias=True, wide=True, **kwargs)

        # He (Kaiming) initialization for the underlying conv weights.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')