Example #1
    def __init__(
            self,
            num_classes=100,
            num_channels=3,
            dimensions=(32, 32),  # pylint: disable=unused-argument
            bias=False,
            **kwargs
    ):
        super().__init__()

        self.conv1 = ai8x.FusedConv2dReLU(num_channels, 16, 3, stride=1, padding=1, bias=bias,
                                          **kwargs)
        self.conv2 = ai8x.FusedConv2dReLU(16, 20, 3, stride=1, padding=1, bias=bias, **kwargs)
        self.conv3 = ai8x.FusedConv2dReLU(20, 20, 3, stride=1, padding=1, bias=bias, **kwargs)
        self.conv4 = ai8x.FusedConv2dReLU(20, 20, 3, stride=1, padding=1, bias=bias, **kwargs)
        self.conv5 = ai8x.FusedMaxPoolConv2dReLU(20, 20, 3, pool_size=2, pool_stride=2,
                                                 stride=1, padding=1, bias=bias, **kwargs)
        self.conv6 = ai8x.FusedConv2dReLU(20, 20, 3, stride=1, padding=1, bias=bias, **kwargs)
        self.conv7 = ai8x.FusedConv2dReLU(20, 44, 3, stride=1, padding=1, bias=bias, **kwargs)
        self.conv8 = ai8x.FusedMaxPoolConv2dReLU(44, 48, 3, pool_size=2, pool_stride=2,
                                                 stride=1, padding=1, bias=bias, **kwargs)
        self.conv9 = ai8x.FusedConv2dReLU(48, 48, 3, stride=1, padding=1, bias=bias, **kwargs)
        self.conv10 = ai8x.FusedMaxPoolConv2dReLU(48, 96, 3, pool_size=2, pool_stride=2,
                                                  stride=1, padding=1, bias=bias, **kwargs)
        self.conv11 = ai8x.FusedMaxPoolConv2dReLU(96, 512, 1, pool_size=2, pool_stride=2,
                                                  padding=0, bias=bias, **kwargs)
        self.conv12 = ai8x.FusedConv2dReLU(512, 128, 1, stride=1, padding=0, bias=bias, **kwargs)
        self.conv13 = ai8x.FusedMaxPoolConv2dReLU(128, 128, 3, pool_size=2, pool_stride=2,
                                                  stride=1, padding=1, bias=bias, **kwargs)
        self.conv14 = ai8x.Conv2d(128, num_classes, 1, stride=1, padding=0, bias=bias,
                                  wide=True, **kwargs)
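
The snippet stops at the constructor; a forward() along the following lines would complete it, assuming the fused layers are applied in definition order and the 1x1 output of conv14 is flattened (the method itself is not in the original).

    def forward(self, x):
        # Assumed wiring: apply the layers in the order they are defined above
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        x = self.conv7(x)
        x = self.conv8(x)
        x = self.conv9(x)
        x = self.conv10(x)
        x = self.conv11(x)
        x = self.conv12(x)
        x = self.conv13(x)
        x = self.conv14(x)
        x = x.view(x.size(0), -1)  # flatten num_classes x 1 x 1 output
        return x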
Example #2
    def __init__(self, num_classes=10, num_channels=3, dimensions=(28, 28),
                 fc_inputs=8, bias=False):
        super().__init__()

        # AI84 Limits
        assert dimensions[0] == dimensions[1]  # Only square supported

        # Keep track of image dimensions so one constructor works for all image sizes
        dim = dimensions[0]

        self.conv1 = ai8x.FusedConv2dReLU(num_channels, 8, 3,
                                          padding=1, bias=bias)
        # padding 1 -> no change in dimensions -> 8x28x28

        pad = 2 if dim == 28 else 1
        self.conv2 = ai8x.FusedMaxPoolConv2dReLU(8, 8, 3, pool_size=2, pool_stride=2,
                                                 padding=pad, bias=bias)
        dim //= 2  # pooling, padding 0 -> 8x14x14
        if pad == 2:
            dim += 2  # padding 2 -> 8x16x16

        self.conv3 = ai8x.FusedMaxPoolConv2dReLU(8, fc_inputs, 3,
                                                 pool_size=4, pool_stride=4, padding=1,
                                                 bias=bias)
        dim //= 4  # pooling, padding 0 -> 8x4x4
        # padding 1 -> 8x4x4

        self.fc = ai8x.Linear(fc_inputs*dim*dim, num_classes, bias=True, wide=True)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
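
A minimal forward() sketch showing how the flattened conv3 output (fc_inputs*dim*dim values) feeds the linear classifier; the method is assumed, not part of the original snippet.

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = x.view(x.size(0), -1)  # flatten fc_inputs x dim x dim for the classifier
        x = self.fc(x)
        return x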
Example #3
    def __init__(
            self,
            num_classes=None,  # pylint: disable=unused-argument
            num_channels=3,
            dimensions=(160, 120),  # pylint: disable=unused-argument
            bias=True,
            **kwargs
    ):
        super().__init__()

        self.conv1 = ai8x.FusedConv2dReLU(num_channels, 16, 3, padding=1,
                                          bias=False, **kwargs)
        self.conv2 = ai8x.FusedMaxPoolConv2dReLU(16, 32, 3, pool_size=2, pool_stride=2,
                                                 padding=1, bias=False, **kwargs)
        self.conv3 = ai8x.FusedMaxPoolConv2dReLU(32, 32, 3, pool_size=2, pool_stride=2,
                                                 padding=1, bias=bias, **kwargs)
        self.conv4 = ai8x.FusedMaxPoolConv2dReLU(32, 64, 3, pool_size=2, pool_stride=2,
                                                 padding=1, bias=bias, **kwargs)
        self.conv5 = ai8x.FusedMaxPoolConv2dReLU(64, 64, 3, pool_size=2, pool_stride=2,
                                                 padding=1, bias=bias, **kwargs)
        self.conv6 = ai8x.FusedConv2dReLU(64, 64, 3, padding=1, bias=bias, **kwargs)
        self.conv7 = ai8x.FusedConv2dReLU(64, 64, 3, padding=1, bias=bias, **kwargs)
        self.conv8 = ai8x.FusedMaxPoolConv2d(64, 512, 1, pool_size=2, pool_stride=2,
                                             padding=0, bias=False, **kwargs)
        self.avgpool = ai8x.AvgPool2d((5, 3))
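
The original gives no forward pass; one plausible sketch follows, assuming the layers run in definition order and the pooled 512x1x1 map is flattened into the embedding (whether the real model flattens or normalizes at this point is not shown).

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        x = self.conv7(x)
        x = self.conv8(x)
        x = self.avgpool(x)           # 512x5x3 -> 512x1x1 for a 160x120 input
        return x.view(x.size(0), -1)  # assumed: flatten to a 512-dim embedding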
Example #4
    def __init__(self, num_classes=2, num_channels=3, dimensions=(64, 64),
                 fc_inputs=30, bias=False, **kwargs):
        super().__init__()

        # AI85 Limits
        assert dimensions[0] == dimensions[1]  # Only square supported

        # Keep track of image dimensions so one constructor works for all image sizes
        dim = dimensions[0]

        self.conv1 = ai8x.FusedConv2dReLU(num_channels, 15, 3,
                                          padding=1, bias=bias, **kwargs)
        # padding 1 -> no change in dimensions -> 15x64x64

        pad = 2 if dim == 28 else 1
        self.conv2 = ai8x.FusedMaxPoolConv2dReLU(15, 30, 3, pool_size=2, pool_stride=2,
                                                 padding=pad, bias=bias, **kwargs)
        dim //= 2  # pooling, padding 0 -> 30x32x32
        if pad == 2:
            dim += 2  # padding 2 -> 30x16x16

        self.conv3 = ai8x.FusedMaxPoolConv2dReLU(30, 60, 3, pool_size=2, pool_stride=2, padding=1,
                                                 bias=bias, **kwargs)
        dim //= 2  # pooling, padding 0 -> 60x16x16

        self.conv4 = ai8x.FusedMaxPoolConv2dReLU(60, 30, 3, pool_size=2, pool_stride=2, padding=1,
                                                 bias=bias, **kwargs)
        dim //= 2  # pooling, padding 0 -> 30x8x8

        self.conv5 = ai8x.FusedMaxPoolConv2dReLU(30, 30, 3, pool_size=2, pool_stride=2, padding=1,
                                                 bias=bias, **kwargs)
        dim //= 2  # pooling, padding 0 -> 30x4x4

        self.conv6 = ai8x.FusedConv2dReLU(30, fc_inputs, 3, padding=1, bias=bias, **kwargs)

        self.fc = ai8x.Linear(fc_inputs*dim*dim, num_classes, bias=True, **kwargs)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
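
A forward() sketch matching the fc_inputs*dim*dim classifier input computed above; the wiring is assumed from the layer definitions.

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        x = x.view(x.size(0), -1)  # flatten fc_inputs x dim x dim
        x = self.fc(x)
        return x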
Example #5
    def __init__(self, num_classes=10, num_channels=3, dimensions=(28, 28),
                 planes=60, pool=2, fc_inputs=12, bias=False):
        super().__init__()

        # Limits
        assert planes + num_channels <= ai8x.dev.WEIGHT_INPUTS
        assert planes + fc_inputs <= ai8x.dev.WEIGHT_DEPTH-1

        # Keep track of image dimensions so one constructor works for all image sizes
        dim = dimensions[0]

        self.conv1 = ai8x.FusedConv2dReLU(num_channels, planes, 3,
                                          padding=1, bias=bias)
        # padding 1 -> no change in dimensions -> MNIST: 28x28 | CIFAR: 32x32

        pad = 2 if dim == 28 else 1
        self.conv2 = ai8x.FusedMaxPoolConv2dReLU(planes, planes, 3, pool_size=2, pool_stride=2,
                                                 padding=pad, bias=bias)
        dim //= 2  # pooling, padding 0 -> MNIST: 14x14 | CIFAR: 16x16
        if pad == 2:
            dim += 2  # MNIST: padding 2 -> 16x16 | CIFAR: padding 1 -> 16x16

        self.conv3 = ai8x.FusedMaxPoolConv2dReLU(planes, 128-planes-fc_inputs, 3,
                                                 pool_size=2, pool_stride=2, padding=1,
                                                 bias=bias)
        dim //= 2  # pooling, padding 0 -> 8x8
        # padding 1 -> no change in dimensions

        self.conv4 = ai8x.FusedAvgPoolConv2dReLU(128-planes-fc_inputs,
                                                 fc_inputs, 3,
                                                 pool_size=pool, pool_stride=2, padding=1,
                                                 bias=bias)
        dim //= pool  # pooling, padding 0 -> 4x4
        # padding 1 -> no change in dimensions

        self.fc = ai8x.Linear(fc_inputs*dim*dim, num_classes, bias=True, wide=True)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
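
A forward() sketch for this constructor, again assuming the conv stack runs in definition order and is flattened into the linear classifier; not part of the original.

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = x.view(x.size(0), -1)  # flatten fc_inputs x dim x dim
        x = self.fc(x)
        return x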
Example #6
    def __init__(self,
                 num_channels=3,
                 num_classes=10,
                 dimensions=(32, 32),
                 bias=False,
                 **kwargs):
        super().__init__()
        dim1 = dimensions[0]
        dim2 = dimensions[1]
        # 3x32x32
        self.conv1 = ai8x.FusedMaxPoolConv2dReLU(in_channels=num_channels,
                                                 out_channels=64,
                                                 kernel_size=3,
                                                 padding=1,
                                                 bias=bias,
                                                 **kwargs)
        dim1 //= 2
        dim2 //= 2
        # 64x16x16
        self.fire1 = ai8x_fire.Fire(in_planes=64,
                                    squeeze_planes=16,
                                    expand1x1_planes=64,
                                    expand3x3_planes=64,
                                    bias=bias,
                                    **kwargs)
        # 128x16x16
        self.fire2 = ai8x_fire.Fire(in_planes=128,
                                    squeeze_planes=16,
                                    expand1x1_planes=64,
                                    expand3x3_planes=64,
                                    bias=bias,
                                    **kwargs)
        # 128x16x16
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2,
                                  padding=0)  # check if kernel size=3
        dim1 //= 2
        dim2 //= 2
        # 128x8x8
        self.fire3 = ai8x_fire.Fire(in_planes=128,
                                    squeeze_planes=32,
                                    expand1x1_planes=128,
                                    expand3x3_planes=128,
                                    bias=bias,
                                    **kwargs)
        # 256x8x8
        self.fire4 = ai8x_fire.Fire(in_planes=256,
                                    squeeze_planes=32,
                                    expand1x1_planes=128,
                                    expand3x3_planes=128,
                                    bias=bias,
                                    **kwargs)
        # 256x8x8
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2,
                                  padding=0)  # check if kernel size=3
        dim1 //= 2
        dim2 //= 2
        # 256x4x4
        self.fire5 = ai8x_fire.Fire(in_planes=256,
                                    squeeze_planes=48,
                                    expand1x1_planes=192,
                                    expand3x3_planes=192,
                                    bias=bias,
                                    **kwargs)
        # 384x4x4
        self.fire6 = ai8x_fire.Fire(in_planes=384,
                                    squeeze_planes=48,
                                    expand1x1_planes=192,
                                    expand3x3_planes=192,
                                    bias=bias,
                                    **kwargs)
        # 384x4x4
        self.fire7 = ai8x_fire.Fire(in_planes=384,
                                    squeeze_planes=48,
                                    expand1x1_planes=256,
                                    expand3x3_planes=256,
                                    bias=bias,
                                    **kwargs)
        # 512x4x4
        self.fire8 = ai8x_fire.Fire(in_planes=512,
                                    squeeze_planes=64,
                                    expand1x1_planes=256,
                                    expand3x3_planes=256,
                                    bias=bias,
                                    **kwargs)
        # 512x4x4
        # self.conv2 = ai8x.FusedAvgPoolConv2dReLU(in_channels=512, out_channels=num_classes,
        #                                          kernel_size=1, pool_size=4, pool_stride=4)
        self.fc = ai8x.SoftwareLinear(512 * dim1 * dim2,
                                      num_classes,
                                      bias=bias)
        # num_classesx1x1

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
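
A forward() sketch wiring conv1, the Fire modules, and the standalone max-pool layers in the order the shape comments imply; the method and the flatten before SoftwareLinear are assumptions.

    def forward(self, x):
        x = self.conv1(x)   # 64 x dim1 x dim2
        x = self.fire1(x)
        x = self.fire2(x)
        x = self.pool1(x)   # 128 x dim1/2 x dim2/2
        x = self.fire3(x)
        x = self.fire4(x)
        x = self.pool2(x)   # 256 x dim1/4 x dim2/4
        x = self.fire5(x)
        x = self.fire6(x)
        x = self.fire7(x)
        x = self.fire8(x)
        x = x.view(x.size(0), -1)  # flatten 512 x dim1 x dim2 for SoftwareLinear
        x = self.fc(x)
        return x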
Example #7

    def __init__(self,
                 num_classes=2,
                 num_channels=1,
                 dimensions=(80, 80),
                 planes=8,
                 pool=2,
                 fc_inputs=2,
                 bias=False,
                 **kwargs):
        super().__init__()

        # Limits
        assert planes + num_channels <= ai8x.dev.WEIGHT_INPUTS
        assert planes + fc_inputs <= ai8x.dev.WEIGHT_DEPTH - 1

        # 1x80x80 --> 8x80x80 (padding by 1 so same dimension)
        self.conv1 = ai8x.FusedConv2dReLU(1,
                                          8,
                                          3,
                                          padding=1,
                                          bias=False,
                                          **kwargs)
        self.conv2 = ai8x.FusedConv2dReLU(8,
                                          8,
                                          3,
                                          padding=1,
                                          bias=False,
                                          **kwargs)

        # 8x80x80 --> 16x40x40 (padding by 1 so same dimension)
        self.conv3 = ai8x.FusedMaxPoolConv2dReLU(8,
                                                 16,
                                                 3,
                                                 padding=1,
                                                 bias=False,
                                                 pool_size=2,
                                                 pool_stride=2,
                                                 **kwargs)
        self.conv4 = ai8x.FusedConv2dReLU(16,
                                          16,
                                          3,
                                          padding=1,
                                          bias=False,
                                          **kwargs)

        # 16x40x40 --> 32x20x20 (padding by 1 so same dimension)
        self.conv5 = ai8x.FusedMaxPoolConv2dReLU(16,
                                                 32,
                                                 3,
                                                 padding=1,
                                                 bias=False,
                                                 pool_size=2,
                                                 pool_stride=2,
                                                 **kwargs)
        self.conv6 = ai8x.FusedConv2dReLU(32,
                                          32,
                                          3,
                                          padding=1,
                                          bias=False,
                                          **kwargs)

        # 32x20x20 --> 64x12x12 (padding by 2 so increase dimension)
        self.conv7 = ai8x.FusedMaxPoolConv2dReLU(32,
                                                 64,
                                                 3,
                                                 padding=2,
                                                 bias=False,
                                                 pool_size=2,
                                                 pool_stride=2,
                                                 **kwargs)
        self.conv8 = ai8x.FusedConv2dReLU(64,
                                          64,
                                          3,
                                          padding=1,
                                          bias=False,
                                          **kwargs)

        # 64x12x12 --> 64x6x6 (padding by 1 so same dimension)
        self.conv9 = ai8x.FusedMaxPoolConv2dReLU(64,
                                                 64,
                                                 3,
                                                 padding=1,
                                                 bias=False,
                                                 pool_size=2,
                                                 pool_stride=2,
                                                 **kwargs)

        # 64x6x6 --> 64x3x3 (padding by 1 so same dimension)
        self.conv10 = ai8x.FusedMaxPoolConv2dReLU(64,
                                                  64,
                                                  3,
                                                  padding=1,
                                                  bias=False,
                                                  pool_size=2,
                                                  pool_stride=2,
                                                  **kwargs)

        # flatten to fully connected layer
        self.fc1 = ai8x.FusedLinearReLU(64 * 3 * 3, 10, bias=True, **kwargs)
        self.fc2 = ai8x.Linear(10, 2, bias=True, wide=True, **kwargs)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
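
A forward() sketch for this 80x80 constructor, assuming the ten conv layers run in definition order and the 64x3x3 output is flattened for fc1/fc2; the method is not shown in the original.

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        x = self.conv7(x)
        x = self.conv8(x)
        x = self.conv9(x)
        x = self.conv10(x)
        x = x.view(x.size(0), -1)  # flatten 64x3x3 = 576 inputs for the classifier head
        x = self.fc1(x)
        x = self.fc2(x)
        return x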