Example #1
0
    def __init__(self, classes=1000):
        """Build the layer stack for an AlexNet-style binarized network.

        The first convolution runs in full precision; activations are then
        quantized to integers (EnterInteger) so the middle convolutions can
        use binary weights, and ExitInteger presumably returns to floats
        before the full-precision dense head. -- TODO confirm against the
        `nn` module's quantization semantics.

        Args:
            classes: Number of output units in the final dense layer
                (default 1000, i.e. ImageNet-sized).
        """
        super(alexnet, self).__init__()
        # Full-precision stem: conv + BN + pool, then quantize activations.
        self.conv1 = nn.NormalConv2D(filters=64,
                                     kernel_size=11,
                                     strides=4,
                                     padding='same',
                                     activation='relu',
                                     use_bias=True)
        self.bn1 = nn.NormalBatchNormalization(center=False, scale=False)
        self.pool1 = nn.NormalMaxPool2D(pool_size=2, strides=2)
        self.enter_int = nn.EnterInteger(scale=1.0)

        # Binary-weight convolutions; each BatchNormalization is constructed
        # with its preceding conv layer (framework-specific coupling --
        # NOTE(review): verify what nn.BatchNormalization does with it).
        self.conv2 = nn.BinaryConv2D(filters=192,
                                     kernel_size=5,
                                     strides=1,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn2 = nn.BatchNormalization(self.conv2)
        self.pool2 = nn.MaxPool2D(pool_size=2, strides=2)

        self.conv3 = nn.BinaryConv2D(filters=384,
                                     kernel_size=3,
                                     strides=1,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn3 = nn.BatchNormalization(self.conv3)

        self.conv4 = nn.BinaryConv2D(filters=384,
                                     kernel_size=3,
                                     strides=1,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn4 = nn.BatchNormalization(self.conv4)

        self.conv5 = nn.BinaryConv2D(filters=256,
                                     kernel_size=3,
                                     strides=1,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn5 = nn.BatchNormalization(self.conv5)
        self.pool5 = nn.MaxPool2D(pool_size=2, strides=2)
        # Leave the integer (quantized) domain before the dense head.
        self.exit_int = nn.ExitInteger()

        # Full-precision classifier head (classic AlexNet 4096-4096-classes).
        self.flatten = nn.Flatten()
        self.dense6 = nn.NormalDense(4096, use_bias=True, activation='relu')
        self.bn6 = nn.NormalBatchNormalization()
        self.dense7 = nn.NormalDense(4096, use_bias=True, activation='relu')
        self.bn7 = nn.NormalBatchNormalization()
        self.dense8 = nn.NormalDense(classes, use_bias=True)
        #self.scalu = nn.Scalu()
        self.softmax = nn.Activation('softmax')
Example #2
0
    def __init__(self, classes=1000):
        """Build the layer stack for a VGG-style binarized network.

        A full-precision stem (conv1) is followed by EnterInteger
        quantization and a series of binary-weight 3x3 convolutions in
        widening groups (256 -> 512), each group ending in a 2x2 max pool.

        Args:
            classes: Intended number of output classes (default 1000).
                NOTE(review): `classes` is not used in the visible portion
                of this constructor -- the classifier head is presumably
                defined past the end of this snippet.
        """
        super(vggnet, self).__init__()

        # Full-precision stem, then quantize activations for binary convs.
        self.conv1 = nn.NormalConv2D(
            filters=96,
            kernel_size=7,
            strides=2,
            padding='same',
            activation=None,
            use_bias=False)
        self.pool1 = nn.NormalMaxPool2D(pool_size=2, strides=2)
        self.bn1 = nn.NormalBatchNormalization(center=False, scale=False)
        self.quantize = nn.EnterInteger(1.0)

        # Group 1: three 256-filter binary convs.
        self.conv2 = nn.BinaryConv2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn2 = nn.BatchNormalization(self.conv2)

        self.conv3 = nn.BinaryConv2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn3 = nn.BatchNormalization(self.conv3)

        self.conv4 = nn.BinaryConv2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        # NOTE(review): pool4 is assigned before bn4 (unlike the bn2/bn3
        # pattern); attribute-creation order has no runtime effect, but the
        # call order in the forward pass elsewhere should be double-checked.
        self.pool4 = nn.MaxPool2D(pool_size=2, strides=2)
        self.bn4 = nn.BatchNormalization(self.conv4)

        # Group 2: three 512-filter binary convs.
        self.conv5 = nn.BinaryConv2D(
            filters=512,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn5 = nn.BatchNormalization(self.conv5)

        self.conv6 = nn.BinaryConv2D(
            filters=512,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn6 = nn.BatchNormalization(self.conv6)

        self.conv7 = nn.BinaryConv2D(
            filters=512,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.pool7 = nn.MaxPool2D(pool_size=2, strides=2)
        self.bn7 = nn.BatchNormalization(self.conv7)

        # Group 3: three more 512-filter binary convs.
        self.conv8 = nn.BinaryConv2D(
            filters=512,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn8 = nn.BatchNormalization(self.conv8)

        self.conv9 = nn.BinaryConv2D(
            filters=512,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.bn9 = nn.BatchNormalization(self.conv9)

        self.conv10 = nn.BinaryConv2D(
            filters=512,
            kernel_size=3,
            strides=1,
            padding='same',
            activation='relu',
            use_bias=False)
        self.pool10 = nn.MaxPool2D(pool_size=2, strides=2)
        self.bn10 = nn.BatchNormalization(self.conv10)
        self.flatten = nn.Flatten()
Example #3
0
    def __init__(self, classes=1000):
        """Build the layer stack for a ResNet-18-style binarized network.

        Standard ResNet-18 layout: a full-precision 7x7 stem, then eight
        basic blocks (two 3x3 binary convs each) in four stages of widths
        64/128/256/512. Stages 3, 5 and 7 halve the spatial resolution
        (strides=2) and carry a 1x1 strided "down" conv to match the
        residual branch. Activations are quantized after the stem
        (EnterInteger) for the binary-weight blocks.

        Args:
            classes: Number of output units of the final binary dense
                layer (default 1000).
        """
        super(resnet18, self).__init__()

        # Input Layer
        # Full-precision stem, then quantize activations.
        self.conv1 = nn.NormalConv2D(filters=64,
                                     kernel_size=7,
                                     strides=2,
                                     padding='same',
                                     activation=None,
                                     use_bias=False)
        self.pool1 = nn.NormalMaxPool2D(pool_size=3, strides=2, padding='same')
        self.bn1 = nn.NormalBatchNormalization(center=False, scale=False)
        self.quantize = nn.EnterInteger(1.0)

        # BasicBlock 1 (64 filters, identity shortcut)
        self.block1_conv1 = nn.BinaryConv2D(filters=64,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block1_bn1 = nn.BatchNormalization()
        self.block1_conv2 = nn.BinaryConv2D(filters=64,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block1_bn2 = nn.BatchNormalization()
        self.block1_res = nn.ResidualConnect()

        # BasicBlock 2 (64 filters, identity shortcut)
        self.block2_conv1 = nn.BinaryConv2D(filters=64,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block2_bn1 = nn.BatchNormalization()
        self.block2_conv2 = nn.BinaryConv2D(filters=64,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block2_bn2 = nn.BatchNormalization()
        self.block2_res = nn.ResidualConnect()

        # BasicBlock 3 (128 filters, downsamples: strided conv + 1x1
        # projection shortcut)
        self.block3_conv1 = nn.BinaryConv2D(filters=128,
                                            kernel_size=3,
                                            strides=2,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block3_bn1 = nn.BatchNormalization()
        self.block3_conv2 = nn.BinaryConv2D(filters=128,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block3_bn2 = nn.BatchNormalization()
        self.block3_down_conv = nn.BinaryConv2D(filters=128,
                                                kernel_size=1,
                                                strides=2,
                                                padding='valid',
                                                activation=None,
                                                use_bias=False)
        self.block3_down_bn = nn.BatchNormalization()
        self.block3_res = nn.ResidualConnect()

        # BasicBlock 4 (128 filters, identity shortcut)
        self.block4_conv1 = nn.BinaryConv2D(filters=128,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block4_bn1 = nn.BatchNormalization()
        self.block4_conv2 = nn.BinaryConv2D(filters=128,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block4_bn2 = nn.BatchNormalization()
        self.block4_res = nn.ResidualConnect()

        # BasicBlock 5 (256 filters, downsamples: strided conv + 1x1
        # projection shortcut)
        self.block5_conv1 = nn.BinaryConv2D(filters=256,
                                            kernel_size=3,
                                            strides=2,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block5_bn1 = nn.BatchNormalization()
        self.block5_conv2 = nn.BinaryConv2D(filters=256,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block5_bn2 = nn.BatchNormalization()
        self.block5_down_conv = nn.BinaryConv2D(filters=256,
                                                kernel_size=1,
                                                strides=2,
                                                padding='valid',
                                                activation=None,
                                                use_bias=False)
        self.block5_down_bn = nn.BatchNormalization()
        self.block5_res = nn.ResidualConnect()

        # BasicBlock 6 (256 filters, identity shortcut)
        self.block6_conv1 = nn.BinaryConv2D(filters=256,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block6_bn1 = nn.BatchNormalization()
        self.block6_conv2 = nn.BinaryConv2D(filters=256,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block6_bn2 = nn.BatchNormalization()
        self.block6_res = nn.ResidualConnect()

        # BasicBlock 7 (512 filters, downsamples: strided conv + 1x1
        # projection shortcut)
        self.block7_conv1 = nn.BinaryConv2D(filters=512,
                                            kernel_size=3,
                                            strides=2,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block7_bn1 = nn.BatchNormalization()
        self.block7_conv2 = nn.BinaryConv2D(filters=512,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block7_bn2 = nn.BatchNormalization()
        self.block7_down_conv = nn.BinaryConv2D(filters=512,
                                                kernel_size=1,
                                                strides=2,
                                                padding='valid',
                                                activation=None,
                                                use_bias=False)
        self.block7_down_bn = nn.BatchNormalization()
        self.block7_res = nn.ResidualConnect()

        # BasicBlock 8 (512 filters, identity shortcut)
        self.block8_conv1 = nn.BinaryConv2D(filters=512,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block8_bn1 = nn.BatchNormalization()
        self.block8_conv2 = nn.BinaryConv2D(filters=512,
                                            kernel_size=3,
                                            strides=1,
                                            padding='same',
                                            activation=None,
                                            use_bias=False)
        self.block8_bn2 = nn.BatchNormalization()
        self.block8_res = nn.ResidualConnect()

        # Head: global average pool into a binary dense classifier; Scalu
        # presumably rescales the binarized logits -- TODO confirm.
        self.avg_pool = nn.GlobalAveragePooling2D()
        self.flatten = nn.Flatten()
        self.dense = nn.BinaryDense(classes, use_bias=False)
        self.scalu = nn.Scalu()
Example #4
0
    def __init__(self, classes=1000):
        """Build the layer stack for a SqueezeNet-style binarized network.

        A full-precision 7x7 stem is followed by EnterInteger quantization
        and eight "Fire" modules, each made of a 1x1 squeeze conv plus
        parallel 1x1 and 3x3 expand convs whose outputs are concatenated
        channel-wise. ExitInteger returns to floats before the softmax
        classifier head.

        Args:
            classes: Number of output classes for the final Dense
                classifier (default 1000).

        Note:
            `bnmomemtum` (sic) is a module-level constant defined outside
            this snippet; the misspelling is preserved to match it.
        """
        super(SqueezeNet, self).__init__()
        self.classes = classes

        # Full-precision stem, then quantize activations for binary convs.
        self.c0 = nn.NormalConv2D(kernel_size=7,
                                  strides=4,
                                  filters=96,
                                  padding='same',
                                  activation='relu')
        self.b0 = nn.NormalBatchNormalization(momentum=bnmomemtum)
        #self.mp0 = nn.MaxPool2D(pool_size=2)
        self.enter_int = nn.EnterInteger(scale=1.0)

        # Fire 1: squeeze 32, expand 64 (1x1) + 64 (3x3).
        self.f1c1 = nn.BinaryConv2D(filters=32,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f1b1 = nn.BatchNormalization(self.f1c1, momentum=bnmomemtum)
        self.f1c2 = nn.BinaryConv2D(filters=64,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f1b2 = nn.BatchNormalization(self.f1c2, momentum=bnmomemtum)
        self.f1c3 = nn.BinaryConv2D(filters=64,
                                    kernel_size=3,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f1b3 = nn.BatchNormalization(self.f1c3, momentum=bnmomemtum)
        self.f1concat = tf.keras.layers.Concatenate(axis=-1)

        # Fire 2: squeeze 32, expand 64 + 64.
        self.f2c1 = nn.BinaryConv2D(filters=32,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f2b1 = nn.BatchNormalization(self.f2c1, momentum=bnmomemtum)
        self.f2c2 = nn.BinaryConv2D(filters=64,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f2b2 = nn.BatchNormalization(self.f2c2, momentum=bnmomemtum)
        self.f2c3 = nn.BinaryConv2D(filters=64,
                                    kernel_size=3,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f2b3 = nn.BatchNormalization(self.f2c3, momentum=bnmomemtum)
        self.f2concat = tf.keras.layers.Concatenate(axis=-1)

        # Fire 3: squeeze 32, expand 128 + 128.
        self.f3c1 = nn.BinaryConv2D(filters=32,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f3b1 = nn.BatchNormalization(self.f3c1, momentum=bnmomemtum)
        self.f3c2 = nn.BinaryConv2D(filters=128,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f3b2 = nn.BatchNormalization(self.f3c2, momentum=bnmomemtum)
        self.f3c3 = nn.BinaryConv2D(filters=128,
                                    kernel_size=3,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f3b3 = nn.BatchNormalization(self.f3c3, momentum=bnmomemtum)
        self.f3concat = tf.keras.layers.Concatenate(axis=-1)

        self.mp3 = tf.keras.layers.MaxPooling2D(pool_size=2)

        # Fire 4: squeeze 32, expand 128 + 128.
        self.f4c1 = nn.BinaryConv2D(filters=32,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f4b1 = nn.BatchNormalization(self.f4c1, momentum=bnmomemtum)
        self.f4c2 = nn.BinaryConv2D(filters=128,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f4b2 = nn.BatchNormalization(self.f4c2, momentum=bnmomemtum)
        self.f4c3 = nn.BinaryConv2D(filters=128,
                                    kernel_size=3,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f4b3 = nn.BatchNormalization(self.f4c3, momentum=bnmomemtum)
        self.f4concat = tf.keras.layers.Concatenate(axis=-1)

        # Fire 5: squeeze 64, expand 192 + 192.
        self.f5c1 = nn.BinaryConv2D(filters=64,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f5b1 = nn.BatchNormalization(self.f5c1, momentum=bnmomemtum)
        self.f5c2 = nn.BinaryConv2D(filters=192,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f5b2 = nn.BatchNormalization(self.f5c2, momentum=bnmomemtum)
        self.f5c3 = nn.BinaryConv2D(filters=192,
                                    kernel_size=3,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f5b3 = nn.BatchNormalization(self.f5c3, momentum=bnmomemtum)
        self.f5concat = tf.keras.layers.Concatenate(axis=-1)

        # Fire 6: squeeze 64, expand 192 + 192.
        self.f6c1 = nn.BinaryConv2D(filters=64,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f6b1 = nn.BatchNormalization(self.f6c1, momentum=bnmomemtum)
        self.f6c2 = nn.BinaryConv2D(filters=192,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f6b2 = nn.BatchNormalization(self.f6c2, momentum=bnmomemtum)
        self.f6c3 = nn.BinaryConv2D(filters=192,
                                    kernel_size=3,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f6b3 = nn.BatchNormalization(self.f6c3, momentum=bnmomemtum)
        self.f6concat = tf.keras.layers.Concatenate(axis=-1)

        # Fire 7: squeeze 64, expand 256 + 256.
        self.f7c1 = nn.BinaryConv2D(filters=64,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f7b1 = nn.BatchNormalization(self.f7c1, momentum=bnmomemtum)
        self.f7c2 = nn.BinaryConv2D(filters=256,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f7b2 = nn.BatchNormalization(self.f7c2, momentum=bnmomemtum)
        self.f7c3 = nn.BinaryConv2D(filters=256,
                                    kernel_size=3,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f7b3 = nn.BatchNormalization(self.f7c3, momentum=bnmomemtum)
        self.f7concat = tf.keras.layers.Concatenate(axis=-1)

        self.mp7 = tf.keras.layers.MaxPooling2D(pool_size=2)

        # Fire 8: squeeze 64, expand 256 + 256, then leave integer domain.
        self.f8c1 = nn.BinaryConv2D(filters=64,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f8b1 = nn.BatchNormalization(self.f8c1, momentum=bnmomemtum)
        self.f8c2 = nn.BinaryConv2D(filters=256,
                                    kernel_size=1,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f8b2 = nn.BatchNormalization(self.f8c2, momentum=bnmomemtum)
        self.f8c3 = nn.BinaryConv2D(filters=256,
                                    kernel_size=3,
                                    activation='relu',
                                    padding='same',
                                    use_bias=False)
        self.f8b3 = nn.BatchNormalization(self.f8c3, momentum=bnmomemtum)
        self.f8concat = tf.keras.layers.Concatenate(axis=-1)
        self.exit_int = nn.ExitInteger()

        # Output
        self.avgpool = tf.keras.layers.GlobalAveragePooling2D()
        # BUG FIX: was hard-coded Dense(1000, ...), silently ignoring the
        # `classes` argument for any non-default value.
        self.classifier = tf.keras.layers.Dense(classes, activation='softmax')
Example #5
0
    def __init__(self, classes=1000):
        """Build the layer stack for a VGG-11-style binarized network.

        A full-precision stem (conv1) is followed by EnterInteger
        quantization and binary-weight 3x3 convolutions in widening groups
        (128 -> 256 -> 512 -> 512). Spatial reduction is configurable:
        either max pooling (the nn.MaxPool2D layers) or strided
        convolutions, selected via the active nn.Config scope.

        Args:
            classes: Number of output units of the final binary dense
                classifier (default 1000).
        """
        super(vgg11, self).__init__()

        # Set up configurable maxpool or stride dimension reduction.
        # When pooling layers do the downsampling, the convs keep stride 1;
        # otherwise stride-2 convs take over that role. NOTE(review): when
        # use_maxpool is False, the pool layers are still constructed here
        # -- presumably the forward pass skips them; confirm against `call`.
        self.scope = nn.Config.current
        use_maxpool = self.scope.use_maxpool
        if use_maxpool:
            reduce_stride = 1
        else:
            reduce_stride = 2

        # Full-precision stem, then quantize activations.
        self.conv1 = nn.NormalConv2D(filters=64,
                                     kernel_size=3,
                                     strides=1,
                                     padding='same',
                                     activation='relu')
        self.bn1 = nn.NormalBatchNormalization()
        self.pool1 = nn.NormalMaxPool2D(pool_size=2, strides=2)
        self.quantize = nn.EnterInteger(1.0)

        self.conv2 = nn.BinaryConv2D(filters=128,
                                     kernel_size=3,
                                     strides=reduce_stride,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn2 = nn.BatchNormalization()
        self.pool2 = nn.MaxPool2D(pool_size=2, strides=2)

        self.conv3 = nn.BinaryConv2D(filters=256,
                                     kernel_size=3,
                                     strides=1,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn3 = nn.BatchNormalization()
        self.conv4 = nn.BinaryConv2D(filters=256,
                                     kernel_size=3,
                                     strides=reduce_stride,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn4 = nn.BatchNormalization()
        self.pool3 = nn.MaxPool2D(pool_size=2, strides=2)

        self.conv5 = nn.BinaryConv2D(filters=512,
                                     kernel_size=3,
                                     strides=1,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn5 = nn.BatchNormalization()
        self.conv6 = nn.BinaryConv2D(filters=512,
                                     kernel_size=3,
                                     strides=reduce_stride,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn6 = nn.BatchNormalization()
        self.pool4 = nn.MaxPool2D(pool_size=2, strides=2)

        self.conv7 = nn.BinaryConv2D(filters=512,
                                     kernel_size=3,
                                     strides=1,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn7 = nn.BatchNormalization()
        self.conv8 = nn.BinaryConv2D(filters=512,
                                     kernel_size=3,
                                     strides=reduce_stride,
                                     padding='same',
                                     activation='relu',
                                     use_bias=False)
        self.bn8 = nn.BatchNormalization()
        self.pool5 = nn.MaxPool2D(pool_size=2, strides=2)

        # Head: global average pool, binary dense classifier, Scalu
        # (presumably rescales binarized logits -- TODO confirm), softmax.
        self.avgpool = nn.GlobalAveragePooling2D()
        self.classifier = nn.BinaryDense(classes, use_bias=False)
        self.scalu = nn.Scalu()
        self.softmax = nn.Activation('softmax')