    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                weight_attr=conv2d_w1_attr,
                bias_attr=False),
            BatchNorm2D(6),
            ReLU(),
            MaxPool2D(
                kernel_size=2, stride=2),
            Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0,
                weight_attr=conv2d_w2_attr,
                bias_attr=conv2d_b2_attr),
            BatchNorm2D(16),
            PReLU(),
            MaxPool2D(
                kernel_size=2, stride=2))

        self.fc = Sequential(
            Linear(
                in_features=400,
                out_features=120,
                weight_attr=fc_w1_attr,
                bias_attr=fc_b1_attr),
            LeakyReLU(),
            Linear(
                in_features=120,
                out_features=84,
                weight_attr=fc_w2_attr,
                bias_attr=fc_b2_attr),
            Sigmoid(),
            Linear(
                in_features=84,
                out_features=num_classes,
                weight_attr=fc_w3_attr,
                bias_attr=fc_b3_attr),
            Softmax())
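The snippet stops at __init__. A minimal forward sketch (an addition, not from the source; assumes `import paddle` and a 1x28x28 MNIST-shaped input) shows why the first Linear expects 400 features: the feature stack emits [N, 16, 5, 5], i.e. 16 * 5 * 5 = 400 values per sample.

    # Sketch only: an assumed forward pass for this class.
    def forward(self, inputs):
        x = self.features(inputs)            # [N, 16, 5, 5]
        x = paddle.flatten(x, start_axis=1)  # [N, 400]
        return self.fc(x)                    # [N, num_classes]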
Example #2
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNet, self).__init__()
        self.num_classes = num_classes
        self.features = Sequential(Conv2D(1, 6, 3, stride=1, padding=1),
                                   Pool2D(2, 'max', 2),
                                   Conv2D(6, 16, 5, stride=1, padding=0),
                                   Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = Sequential(Linear(400, 120), Linear(120, 84),
                                 Linear(84, num_classes,
                                        act=classifier_activation))
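A hedged usage sketch (not from the source; assumes `import numpy as np` plus the fluid dygraph imports the snippet already relies on). The fc head expects the [N, 16, 5, 5] feature map flattened to 400 features:

    with fluid.dygraph.guard():
        model = LeNet(num_classes=10)
        x = fluid.dygraph.to_variable(
            np.random.rand(4, 1, 28, 28).astype('float32'))
        feat = model.features(x)                     # [4, 16, 5, 5]
        feat = fluid.layers.reshape(feat, [4, 400])  # flatten for the fc stack
        logits = model.fc(feat)                      # [4, num_classes]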
Example #3
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                weight_attr=conv2d_w1_attr,
                bias_attr=conv2d_b1_attr),
            Pool2D(
                pool_size=2, pool_type='max', pool_stride=2),
            Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0,
                weight_attr=conv2d_w2_attr,
                bias_attr=conv2d_b2_attr),
            Pool2D(
                pool_size=2, pool_type='max', pool_stride=2))

        self.fc = Sequential(
            Linear(
                in_features=400,
                out_features=120,
                weight_attr=fc_w1_attr,
                bias_attr=fc_b1_attr),
            Linear(
                in_features=120,
                out_features=84,
                weight_attr=fc_w2_attr,
                bias_attr=fc_b2_attr),
            Linear(
                in_features=84,
                out_features=num_classes,
                weight_attr=fc_w3_attr,
                bias_attr=fc_b3_attr),
            Softmax())
Example #4
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(num_channels=1,
                   num_filters=6,
                   filter_size=3,
                   stride=1,
                   padding=1,
                   param_attr=conv2d_w1_attr,
                   bias_attr=conv2d_b1_attr),
            Pool2D(pool_size=2, pool_type='max', pool_stride=2),
            Conv2D(num_channels=6,
                   num_filters=16,
                   filter_size=5,
                   stride=1,
                   padding=0,
                   param_attr=conv2d_w2_attr,
                   bias_attr=conv2d_b2_attr),
            Pool2D(pool_size=2, pool_type='max', pool_stride=2))

        self.fc = Sequential(
            Linear(input_dim=400,
                   output_dim=120,
                   param_attr=fc_w1_attr,
                   bias_attr=fc_b1_attr),
            Linear(input_dim=120,
                   output_dim=84,
                   param_attr=fc_w2_attr,
                   bias_attr=fc_b2_attr),
            Linear(input_dim=84,
                   output_dim=num_classes,
                   act=classifier_activation,
                   param_attr=fc_w3_attr,
                   bias_attr=fc_b3_attr))
Example #5
    def __init__(self, depth=50, num_classes=1000):
        super(ResNet, self).__init__()

        layer_config = {
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
        }
        assert depth in layer_config.keys(), \
            "supported depths are {}, but the given depth is {}".format(
                layer_config.keys(), depth)

        layers = layer_config[depth]
        num_in = [64, 256, 512, 1024]
        num_out = [64, 128, 256, 512]

        self.conv = ConvBNLayer(num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=2,
                                act='relu')
        self.pool = Pool2D(pool_size=3,
                           pool_stride=2,
                           pool_padding=1,
                           pool_type='max')

        self.layers = []
        for idx, num_blocks in enumerate(layers):
            blocks = []
            shortcut = False
            for b in range(num_blocks):
                block = BottleneckBlock(
                    num_channels=num_in[idx] if b == 0 else num_out[idx] * 4,
                    num_filters=num_out[idx],
                    stride=2 if b == 0 and idx != 0 else 1,
                    shortcut=shortcut)
                blocks.append(block)
                shortcut = True
            layer = self.add_sublayer("layer_{}".format(idx),
                                      Sequential(*blocks))
            self.layers.append(layer)

        self.global_pool = Pool2D(pool_size=7,
                                  pool_type='avg',
                                  global_pooling=True)

        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.fc_input_dim = num_out[-1] * 4 * 1 * 1
        self.fc = Linear(
            self.fc_input_dim,
            num_classes,
            act='softmax',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))
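Only __init__ is shown. A plausible forward pass (an assumption, but consistent with the fields defined above) chains the stem, the four bottleneck stages, global average pooling, and the softmax fc head:

    # Assumed forward sketch; fc_input_dim is 512 * 4 = 2048 per the sizing above.
    def forward(self, inputs):
        x = self.conv(inputs)
        x = self.pool(x)
        for layer in self.layers:
            x = layer(x)
        x = self.global_pool(x)  # [N, 2048, 1, 1]
        x = fluid.layers.reshape(x, shape=[-1, self.fc_input_dim])
        return self.fc(x)        # softmax probabilities, [N, num_classes]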
Example #6
    def __init__(self,
                 features,
                 num_classes=1000,
                 classifier_activation='softmax'):
        super(VGG, self).__init__()
        self.features = features
        self.num_classes = num_classes

        if num_classes > 0:
            classifier = Classifier(num_classes, classifier_activation)
            self.classifier = self.add_sublayer("classifier",
                                                Sequential(classifier))
Example #7
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3

    for v in cfg:
        if v == 'M':
            layers += [Pool2D(pool_size=2, pool_stride=2)]
        else:
            if batch_norm:
                conv2d = Conv2D(in_channels, v, kernel_size=3, padding=1)
                layers += [conv2d, BatchNorm(v), ReLU()]
            else:
                conv2d = Conv2D(in_channels, v, kernel_size=3, padding=1)
                layers += [conv2d, ReLU()]
            in_channels = v
    return Sequential(*layers)
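Hypothetical usage of make_layers (the VGG-11 configuration below is an illustrative assumption): integers are conv output-channel counts, and 'M' inserts a 2x2 max pool.

    vgg11_cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
    features = make_layers(vgg11_cfg, batch_norm=True)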
Example #8
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3

    for v in cfg:
        if v == 'M':
            layers += [Pool2D(pool_size=2, pool_stride=2)]
        else:
            if batch_norm:
                conv2d = Conv2D(in_channels, v, filter_size=3, padding=1)
                layers += [conv2d, BatchNorm(v, act='relu')]
            else:
                conv2d = Conv2D(
                    in_channels, v, filter_size=3, padding=1, act='relu')
                layers += [conv2d]
            in_channels = v
    return Sequential(*layers)
Example #9
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        self.features = Sequential(
            nn.Conv2D(1, 64, kernel_size=11, stride=4, padding=5),
            nn.ReLU(),
            nn.MaxPool2D(kernel_size=2, stride=2),
            nn.Conv2D(64, 192, kernel_size=5, padding=2),
            nn.ReLU(),
            nn.MaxPool2D(kernel_size=2, stride=2),
            nn.Conv2D(192, 384, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2D(384, 256, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2D(256, 256, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2D(kernel_size=2, stride=2),
        )

        self.reshape_layer = ReshapeHelp(shape=[-1, 256])
        self.classifier = nn.Linear(256, num_classes)
        self.loss_fn = nn.loss.CrossEntropyLoss()
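An assumed forward sketch (not in the source): with a 1x32x32 input (e.g. padded MNIST), the five conv/pool reductions leave a [N, 256, 1, 1] map, which ReshapeHelp flattens to [N, 256] for the linear classifier.

    # Sketch only; returns the loss when a label is given, logits otherwise.
    def forward(self, x, label=None):
        x = self.features(x)         # [N, 256, 1, 1] for 1x32x32 input
        x = self.reshape_layer(x)    # [N, 256]
        logits = self.classifier(x)  # [N, num_classes]
        if label is not None:
            return self.loss_fn(logits, label)  # training path
        return logits                           # inference path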
Example #10
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000,
                 memory_efficient=False):
        super(DenseNet, self).__init__()

        self.features = Sequential(
            ('conv0', Conv2D(
                3,
                num_init_features,
                filter_size=7,
                stride=2,
                padding=3,
                bias_attr=False)),
            ('norm0', BatchNorm(num_init_features, act='relu')),
            ('pool0', Pool2D(
                pool_size=3,
                pool_stride=2,
                pool_padding=1,
                pool_type='max')))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            # The final stage (i == 3) uses the RPN variant of the dense
            # block; both variants are configured identically.
            block_cls = _DenseBlock_rpn if i == 3 else _DenseBlock
            block = block_cls(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient)

            self.features.add_sublayer('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(
                    num_input_features=num_features,
                    num_output_features=num_features // 2)
                self.features.add_sublayer('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_sublayer('norm5', BatchNorm(num_features))

        # Linear layer
        self.classifier = Linear(num_features, num_classes)

        # init num_features
        self.features.num_features = num_features
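The class stops at __init__. An assumed forward sketch (fluid API, matching the layers used above): since 'norm5' is a BatchNorm with no activation, the forward would apply a final ReLU, global-average-pool, flatten, and classify.

    # Sketch only: an assumed forward pass for this DenseNet.
    def forward(self, x):
        features = self.features(x)  # ends at 'norm5' (no activation)
        out = fluid.layers.relu(features)
        out = fluid.layers.adaptive_pool2d(out, pool_size=1, pool_type='avg')
        out = fluid.layers.reshape(out, [-1, self.features.num_features])
        return self.classifier(out)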
Example #11
    def __init__(self,
                 Block,
                 depth=50,
                 num_classes=1000,
                 with_pool=True,
                 classifier_activation='softmax'):
        super(ResNet, self).__init__()

        self.num_classes = num_classes
        self.with_pool = with_pool

        layer_config = {
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
        }
        assert depth in layer_config.keys(), \
            "supported depths are {}, but the given depth is {}".format(
                layer_config.keys(), depth)

        layers = layer_config[depth]

        in_channels = 64
        out_channels = [64, 128, 256, 512]

        self.conv = ConvBNLayer(num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=2,
                                act='relu')
        self.pool = Pool2D(pool_size=3,
                           pool_stride=2,
                           pool_padding=1,
                           pool_type='max')

        self.layers = []
        for idx, num_blocks in enumerate(layers):
            blocks = []
            shortcut = False
            for b in range(num_blocks):
                if b == 1:
                    in_channels = out_channels[idx] * Block.expansion
                block = Block(num_channels=in_channels,
                              num_filters=out_channels[idx],
                              stride=2 if b == 0 and idx != 0 else 1,
                              shortcut=shortcut)
                blocks.append(block)
                shortcut = True
            layer = self.add_sublayer("layer_{}".format(idx),
                                      Sequential(*blocks))
            self.layers.append(layer)

        if with_pool:
            self.global_pool = Pool2D(pool_size=7,
                                      pool_type='avg',
                                      global_pooling=True)

        if num_classes > 0:
            stdv = 1.0 / math.sqrt(out_channels[-1] * Block.expansion * 1.0)
            self.fc_input_dim = out_channels[-1] * Block.expansion * 1 * 1
            self.fc = Linear(
                self.fc_input_dim,
                num_classes,
                act=classifier_activation,
                param_attr=fluid.param_attr.ParamAttr(
                    initializer=fluid.initializer.Uniform(-stdv, stdv)))
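Hypothetical instantiation (BottleneckBlock and BasicBlock are assumed to be defined elsewhere, with Block.expansion of 4 and 1 respectively, which is what the fc sizing above relies on):

    with fluid.dygraph.guard():
        resnet50 = ResNet(BottleneckBlock, depth=50)  # expansion = 4
        resnet18 = ResNet(BasicBlock, depth=18)       # expansion = 1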
Example #12
    def __init__(self):
        super(SimpleNetPipe, self).__init__()
        self.features = Sequential(EmbeddingNet(), MatmulNet(), BiasNet())