Example #1
    def __init__(self, channels, out_size, stage, p=1, t=2, r=1, **kwargs):
        r"""Residual Attention Block from
        `"Residual Attention Network for Image Classification"
        <https://arxiv.org/abs/1704.06904>`_ paper.

        Parameters
        ----------
        :param channels: int. Number of output channels.
        :param out_size: int. Size of the output feature map, now it only supports square shape.
        :param stage: int. Stage described in Figure 2.
        :param p: int. Number of pre-processing Residual Units before split into trunk branch and mask branch.
        :param t: int. Number of Residual Units in trunk branch.
        :param r: int. Number of Residual Units between adjacent pooling layer in the mask branch.
        :param kwargs:
        """
        super().__init__(**kwargs)
        with self.name_scope():
            self.pre = nn.HybridSequential()
            for i in range(p):
                self.pre.add(BottleneckV2(channels, 1, prefix='pre_%d_' % i))

            self.trunk_branch = nn.HybridSequential()
            for i in range(t):
                self.trunk_branch.add(
                    BottleneckV2(channels, 1, prefix='trunk_%d_' % i))

            self.mask_branch = _MaskBlock(channels,
                                          r,
                                          out_size,
                                          stage,
                                          prefix='mask_')

            self.post = nn.HybridSequential()
            for i in range(p):
                self.post.add(BottleneckV2(channels, 1, prefix='post_%d_' % i))
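
A minimal usage sketch for the block above, assuming the surrounding class is the AttentionBlock used in Examples #3 and #4, and that its full definition (including a hybrid_forward) and BottleneckV2 are in scope:

    import mxnet as mx

    # Hypothetical usage: a stage-1 attention block on 56x56 feature
    # maps with 256 channels, matching Examples #3 and #4; p=1, t=2,
    # r=1 are the defaults from the constructor signature.
    block = AttentionBlock(channels=256, out_size=56, stage=1)
    block.initialize()

    x = mx.nd.random.uniform(shape=(1, 256, 56, 56))  # NCHW input
    y = block(x)  # same shape as x; the mask branch reweights the trunk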
Example #2
    def _make_layers(self, channels, r, stage, out_size):
        # Lower stages add more down-/up-sampling levels to the
        # hourglass-shaped mask branch: stage 1 uses three levels,
        # stage 2 two, and stage 3 one.
        if stage <= 1:
            self.down_sample_1 = nn.MaxPool2D(3, 2, 1)
            self.down_res_unit_1 = nn.HybridSequential()
            for i in range(r):
                self.down_res_unit_1.add(
                    BottleneckV2(channels, 1, prefix="down_res1_%d_" % i))
            self.skip_connection_1 = BottleneckV2(channels, 1)

            self.up_res_unit_1 = nn.HybridSequential()
            for i in range(r):
                self.up_res_unit_1.add(
                    BottleneckV2(channels, 1, prefix="up_res1_%d_" % i))
            self.up_sample_1 = _UpSampleBlock(out_size)
            out_size = out_size // 2

        if stage <= 2:
            self.down_sample_2 = nn.MaxPool2D(3, 2, 1)
            self.down_res_unit_2 = nn.HybridSequential()
            for i in range(r):
                self.down_res_unit_2.add(
                    BottleneckV2(channels, 1, prefix="down_res2_%d_" % i))
            self.skip_connection_2 = BottleneckV2(channels, 1)

            self.up_res_unit_2 = nn.HybridSequential()
            for i in range(r):
                self.up_res_unit_2.add(
                    BottleneckV2(channels, 1, prefix="up_res2_%d_" % i))
            self.up_sample_2 = _UpSampleBlock(out_size)
            out_size = out_size // 2

        if stage <= 3:
            self.down_sample_3 = nn.MaxPool2D(3, 2, 1)
            self.down_res_unit_3 = nn.HybridSequential()
            for i in range(r):
                self.down_res_unit_3.add(
                    BottleneckV2(channels, 1, prefix="down_res3_%d_" % i))

            self.up_res_unit_3 = nn.HybridSequential()
            for i in range(r):
                self.up_res_unit_3.add(
                    BottleneckV2(channels, 1, prefix="up_res3_%d_" % i))
            self.up_sample_3 = _UpSampleBlock(out_size)

        # 1x1 convolution head that maps the up-sampled features to a
        # soft attention mask with values in (0, 1).
        self.output = nn.HybridSequential()
        self.output.add(
            nn.BatchNorm(), nn.Activation('relu'),
            nn.Conv2D(channels, kernel_size=1, strides=1, use_bias=False),
            nn.BatchNorm(), nn.Activation('relu'),
            nn.Conv2D(channels, kernel_size=1, strides=1, use_bias=False),
            nn.Activation('sigmoid'))
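
The head above ends in a sigmoid, so the mask branch emits soft attention values in (0, 1). In the paper, the enclosing attention module combines this mask with the trunk output through attention residual learning; a one-line sketch of that combination, assuming mask and trunk are the two branch outputs inside a hybrid_forward:

    # Attention residual learning from the paper:
    # H(x) = (1 + M(x)) * T(x), so the trunk signal is preserved even
    # where the mask is near zero.
    out = (1 + mask) * trunk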
Example #3
    def __init__(self,
                 classes,
                 modules,
                 p,
                 t,
                 r,
                 weight_norm=False,
                 feature_norm=False,
                 embedding_size=512,
                 need_cls_layer=True,
                 **kwargs):
        super().__init__(classes, embedding_size, weight_norm, feature_norm,
                         need_cls_layer, **kwargs)
        assert len(modules) == 3
        with self.name_scope():
            self.features = nn.HybridSequential()
            # 112x112
            self.features.add(nn.Conv2D(64, 3, 1, 1, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))

            # 56x56
            self.features.add(BottleneckV2(256, 2, True, 64))
            for _ in range(modules[0]):
                self.features.add(AttentionBlock(256, 56, 1, p, t, r))

            # 28x28
            self.features.add(BottleneckV2(512, 2, True, 256))
            for _ in range(modules[1]):
                self.features.add(AttentionBlock(512, 28, 2, p, t, r))

            # 14x14
            self.features.add(BottleneckV2(1024, 2, True, 512))
            for _ in range(modules[2]):
                self.features.add(AttentionBlock(1024, 14, 3, p, t, r))

            # 7x7
            self.features.add(BottleneckV2(2048, 2, True, 1024),
                              BottleneckV2(2048, 1), BottleneckV2(2048, 1))

            # 2048
            self.features.add(nn.BatchNorm(), nn.Activation('relu'),
                              nn.GlobalAvgPool2D(), nn.Flatten())
            # embedding
            self.features.add(nn.Dense(embedding_size, use_bias=False),
                              nn.BatchNorm(scale=False, center=False),
                              nn.PReLU())
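
An instantiation sketch for this embedding variant, assuming a hypothetical class name AttentionNetFace (the snippet does not show it) and 112x112 aligned face crops:

    import mxnet as mx

    # modules=(1, 1, 1) corresponds to Attention-56 and (1, 2, 3) to
    # Attention-92 in the paper's nomenclature; p=1, t=2, r=1 are the
    # paper's defaults.
    net = AttentionNetFace(classes=10000, modules=(1, 1, 1),
                           p=1, t=2, r=1, embedding_size=512)
    net.initialize()

    x = mx.nd.random.uniform(shape=(2, 3, 112, 112))  # face crops, NCHW
    emb = net.features(x)  # (2, 512) embeddings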
Example #4
    def __init__(self, classes, modules, p, t, r, **kwargs):
        super().__init__(**kwargs)
        assert len(modules) == 3
        with self.name_scope():
            self.features = nn.HybridSequential()
            # 112x112
            self.features.add(nn.Conv2D(64, 3, 2, 1, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))

            # 56x56
            self.features.add(nn.MaxPool2D(3, 2, 1))
            self.features.add(BottleneckV2(256, 1, True, 64))
            for _ in range(modules[0]):
                self.features.add(AttentionBlock(256, 56, 1, p, t, r))

            # 28x28
            self.features.add(BottleneckV2(512, 2, True, 256))
            for _ in range(modules[1]):
                self.features.add(AttentionBlock(512, 28, 2, p, t, r))

            # 14x14
            self.features.add(BottleneckV2(1024, 2, True, 512))
            for _ in range(modules[2]):
                self.features.add(AttentionBlock(1024, 14, 3, p, t, r))

            # 7x7
            self.features.add(BottleneckV2(2048, 2, True, 1024),
                              BottleneckV2(2048, 1), BottleneckV2(2048, 1))

            # 2048
            self.features.add(nn.BatchNorm(), nn.Activation('relu'),
                              nn.GlobalAvgPool2D(), nn.Flatten())

            # classes
            self.output = nn.Dense(classes)
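
An analogous sketch for this classification variant, again with a hypothetical class name (AttentionNet) and 224x224 ImageNet-style inputs:

    import mxnet as mx

    net = AttentionNet(classes=1000, modules=(1, 1, 1), p=1, t=2, r=1)
    net.initialize()

    x = mx.nd.random.uniform(shape=(2, 3, 224, 224))  # NCHW image batch
    logits = net.output(net.features(x))  # (2, 1000) class scores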