Example #1
0
    def __init__(self,
                 in_channels: Tuple[int],
                 out_channels: Tuple[int],
                 name: str = None):
        """Build the HRNet transition layer between two stages.

        For each output branch i:
          * i < len(in_channels) and channel counts already match -> identity
            (recorded as None in ``conv_bn_func_list``);
          * i < len(in_channels) but counts differ -> 3x3 ConvBNReLU adapting
            the channel count at the same resolution;
          * i >= len(in_channels) -> new lower-resolution branch created from
            the last input branch with a stride-2 3x3 ConvBNReLU.

        Args:
            in_channels: Channel counts of the incoming branches.
            out_channels: Channel counts of the outgoing branches
                (len(out_channels) >= len(in_channels)).
            name: Prefix used when registering sublayers.
        """
        super(TransitionLayer, self).__init__()

        num_in = len(in_channels)
        num_out = len(out_channels)
        self.conv_bn_func_list = []
        for i in range(num_out):
            # None marks a pass-through branch (no transform needed).
            residual = None
            if i < num_in:
                if in_channels[i] != out_channels[i]:
                    residual = self.add_sublayer(
                        "transition_{}_layer_{}".format(name, i + 1),
                        L.ConvBNReLU(in_channels=in_channels[i],
                                     out_channels=out_channels[i],
                                     kernel_size=3,
                                     padding='same',
                                     bias_attr=False))
            else:
                # Extra branch: downsample (stride=2) from the last input branch.
                residual = self.add_sublayer(
                    "transition_{}_layer_{}".format(name, i + 1),
                    L.ConvBNReLU(in_channels=in_channels[-1],
                                 out_channels=out_channels[i],
                                 kernel_size=3,
                                 stride=2,
                                 padding='same',
                                 bias_attr=False))
            self.conv_bn_func_list.append(residual)
Example #2
0
    def __init__(self,
                 num_channels: int,
                 num_filters: int,
                 has_se: bool,
                 stride: int = 1,
                 downsample: bool = False,
                 name: str = None):
        """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

        Args:
            num_channels: Input channel count.
            num_filters: Bottleneck width; the block outputs
                ``num_filters * 4`` channels.
            has_se: Whether to append a squeeze-and-excitation layer.
            stride: Stride of the middle 3x3 convolution. Default: 1.
            downsample: Whether the shortcut needs a 1x1 ConvBN projection
                to match the output channel count. Default: False.
            name: Prefix used when naming the optional SE sublayer.
        """
        super(BottleneckBlock, self).__init__()

        self.has_se = has_se
        self.downsample = downsample

        # 1x1 channel reduction.
        self.conv1 = L.ConvBNReLU(in_channels=num_channels,
                                  out_channels=num_filters,
                                  kernel_size=1,
                                  padding='same',
                                  bias_attr=False)

        # 3x3 spatial convolution (carries the block's stride).
        self.conv2 = L.ConvBNReLU(in_channels=num_filters,
                                  out_channels=num_filters,
                                  kernel_size=3,
                                  stride=stride,
                                  padding='same',
                                  bias_attr=False)

        # 1x1 expansion (no ReLU; activation applied after the residual add).
        self.conv3 = L.ConvBN(in_channels=num_filters,
                              out_channels=num_filters * 4,
                              kernel_size=1,
                              padding='same',
                              bias_attr=False)

        if self.downsample:
            # Projection shortcut matching the expanded channel count.
            self.conv_down = L.ConvBN(in_channels=num_channels,
                                      out_channels=num_filters * 4,
                                      kernel_size=1,
                                      padding='same',
                                      bias_attr=False)

        if self.has_se:
            # Use format() so the default name=None does not raise a
            # TypeError on string concatenation ('name + "_fc"' would),
            # matching the naming style used by the other layers.
            self.se = SELayer(num_channels=num_filters * 4,
                              num_filters=num_filters * 4,
                              reduction_ratio=16,
                              name='{}_fc'.format(name))
Example #3
0
    def __init__(self,
                 in_channels: int,
                 key_channels: int,
                 out_channels: int,
                 dropout_rate: float = 0.1):
        """Aggregate pixel and object-context features.

        Args:
            in_channels: Channel count of the pixel features.
            key_channels: Channel count used inside the attention block.
            out_channels: Channel count of the fused output.
            dropout_rate: Dropout probability after the fusion conv.
                Default: 0.1.
        """
        super().__init__()

        self.attention_block = ObjectAttentionBlock(in_channels, key_channels)

        # Fuse [pixel feats ++ context feats] (2 * in_channels) down to
        # out_channels, then regularize with 2-D dropout.
        fuse = L.ConvBNReLU(2 * in_channels, out_channels, 1)
        drop = nn.Dropout2D(dropout_rate)
        self.conv1x1 = nn.Sequential(fuse, drop)
Example #4
0
    def __init__(self,
                 num_classes: int,
                 in_channels: Tuple[int],
                 ocr_mid_channels: int = 512,
                 ocr_key_channels: int = 256):
        """OCR segmentation head.

        Args:
            num_classes: Number of output classes.
            in_channels: Channel counts of the backbone feature maps;
                indexed below, so a sequence is expected.
            ocr_mid_channels: Channel count of the OCR context branch.
                Default: 512.
            ocr_key_channels: Key channel count inside the attention
                block. Default: 256.
        """
        super().__init__()

        self.num_classes = num_classes
        self.spatial_gather = SpatialGatherBlock()
        self.spatial_ocr = SpatialOCRModule(ocr_mid_channels, ocr_key_channels,
                                            ocr_mid_channels)

        # Which backbone outputs feed [aux head, main branch]: the last two
        # when several are available, otherwise the single map twice.
        self.indices = [-2, -1] if len(in_channels) > 1 else [-1, -1]

        self.conv3x3_ocr = L.ConvBNReLU(in_channels[self.indices[1]],
                                        ocr_mid_channels,
                                        3,
                                        padding=1)
        self.cls_head = nn.Conv2D(ocr_mid_channels, self.num_classes, 1)
        # Auxiliary classifier on the earlier feature map (training aid).
        self.aux_head = nn.Sequential(
            L.ConvBNReLU(in_channels[self.indices[0]],
                         in_channels[self.indices[0]], 1),
            nn.Conv2D(in_channels[self.indices[0]], self.num_classes, 1))
Example #5
0
    def __init__(self,
                 in_channels: Tuple[int],
                 out_channels: Tuple[int],
                 multi_scale_output: bool = True,
                 name: str = None,
                 align_corners: bool = False):
        """Build HRNet cross-resolution fusion layers.

        For every output branch i and input branch j it registers:
          * j > i (lower-res input): a 1x1 ConvBN to match channels
            (upsampling happens in the forward pass);
          * j < i (higher-res input): a chain of i-j stride-2 3x3 convs to
            downsample, the last one a plain ConvBN producing
            out_channels[i], the earlier ones ConvBNReLU.

        Args:
            in_channels: Channel counts of the incoming branches.
            out_channels: Channel counts of the fused output branches.
            multi_scale_output: If False, only the highest-resolution
                output (i == 0) is produced. Default: True.
            name: Prefix used when registering sublayers.
            align_corners: Stored for use by the forward interpolation.
                Default: False.
        """
        super(FuseLayers, self).__init__()

        self._actual_ch = len(in_channels) if multi_scale_output else 1
        self._in_channels = in_channels
        self.align_corners = align_corners

        self.residual_func_list = []
        for i in range(self._actual_ch):
            for j in range(len(in_channels)):
                if j > i:
                    # Lower-resolution input: channel projection only.
                    residual_func = self.add_sublayer(
                        "residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
                        L.ConvBN(in_channels=in_channels[j],
                                 out_channels=out_channels[i],
                                 kernel_size=1,
                                 padding='same',
                                 bias_attr=False))
                    self.residual_func_list.append(residual_func)
                elif j < i:
                    # Higher-resolution input: downsample i-j times.
                    # pre_num_filters threads the channel count through
                    # the chain of stride-2 convs.
                    pre_num_filters = in_channels[j]
                    for k in range(i - j):
                        if k == i - j - 1:
                            # Final step: no ReLU, target channel count.
                            residual_func = self.add_sublayer(
                                "residual_{}_layer_{}_{}_{}".format(
                                    name, i + 1, j + 1, k + 1),
                                L.ConvBN(in_channels=pre_num_filters,
                                         out_channels=out_channels[i],
                                         kernel_size=3,
                                         stride=2,
                                         padding='same',
                                         bias_attr=False))
                            pre_num_filters = out_channels[i]
                        else:
                            # Intermediate step keeps the source branch's
                            # output width (out_channels[j]).
                            residual_func = self.add_sublayer(
                                "residual_{}_layer_{}_{}_{}".format(
                                    name, i + 1, j + 1, k + 1),
                                L.ConvBNReLU(in_channels=pre_num_filters,
                                             out_channels=out_channels[j],
                                             kernel_size=3,
                                             stride=2,
                                             padding='same',
                                             bias_attr=False))
                            pre_num_filters = out_channels[j]
                        self.residual_func_list.append(residual_func)
Example #6
0
    def __init__(self, in_channels: int, key_channels: int):
        """Object attention block used by the OCR head.

        Args:
            in_channels: Channel count of the pixel/object features.
            key_channels: Channel count of the query/key embeddings.
        """
        super().__init__()

        self.in_channels = in_channels
        self.key_channels = key_channels

        def embed_twice():
            # Two stacked 1x1 ConvBNReLU layers mapping into key space.
            return nn.Sequential(
                L.ConvBNReLU(in_channels, key_channels, 1),
                L.ConvBNReLU(key_channels, key_channels, 1))

        # Query embedding for pixel features and key embedding for
        # object-region features share the same two-layer shape.
        self.f_pixel = embed_twice()
        self.f_object = embed_twice()

        # Value projection down to key space, and the final projection
        # back up to the input channel count.
        self.f_down = L.ConvBNReLU(in_channels, key_channels, 1)
        self.f_up = L.ConvBNReLU(key_channels, in_channels, 1)
Example #7
0
    def __init__(self,
                 stage1_num_modules: int = 1,
                 stage1_num_blocks: Tuple[int] = (4, ),
                 stage1_num_channels: Tuple[int] = (64, ),
                 stage2_num_modules: int = 1,
                 stage2_num_blocks: Tuple[int] = (4, 4),
                 stage2_num_channels: Tuple[int] = (18, 36),
                 stage3_num_modules: int = 4,
                 stage3_num_blocks: Tuple[int] = (4, 4, 4),
                 stage3_num_channels: Tuple[int] = (18, 36, 72),
                 stage4_num_modules: int = 3,
                 stage4_num_blocks: Tuple[int] = (4, 4, 4, 4),
                 stage4_num_channels: Tuple[int] = (18, 36, 72, 144),
                 has_se: bool = False,
                 align_corners: bool = False):
        """HRNet backbone (defaults give the W18 configuration).

        Builds a stem of two stride-2 convs (1/4 input resolution), then
        four stages connected by transition layers that add one
        lower-resolution branch each.

        Args:
            stageN_num_modules: Number of HR modules in stage N.
            stageN_num_blocks: Blocks per branch in stage N.
            stageN_num_channels: Channel count per branch in stage N.
            has_se: Whether blocks use squeeze-and-excitation.
                Default: False.
            align_corners: Interpolation flag passed to the fusion
                layers. Default: False.
        """
        super(HRNet_W18, self).__init__()

        self.stage1_num_modules = stage1_num_modules
        self.stage1_num_blocks = stage1_num_blocks
        self.stage1_num_channels = stage1_num_channels
        self.stage2_num_modules = stage2_num_modules
        self.stage2_num_blocks = stage2_num_blocks
        self.stage2_num_channels = stage2_num_channels
        self.stage3_num_modules = stage3_num_modules
        self.stage3_num_blocks = stage3_num_blocks
        self.stage3_num_channels = stage3_num_channels
        self.stage4_num_modules = stage4_num_modules
        self.stage4_num_blocks = stage4_num_blocks
        self.stage4_num_channels = stage4_num_channels
        self.has_se = has_se
        self.align_corners = align_corners
        # Final feature width: all stage-4 branches concatenated.
        self.feat_channels = [sum(stage4_num_channels)]

        # Stem: two stride-2 3x3 convs -> 1/4 resolution, 64 channels.
        self.conv_layer1_1 = L.ConvBNReLU(in_channels=3,
                                          out_channels=64,
                                          kernel_size=3,
                                          stride=2,
                                          padding='same',
                                          bias_attr=False)

        self.conv_layer1_2 = L.ConvBNReLU(in_channels=64,
                                          out_channels=64,
                                          kernel_size=3,
                                          stride=2,
                                          padding='same',
                                          bias_attr=False)

        # Stage 1: single high-resolution branch.
        self.la1 = Layer1(num_channels=64,
                          num_blocks=self.stage1_num_blocks[0],
                          num_filters=self.stage1_num_channels[0],
                          has_se=has_se,
                          name="layer2")

        # Transition 1: stage-1 output expands 4x (bottleneck expansion),
        # hence the * 4 — presumably Layer1 uses BottleneckBlock; verify.
        self.tr1 = TransitionLayer(
            in_channels=[self.stage1_num_channels[0] * 4],
            out_channels=self.stage2_num_channels,
            name="tr1")

        self.st2 = Stage(num_channels=self.stage2_num_channels,
                         num_modules=self.stage2_num_modules,
                         num_blocks=self.stage2_num_blocks,
                         num_filters=self.stage2_num_channels,
                         has_se=self.has_se,
                         name="st2",
                         align_corners=align_corners)

        # Transition 2 / stage 3: add a third, lower-resolution branch.
        self.tr2 = TransitionLayer(in_channels=self.stage2_num_channels,
                                   out_channels=self.stage3_num_channels,
                                   name="tr2")
        self.st3 = Stage(num_channels=self.stage3_num_channels,
                         num_modules=self.stage3_num_modules,
                         num_blocks=self.stage3_num_blocks,
                         num_filters=self.stage3_num_channels,
                         has_se=self.has_se,
                         name="st3",
                         align_corners=align_corners)

        # Transition 3 / stage 4: add a fourth branch.
        self.tr3 = TransitionLayer(in_channels=self.stage3_num_channels,
                                   out_channels=self.stage4_num_channels,
                                   name="tr3")
        self.st4 = Stage(num_channels=self.stage4_num_channels,
                         num_modules=self.stage4_num_modules,
                         num_blocks=self.stage4_num_blocks,
                         num_filters=self.stage4_num_channels,
                         has_se=self.has_se,
                         name="st4",
                         align_corners=align_corners)