Example 1
    def __init__(self, in_channels: int, num_classes: int,
                 block_depth: Tuple[int, int, int, int],
                 init_channels: int,
                 block_channels: Tuple[int, int, int, int],
                 expansion: int = 4,
                 base_width: int = 64,
                 cardinality: int = 1,
                 radix: int = 2):

        assert radix > 1, "ResNest requires radix > 1"

        block = partial(Bottleneck,
                        expansion=expansion,
                        base_width=base_width,
                        cardinality=cardinality,
                        radix=radix)

        super(ResNest, self).__init__(
            in_channels, num_classes,
            block=block,
            block_depth=block_depth,
            init_channels=init_channels,
            block_channels=block_channels)

        # change the stem to match the ResNest architecture
        self.features.stem = nn.Sequential(
            ConvBnReLU2d(in_channels, init_channels // 2, 3, padding=1, stride=2),
            ConvBnReLU2d(init_channels // 2, init_channels // 2, 3, padding=1),
            ConvBnReLU2d(init_channels // 2, init_channels, 3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
Example 2
    def __init__(self, in_channels: int, out_channels: int,
                 stride: int = 1,
                 expansion: int = 4,
                 base_width: int = 64,
                 cardinality: int = 1,
                 radix: int = 1):
        super(Bottleneck, self).__init__()

        self.radix = radix

        width = int((out_channels / expansion) * (base_width / 64) * cardinality)

        self.conv1 = ConvBnReLU2d(in_channels, width, 1)

        self.conv2 = ConvBnReLU2d(width, width * radix, 3, padding=1, groups=cardinality * radix)
        self.sa = SplitAttention(width, width * radix, 4, cardinality=cardinality, radix=radix)
        self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) if stride == 2 else nn.Identity()
        self.conv3 = ConvBn2d(width, out_channels, 1)

        self.activation = nn.ReLU(inplace=True)

        self.downsample = (
            nn.Sequential(
                nn.AvgPool2d(stride) if stride != 1 else nn.Identity(),
                ConvBn2d(in_channels, out_channels, 1),
            )
            if stride != 1 or in_channels != out_channels
            else nn.Identity()
        )
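
For reference, one plausible forward wiring for this block, following the order in which the submodules are defined. This is a sketch: it assumes SplitAttention maps the width * radix feature map back to width channels (its definition is not shown here), which is what conv3's input size requires.

    def forward(self, x):
        # shortcut: average-pool + 1x1 projection when the shape changes
        residual = self.downsample(x)
        out = self.conv1(x)
        out = self.conv2(out)   # width -> width * radix, grouped conv
        out = self.sa(out)      # split attention, assumed to return width channels
        out = self.pool(out)    # spatial downsampling when stride == 2
        out = self.conv3(out)   # width -> out_channels, no activation
        return self.activation(out + residual)
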
Example 3
    def __init__(self,
                 in_channels: int,
                 num_classes: int,
                 stages: List[List[nn.Module]],
                 init_channels: int,
                 stage_channels: int,
                 feature_channels: int,
                 dropout_p: float = 0.1):
        features = OrderedDict()
        features["stem"] = ConvBnReLU2d(in_channels,
                                        init_channels,
                                        3,
                                        padding=1,
                                        stride=2)
        for idx, stage in enumerate(stages):
            features[f"stage{idx+ 1}"] = nn.Sequential(*stage)
        features["tail"] = ConvBnReLU2d(stage_channels, feature_channels, 1)
        features = nn.Sequential(features)

        classifier = nn.Sequential(nn.AdaptiveAvgPool2d(1),
                                   nn.Dropout(p=dropout_p), nn.Flatten(),
                                   nn.Linear(feature_channels, num_classes))

        super().__init__(
            OrderedDict([
                ('features', features),
                ('classifier', classifier),
            ]))
Example 4
def make_layer(in_channels, out_channels, num_blocks):
    layers = [ConvBnReLU2d(in_channels, out_channels, 3, padding=1)]
    for _ in range(1, num_blocks):
        layers += [
            ConvBnReLU2d(out_channels, out_channels, 3, padding=1)
        ]
    return nn.Sequential(*layers)
Example 5
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 group_width=1,
                 se_reduction=4):
        super(BottleneckY, self).__init__()

        self.conv1 = ConvBnReLU2d(in_channels, out_channels, 1)
        self.conv2 = ConvBnReLU2d(out_channels,
                                  out_channels,
                                  3,
                                  padding=1,
                                  stride=stride,
                                  groups=out_channels //
                                  min(out_channels, group_width))
        self.se = SqueezeExcitation(out_channels,
                                    out_channels,
                                    mid_channels=round(in_channels /
                                                       se_reduction))
        self.conv3 = ConvBn2d(out_channels, out_channels, 1)

        self.downsample = (ConvBn2d(
            in_channels, out_channels, 1, stride=stride) if stride != 1
                           or in_channels != out_channels else nn.Identity())

        self.activation = nn.ReLU(inplace=True)
Example 6
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 expansion=4,
                 base_width=64,
                 cardinality=1):
        super(Bottleneck, self).__init__()

        width = int(
            (out_channels / expansion) * (base_width / 64) * cardinality)

        self.conv1 = ConvBnReLU2d(in_channels, width, 1)
        self.conv2 = ConvBnReLU2d(width,
                                  width,
                                  3,
                                  padding=1,
                                  stride=stride,
                                  groups=cardinality)
        self.conv3 = ConvBn2d(width, out_channels, 1)

        self.downsample = (
            ConvBn2d(in_channels, out_channels, 1, stride=stride)
            if in_channels != out_channels or stride != 1 else nn.Identity())

        self.activation = nn.ReLU(inplace=True)
Example 7
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 dilation=1,
                 expansion=4,
                 use_residual=True):
        super(Bottleneck, self).__init__()

        self.use_residual = use_residual

        width = out_channels // expansion

        self.conv1 = ConvBnReLU2d(in_channels, width, 1)
        self.conv2 = ConvBnReLU2d(width,
                                  width,
                                  3,
                                  padding=dilation,
                                  stride=stride,
                                  dilation=dilation)
        self.conv3 = ConvBn2d(width, out_channels, 1)

        if self.use_residual:
            self.downsample = (
                ConvBn2d(in_channels, out_channels, 1, stride=stride) if
                in_channels != out_channels or stride != 1 else nn.Identity())

        self.activation = nn.ReLU(inplace=True)
Example 8
def Classifier(in_channels, out_channels):
    return nn.Sequential(
        ConvBn2d(in_channels, in_channels, 3, padding=1, groups=in_channels),
        ConvBnReLU2d(in_channels, in_channels, 1),
        ConvBn2d(in_channels, in_channels, 3, padding=1, groups=in_channels),
        ConvBnReLU2d(in_channels, in_channels, 1),
        nn.Dropout(0.1),
        nn.Conv2d(in_channels, out_channels, kernel_size=1),
    )
Example 9
    def __init__(self, in_channels, out_channels, bins=(1, 2, 3, 6)):
        super(PyramidPooling, self).__init__()
        self.pyramids = nn.ModuleList([
            nn.Sequential(
                nn.AdaptiveAvgPool2d(output_size=bin),
                ConvBnReLU2d(in_channels, in_channels, 1),
            )
            for bin in bins
        ])
        self.bottleneck = ConvBnReLU2d(in_channels * (len(bins) + 1), out_channels, 1)
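
A minimal forward sketch for this pyramid pooling module, assuming bilinear upsampling as in PSPNet and that torch and torch.nn.functional (as F) are imported:

    def forward(self, x):
        size = x.shape[-2:]
        # upsample every pooled branch back to the input resolution
        out = [x] + [
            F.interpolate(level(x), size=size, mode='bilinear', align_corners=False)
            for level in self.pyramids
        ]
        # concat gives in_channels * (len(bins) + 1) channels, as the bottleneck expects
        return self.bottleneck(torch.cat(out, dim=1))
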
Example 10
    def __init__(self, in_channels, out_channels, pyramids=(1, 2, 3, 6)):
        super().__init__()

        self.pyramids = nn.ModuleList([
            nn.Sequential(
                nn.AdaptiveAvgPool2d(bin),
                ConvBnReLU2d(in_channels, in_channels // len(pyramids), 1),
            ) for bin in pyramids
        ])

        self.conv = ConvBnReLU2d(in_channels * 2, out_channels, 1)
Example 11
    def __init__(self, in_channels, out_channels, stride=1, expansion=6):
        super().__init__()

        expansion_channels = expansion * in_channels
        self.conv1 = ConvBnReLU2d(in_channels, expansion_channels, 1)
        self.conv2 = ConvBnReLU2d(expansion_channels,
                                  expansion_channels,
                                  3,
                                  padding=1,
                                  stride=stride,
                                  groups=expansion_channels)
        self.conv3 = ConvBn2d(expansion_channels, out_channels, 1)
Example 12
    def __init__(self, in_channels, out_channels, stride=1, expansion=6):
        super(BottleneckBlock, self).__init__()

        expansion_channels = in_channels * expansion
        self.conv1 = ConvBnReLU2d(in_channels, expansion_channels, 1)
        self.conv2 = ConvBnReLU2d(expansion_channels,
                                  expansion_channels,
                                  3,
                                  padding=1,
                                  stride=stride,
                                  groups=expansion_channels)
        self.conv3 = ConvBn2d(expansion_channels, out_channels, 1)
Example 13
    def __init__(self, in_channels, num_classes, width_multiplier=1.):

        def c(channels):
            return round_channels(width_multiplier * channels,
                                  divisor=8 if width_multiplier >= 0.1 else 4)

        features = nn.Sequential(OrderedDict([
            ('head', ConvBnReLU2d(in_channels, c(16), 3, padding=1, stride=2)),
            ('layer1', nn.Sequential(
                GhostBottleneck(c(16), c(16), c(16)),
                GhostBottleneck(c(16), c(24), c(48), stride=2),
            )),
            ('layer2', nn.Sequential(
                GhostBottleneck(c(24), c(24), c(72)),
                GhostBottleneck(c(24), c(40), c(72), kernel_size=5, stride=2, use_se=True),
            )),
            ('layer3', nn.Sequential(
                GhostBottleneck(c(40), c(40), c(120), kernel_size=5, use_se=True),
                GhostBottleneck(c(40), c(80), c(240), stride=2),
            )),
            ('layer4', nn.Sequential(
                GhostBottleneck(c(80), c(80), c(200)),
                GhostBottleneck(c(80), c(80), c(184)),
                GhostBottleneck(c(80), c(80), c(184)),
                GhostBottleneck(c(80), c(112), c(480), use_se=True),
                GhostBottleneck(c(112), c(112), c(672), use_se=True),
                GhostBottleneck(c(112), c(160), c(672), kernel_size=5, stride=2, use_se=True),
            )),
            ('layer5', nn.Sequential(
                GhostBottleneck(c(160), c(160), c(960), kernel_size=5),
                GhostBottleneck(c(160), c(160), c(960), kernel_size=5, use_se=True),
                GhostBottleneck(c(160), c(160), c(960), kernel_size=5),
                GhostBottleneck(c(160), c(160), c(960), kernel_size=5, use_se=True),
                ConvBnReLU2d(c(160), c(960), 1),
            )),
        ]))

        classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d(output_size=1),
            nn.Conv2d(c(960), c(1280), 1),
            nn.ReLU(inplace=True),
            nn.Flatten(),
            nn.Dropout(p=0.2),
            nn.Linear(c(1280), num_classes),
        )

        super(GhostNet, self).__init__(OrderedDict([
            ('features', features),
            ('classifier', classifier),
        ]))
Example 14
    def __init__(self, in_channels, out_channels, kernel_size, dropout_p=0.01):
        super().__init__()

        self.conv1 = nn.Sequential(
            ConvBnReLU2d(in_channels, out_channels, (kernel_size, 1), padding=(kernel_size//2, 0)),
            ConvBnReLU2d(out_channels, out_channels, (1, kernel_size), padding=(0, kernel_size//2)),
        )

        self.conv2 = nn.Sequential(
            ConvBnReLU2d(out_channels, out_channels, (kernel_size, 1), padding=(kernel_size//2, 0)),
            ConvBn2d(out_channels, out_channels, (1, kernel_size), padding=(0, kernel_size//2)),
        )

        self.activation = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout2d(p=dropout_p)
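
A sketch of the usual forward pass for this factorized block, assuming in_channels == out_channels so the identity shortcut is valid (as in ERFNet's non-bottleneck-1D design):

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.dropout(out)
        # identity shortcut, then the final nonlinearity
        return self.activation(out + x)
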
Example 15
    def __init__(self,
                 in_channels,
                 out_channels,
                 reduction_channels,
                 num_blocks=5):
        super().__init__()

        convs = []
        for i in range(num_blocks):
            channels = in_channels if i == 0 else reduction_channels
            convs += [ConvBnReLU2d(channels, reduction_channels, 3, padding=1)]
        self.convs = nn.ModuleList(convs)

        channels = in_channels + reduction_channels * num_blocks
        self.transition = ConvBnReLU2d(channels, out_channels, 1)
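
The intended data flow chains the convolutions and concatenates the input with every intermediate output, which matches the channel count the transition expects (in_channels + reduction_channels * num_blocks). A sketch, assuming torch is imported:

    def forward(self, x):
        features = [x]
        for conv in self.convs:
            x = conv(x)
            features.append(x)
        # aggregate the input and all intermediate feature maps
        return self.transition(torch.cat(features, dim=1))
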
Example 16
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 dilation=1,
                 use_residual=True):
        super(BasicBlock, self).__init__()

        self.use_residual = use_residual

        self.conv1 = ConvBnReLU2d(in_channels,
                                  out_channels,
                                  3,
                                  padding=dilation,
                                  stride=stride,
                                  dilation=dilation)
        self.conv2 = ConvBn2d(out_channels,
                              out_channels,
                              3,
                              padding=dilation,
                              dilation=dilation)

        if self.use_residual:
            self.downsample = (
                ConvBn2d(in_channels, out_channels, 1, stride=stride) if
                in_channels != out_channels or stride != 1 else nn.Identity())

        self.activation = nn.ReLU(inplace=True)
Example 17
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor=4,
                 width_multiplier=1):
        super(ContextNet, self).__init__()

        self.scale_factor = scale_factor

        def c(channels):
            return int(channels * width_multiplier)

        self.spatial = nn.Sequential(
            ConvBnReLU2d(in_channels, c(32), 3, padding=1, stride=2),
            ConvBnReLU2d(c(32),
                         c(32),
                         kernel_size=3,
                         padding=1,
                         stride=2,
                         groups=c(32)),
            ConvBnReLU2d(c(32), c(64), 1),
            ConvBnReLU2d(c(64),
                         c(64),
                         kernel_size=3,
                         padding=1,
                         stride=2,
                         groups=c(64)),
            ConvBnReLU2d(c(64), c(128), 1),
            ConvBnReLU2d(c(128),
                         c(128),
                         kernel_size=3,
                         padding=1,
                         stride=1,
                         groups=c(128)),
            ConvBnReLU2d(c(128), c(128), 1),
        )

        self.context = nn.Sequential(
            ConvBnReLU2d(in_channels, c(32), 3, padding=1, stride=2),
            BottleneckBlock(c(32), c(32), expansion=1),
            BottleneckBlock(c(32), c(32), expansion=6),
            LinearBottleneck(c(32), c(48), 3, stride=2),
            LinearBottleneck(c(48), c(64), 3, stride=2),
            LinearBottleneck(c(64), c(96), 2),
            LinearBottleneck(c(96), c(128), 2),
            ConvBnReLU2d(c(128), c(128), 3, padding=1),
        )

        self.feature_fusion = FeatureFusionModule((c(128), c(128)), c(128))

        self.classifier = Classifier(c(128), out_channels)
Example 18
    def __init__(self, in_channels, out_channels, dilation_rates=(2, 5, 9), dropout_p=0.01):
        super().__init__()

        self.conv1 = nn.Sequential(
            ConvBnReLU2d(in_channels, out_channels, (3, 1), padding=(1, 0)),
            ConvBnReLU2d(out_channels, out_channels, (1, 3), padding=(0, 1)),
        )

        self.conv2 = nn.ModuleList(
            nn.Sequential(
                ConvBnReLU2d(out_channels, out_channels, (3, 1), padding=(dilation, 0), dilation=(dilation, 1)),
                ConvBn2d(out_channels, out_channels, (1, 3), padding=(0, dilation), dilation=(1, dilation)),
            )
            for dilation in dilation_rates
        )

        self.activation = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout2d(p=dropout_p)
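
A plausible forward pass. Two assumptions are made here: the parallel dilated branches are summed (one common aggregation for multi-dilation blocks; the original could also concatenate), and in_channels == out_channels so the shortcut is valid:

    def forward(self, x):
        out = self.conv1(x)
        # sum the parallel dilated branches (assumed aggregation)
        out = sum(branch(out) for branch in self.conv2)
        out = self.dropout(out)
        return self.activation(out + x)
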
Example 19
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 stride: int = 1,
                 expansion: int = 4):
        super(Bottleneck, self).__init__()

        width = out_channels // expansion

        self.conv1 = ConvBnReLU2d(in_channels, width, 1)
        self.conv2 = ConvBnReLU2d(width, width, 3, padding=1, stride=stride)
        self.conv3 = ConvBn2d(width, out_channels, 1)

        self.downsample = (
            ConvBn2d(in_channels, out_channels, 1, stride=stride)
            if in_channels != out_channels or stride != 1 else nn.Identity())

        self.activation = nn.ReLU(inplace=True)
Example 20
    def __init__(self, in_channels, out_channels, stride=1, group_width=1):
        super(BottleneckX, self).__init__()

        self.downsample = (ConvBn2d(
            in_channels, out_channels, 1, stride=stride) if stride != 1
                           or in_channels != out_channels else nn.Identity())

        self.conv1 = ConvBnReLU2d(in_channels, out_channels, 1)
        self.conv2 = ConvBnReLU2d(out_channels,
                                  out_channels,
                                  3,
                                  padding=1,
                                  stride=stride,
                                  groups=max(out_channels // group_width, 1))

        self.conv3 = ConvBn2d(out_channels, out_channels, 1)

        self.activation = nn.ReLU(inplace=True)
Example 21
    def __init__(self, in_channels, out_channels):
        super(APNModule, self).__init__()

        self.conv1 = ConvBnReLU2d(in_channels, in_channels, 3, 1, stride=2)
        self.conv2 = ConvBnReLU2d(in_channels, in_channels, 5, 2, stride=2)
        self.conv3 = ConvBnReLU2d(in_channels, in_channels, 7, 3, stride=2)

        self.level1 = ConvBnReLU2d(in_channels, out_channels, 1)
        self.level2 = ConvBnReLU2d(in_channels, out_channels, 1)
        self.level3 = ConvBnReLU2d(in_channels, out_channels, 1)
        self.level4 = ConvBnReLU2d(in_channels, out_channels, 1)
        self.level5 = ConvBnReLU2d(in_channels, out_channels, 1)
Example 22
    def __init__(self, in_channels: int, num_classes: int,
                 block_depth: Tuple[int, int, int, int], init_channels: int,
                 block_channels: Tuple[int, int, int, int]):
        def make_layer(in_channels, out_channels, depth, stride=2):
            layers = [
                InvertedResidualBlock(in_channels, out_channels, stride=stride)
            ]
            for _ in range(1, depth):
                layers += [InvertedResidualBlock(out_channels, out_channels)]
            return nn.Sequential(*layers)

        features = nn.Sequential(
            OrderedDict([
                ('stem',
                 nn.Sequential(
                     ConvBnReLU2d(in_channels,
                                  init_channels,
                                  3,
                                  padding=1,
                                  stride=2),
                     nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
                 )),
                ('layer1',
                 make_layer(init_channels, block_channels[0], block_depth[0])),
                ('layer2',
                 make_layer(block_channels[0], block_channels[1],
                            block_depth[1])),
                ('layer3',
                 make_layer(block_channels[1], block_channels[2],
                            block_depth[2])),
                ('tail', ConvBnReLU2d(block_channels[2], block_channels[3],
                                      1)),
            ]))

        classifier = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(),
                                   nn.Linear(block_channels[3], num_classes))

        super(ShuffleNetV2, self).__init__(
            OrderedDict([
                ('features', features),
                ('classifier', classifier),
            ]))
Example 23
    def __init__(self,
                 in_channels: int,
                 num_classes: int,
                 growth: int,
                 block_depth: Tuple[int, int, int, int],
                 init_channels: int,
                 expansion: int = 4,
                 dropout_p: float = 0.0):

        layers = OrderedDict()

        layers["stem"] = nn.Sequential(
            OrderedDict([
                ('conv',
                 ConvBnReLU2d(in_channels,
                              init_channels,
                              7,
                              padding=3,
                              stride=2)),
                ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        channels = init_channels
        for idx, depth in enumerate(block_depth):
            out_channels = channels + growth * depth
            layer = [
                DenseBlock(channels + growth * i,
                           channels + growth * (i + 1),
                           growth=growth,
                           expansion=expansion,
                           dropout_p=dropout_p) for i in range(depth)
            ]
            if idx != len(block_depth) - 1:
                # DenseNet does not have a transition block after the last layer
                layer += [TransitionBlock(out_channels, out_channels // 2)]
                out_channels = out_channels // 2

            layers[f"layer{idx+1}"] = nn.Sequential(*layer)
            channels = out_channels

        layers['tail'] = nn.Sequential(nn.BatchNorm2d(channels),
                                       nn.ReLU(inplace=True))

        features = nn.Sequential(layers)

        classifier = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(),
                                   nn.Linear(channels, num_classes))

        super(DenseNet, self).__init__(
            OrderedDict([
                ('features', features),
                ('classifier', classifier),
            ]))
Example 24
    def __init__(self, in_channels, out_channels):
        super().__init__()

        # The Learning to Downsample module.
        # It encodes low-level features in an efficient way.
        # It is composed of three convolutional layers, where the first one
        # is a regular conv and the other two are depthwise separable conv
        # layers.
        # The first convolutional layer is a regular conv because there is
        # no advantage in using a depthwise separable conv with such a
        # small number of channels.
        # All layers use a 3x3 spatial kernel and a stride of 2, for a
        # total downsampling factor of 8.
        # Also, there is no nonlinearity between the depthwise and
        # pointwise convs.
        self.downsample = nn.Sequential(
            ConvBnReLU2d(in_channels, 32, 3, padding=1, stride=2),
            ConvBn2d(32, 32, 3, padding=1, stride=2, groups=32),
            ConvBnReLU2d(32, 48, 1),
            ConvBn2d(48, 48, 3, padding=1, stride=2, groups=48),
            ConvBnReLU2d(48, 64, 1),
        )

        # The Global Feature Extractor module is aimed at capturing the
        # global context for the task of image segmentation.
        # It directly takes the 1/8-downsampled output of the Learning to
        # Downsample module, performs feature encoding using MobileNet
        # bottleneck residual blocks, and then applies pyramid pooling at
        # the end to aggregate the different region-based context
        # information.
        self.features = nn.Sequential(
            BottleneckModule(64, 64, expansion=6, repeats=3, stride=2),
            BottleneckModule(64, 96, expansion=6, repeats=3, stride=2),
            BottleneckModule(96, 128, expansion=6, repeats=3, stride=1),
            PyramidPoolingModule(128, 128))

        # The Feature Fusion module adds the low-resolution features from
        # the Global Feature Extractor and the high-resolution features
        # from the Learning to Downsample module.
        self.fusion = FeatureFusionModule((128, 64), 128, scale_factor=4)

        # The classifier predicts the classes from the features produced
        # by the fusion module.
        self.classifier = Classifier(128, out_channels)
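
A forward sketch tying the four modules together, assuming the fusion module takes the (low-resolution, high-resolution) pair and that torch.nn.functional is imported as F:

    def forward(self, x):
        size = x.shape[-2:]
        high = self.downsample(x)   # high-resolution spatial detail (1/8)
        low = self.features(high)   # global context (1/32)
        out = self.fusion(low, high)
        out = self.classifier(out)
        # upsample the logits back to the input resolution
        return F.interpolate(out, size=size, mode='bilinear', align_corners=False)
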
Example 25
    def __init__(self, in_channels, out_channels, dilation=1, dropout_p=0.0):
        super(SSnbtBlock, self).__init__()

        if in_channels != out_channels:
            raise ValueError("input and output channels must match")

        channels = in_channels // 2
        self.left = nn.Sequential(
            ConvBnReLU2d(channels, channels, (3, 1), padding=(1, 0)),
            ConvBnReLU2d(channels, channels, (1, 3), padding=(0, 1)),
            ConvBnReLU2d(channels,
                         channels, (3, 1),
                         padding=(dilation, 0),
                         dilation=(dilation, 1)),
            ConvBn2d(channels,
                     channels, (1, 3),
                     padding=(0, dilation),
                     dilation=(1, dilation)),
        )
        self.right = nn.Sequential(
            ConvBnReLU2d(channels, channels, (3, 1), padding=(1, 0)),
            ConvBnReLU2d(channels, channels, (1, 3), padding=(0, 1)),
            ConvBnReLU2d(channels,
                         channels, (3, 1),
                         padding=(dilation, 0),
                         dilation=(dilation, 1)),
            ConvBn2d(channels,
                     channels, (1, 3),
                     padding=(0, dilation),
                     dilation=(1, dilation)),
        )

        self.activation = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout2d(p=dropout_p)
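
The split-shuffle wiring this block implies (as in LEDNet's SS-nbt unit): split the channels in half, run each half through its branch, concatenate, add the shortcut, then shuffle channels across the two groups. A sketch, assuming torch is imported and a two-group channel shuffle:

    def forward(self, x):
        left, right = x.chunk(2, dim=1)
        out = torch.cat([self.left(left), self.right(right)], dim=1)
        out = self.dropout(out)
        out = self.activation(out + x)
        # channel shuffle across the two branches (groups = 2)
        n, c, h, w = out.shape
        return out.view(n, 2, c // 2, h, w).transpose(1, 2).reshape(n, c, h, w)
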
Example 26
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1):
        super(InvertedResidualBlock, self).__init__()

        branch_channels = out_channels // 2

        self.left = (ConvBnReLU2d(in_channels,
                                  branch_channels,
                                  3,
                                  padding=1,
                                  stride=stride,
                                  groups=in_channels) if stride != 1 else None)

        self.right = nn.Sequential(
            ConvBnReLU2d(in_channels if stride > 1 else branch_channels,
                         branch_channels, 1),
            ConvBnReLU2d(branch_channels,
                         branch_channels,
                         kernel_size=3,
                         padding=1,
                         stride=stride,
                         groups=branch_channels),
        )
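
A forward sketch following the standard ShuffleNetV2 unit, which this constructor appears to implement: at stride 1 the input is split and only one half is transformed, at stride > 1 both branches see the full input, and a two-group channel shuffle follows the concatenation (assuming torch is imported):

    def forward(self, x):
        if self.left is None:
            left, right = x.chunk(2, dim=1)
            out = torch.cat([left, self.right(right)], dim=1)
        else:
            out = torch.cat([self.left(x), self.right(x)], dim=1)
        # channel shuffle with two groups
        n, c, h, w = out.shape
        return out.view(n, 2, c // 2, h, w).transpose(1, 2).reshape(n, c, h, w)
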
Example 27
    def __init__(self, in_channels, out_channels):
        super(FeatureFusionModule, self).__init__()

        lowres_channels, highres_channels = in_channels
        self.lowres = nn.Sequential(
            ConvBnReLU2d(lowres_channels,
                         lowres_channels,
                         kernel_size=3,
                         padding=4,
                         dilation=4,
                         groups=lowres_channels),
            ConvBn2d(lowres_channels, out_channels, 1))
        self.highres = ConvBn2d(highres_channels, out_channels, 1)
Example 28
    def __init__(self, in_channels: int, num_classes: int, block: nn.Module,
                 block_depth: Tuple[int, int, int, int], init_channels: int,
                 block_channels: Tuple[int, int, int, int]):
        def make_layer(in_channels, out_channels, num_blocks, stride=2):
            layers = [block(in_channels, out_channels, stride=stride)]
            for _ in range(1, num_blocks):
                layers += [block(out_channels, out_channels)]
            return nn.Sequential(*layers)

        features = nn.Sequential(
            OrderedDict([
                ('stem',
                 nn.Sequential(
                     OrderedDict([
                         ('conv',
                          ConvBnReLU2d(in_channels,
                                       init_channels,
                                       7,
                                       stride=2,
                                       padding=3)),
                         ('pool',
                          nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
                     ]))),
                ('layer1',
                 make_layer(init_channels,
                            block_channels[0],
                            block_depth[0],
                            stride=1)),
                ('layer2',
                 make_layer(block_channels[0], block_channels[1],
                            block_depth[1])),
                ('layer3',
                 make_layer(block_channels[1], block_channels[2],
                            block_depth[2])),
                ('layer4',
                 make_layer(block_channels[2], block_channels[3],
                            block_depth[3])),
            ]))

        classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(block_channels[3], num_classes),
        )

        super().__init__(
            OrderedDict([
                ('features', features),
                ('classifier', classifier),
            ]))
Example 29
    def __init__(self, in_channels, out_channels, scale_factor):
        super().__init__()
        lowres_channels, highres_channels = in_channels

        self.lowres = nn.Sequential(
            ConvBnReLU2d(lowres_channels,
                         lowres_channels,
                         3,
                         padding=scale_factor,
                         dilation=scale_factor,
                         groups=lowres_channels),
            ConvBn2d(lowres_channels, out_channels, 1),
        )

        self.highres = ConvBn2d(highres_channels, out_channels, 1)
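
A plausible forward pass for this fusion module, assuming the low-resolution path is bilinearly upsampled to the high-resolution grid before the two projections are summed and passed through a ReLU (torch.nn.functional imported as F):

    def forward(self, lowres, highres):
        # upsample the low-resolution path to the high-resolution grid
        lowres = F.interpolate(lowres, size=highres.shape[-2:],
                               mode='bilinear', align_corners=False)
        return F.relu(self.lowres(lowres) + self.highres(highres))
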
Example 30
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1):
        super(BasicBlock, self).__init__()

        self.conv1 = ConvBnReLU2d(in_channels,
                                  out_channels,
                                  3,
                                  padding=1,
                                  stride=stride)
        self.conv2 = ConvBn2d(out_channels, out_channels, 3, padding=1)

        self.downsample = (
            ConvBn2d(in_channels, out_channels, 1, stride=stride)
            if in_channels != out_channels or stride != 1 else nn.Identity())

        self.activation = nn.ReLU(inplace=True)
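
The matching forward pass is the standard residual wiring:

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        # projection shortcut when the shape changes, identity otherwise
        return self.activation(out + self.downsample(x))
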