Example #1
    def __init__(self, num_classes, in_channels, isa_channels, down_factor,
                 enable_auxiliary_loss):
        super(ISAHead, self).__init__()
        self.in_channels = in_channels[-1]
        inter_channels = self.in_channels // 4
        self.down_factor = down_factor
        self.enable_auxiliary_loss = enable_auxiliary_loss
        self.in_conv = layers.ConvBNReLU(self.in_channels,
                                         inter_channels,
                                         3,
                                         bias_attr=False)
        self.global_relation = SelfAttentionBlock(inter_channels, isa_channels)
        self.local_relation = SelfAttentionBlock(inter_channels, isa_channels)
        self.out_conv = layers.ConvBNReLU(inter_channels * 2,
                                          inter_channels,
                                          1,
                                          bias_attr=False)
        self.cls = nn.Sequential(nn.Dropout2D(p=0.1),
                                 nn.Conv2D(inter_channels, num_classes, 1))
        self.aux = nn.Sequential(
            layers.ConvBNReLU(in_channels=1024,
                              out_channels=256,
                              kernel_size=3,
                              bias_attr=False), nn.Dropout2D(p=0.1),
            nn.Conv2D(256, num_classes, 1))
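
A note on the recurring regularizer: every head in this listing uses nn.Dropout2D, which zeroes entire feature maps (channels) of an NCHW tensor rather than independent activations, and rescales the survivors during training. A minimal standalone sketch of that behavior (pure paddle.nn, the same API these snippets rely on):

import paddle
import paddle.nn as nn

paddle.seed(42)
drop = nn.Dropout2D(p=0.5)      # drops whole channels of an NCHW input
x = paddle.ones([1, 4, 2, 2])

drop.train()
print(drop(x))                  # dropped channels are all-zero; survivors scaled by 1/(1-p)

drop.eval()
print(drop(x))                  # identity at inference time
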
Example #2
    def __init__(self,
                 num_classes,
                 in_channels,
                 ema_channels,
                 gc_channels,
                 num_bases,
                 stage_num,
                 momentum,
                 concat_input=True,
                 enable_auxiliary_loss=True):
        super(EMAHead, self).__init__()

        self.in_channels = in_channels[-1]
        self.concat_input = concat_input
        self.enable_auxiliary_loss = enable_auxiliary_loss

        self.emau = EMAU(ema_channels, num_bases, stage_num, momentum=momentum)
        self.ema_in_conv = layers.ConvBNReLU(
            in_channels=self.in_channels,
            out_channels=ema_channels,
            kernel_size=3)
        self.ema_mid_conv = nn.Conv2D(ema_channels, ema_channels, kernel_size=1)
        self.ema_out_conv = layers.ConvBNReLU(
            in_channels=ema_channels, out_channels=ema_channels, kernel_size=1)
        self.bottleneck = layers.ConvBNReLU(
            in_channels=ema_channels, out_channels=gc_channels, kernel_size=3)
        self.cls = nn.Sequential(
            nn.Dropout2D(p=0.1), nn.Conv2D(gc_channels, num_classes, 1))
        self.aux = nn.Sequential(
            layers.ConvBNReLU(
                in_channels=1024, out_channels=256, kernel_size=3),
            nn.Dropout2D(p=0.1), nn.Conv2D(256, num_classes, 1))
        if self.concat_input:
            self.conv_cat = layers.ConvBNReLU(
                self.in_channels + gc_channels, gc_channels, kernel_size=3)
Example #3
    def __init__(self, inp_dim, out_dim, n_layer=2, edge_dim=2, batch_norm=False, dropout=0.0):
        super(NodeUpdateNetwork, self).__init__()
        # set size
        self.edge_dim = edge_dim
        num_dims_list = [out_dim] * n_layer  # [num_features * r for r in ratio]
        if n_layer > 1:
            num_dims_list[0] = 2 * out_dim

        # layers
        layer_list = OrderedDict()
        for l in range(len(num_dims_list)):
            layer_list['conv{}'.format(l)] = nn.Conv2D(
                in_channels=num_dims_list[l - 1] if l > 0 else (self.edge_dim + 1) * inp_dim,
                out_channels=num_dims_list[l],
                kernel_size=1,
                bias_attr=False)
            if batch_norm:
                layer_list['norm{}'.format(l)] = nn.BatchNorm2D(num_features=num_dims_list[l])
            layer_list['relu{}'.format(l)] = nn.LeakyReLU()

            if dropout > 0 and l == (len(num_dims_list) - 1):
                layer_list['drop{}'.format(l)] = nn.Dropout2D(p=dropout)

        self.network = nn.Sequential()
        for i in layer_list:
            self.network.add_sublayer(i, layer_list[i])
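
The OrderedDict-plus-add_sublayer idiom above is how this snippet attaches named sublayers to an empty nn.Sequential. A minimal sketch of the same pattern (the channel counts and shapes are invented for illustration):

from collections import OrderedDict

import paddle
import paddle.nn as nn

layer_list = OrderedDict()
layer_list['conv0'] = nn.Conv2D(8, 16, kernel_size=1, bias_attr=False)
layer_list['relu0'] = nn.LeakyReLU()
layer_list['drop0'] = nn.Dropout2D(p=0.3)

network = nn.Sequential()
for name in layer_list:
    network.add_sublayer(name, layer_list[name])

x = paddle.randn([2, 8, 5, 5])
print(network(x).shape)         # [2, 16, 5, 5]

Paddle's nn.Sequential also accepts (name, layer) tuples directly, as Example #18 shows with module1, so the explicit loop is only needed when the dict is built up incrementally as it is here.
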
Example #4
    def __init__(self,
                 num_classes,
                 backbone,
                 embedding_dim,
                 align_corners=False,
                 pretrained=None):
        super(SegFormer, self).__init__()

        self.pretrained = pretrained
        self.align_corners = align_corners
        self.backbone = backbone
        self.num_classes = num_classes
        c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = self.backbone.feat_channels

        self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=embedding_dim)
        self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=embedding_dim)
        self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=embedding_dim)
        self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=embedding_dim)

        self.dropout = nn.Dropout2D(0.1)
        self.linear_fuse = layers.ConvBNReLU(in_channels=embedding_dim * 4,
                                             out_channels=embedding_dim,
                                             kernel_size=1,
                                             bias_attr=False)

        self.linear_pred = nn.Conv2D(embedding_dim,
                                     self.num_classes,
                                     kernel_size=1)

        self.init_weight()
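
linear_fuse takes embedding_dim * 4 input channels because the SegFormer decoder concatenates the four per-stage MLP embeddings along the channel axis after resizing them to a common grid. A shape check under that assumption (batch size, spatial size, and embedding_dim below are placeholders):

import paddle

embedding_dim = 256
cs = [paddle.randn([2, embedding_dim, 64, 64]) for _ in range(4)]
fused_in = paddle.concat(cs, axis=1)
print(fused_in.shape)           # [2, 1024, 64, 64], i.e. embedding_dim * 4 channels
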
Example #5
    def __init__(self, in_features, hidden_features, n_layer=3, top_k=-1,
                 edge_dim=2, batch_norm=False, dropout=0.0, adj_type='dist', activation='softmax'):
        super(EdgeUpdateNetwork, self).__init__()
        self.top_k = top_k
        self.adj_type = adj_type
        self.edge_dim = edge_dim
        self.activation = activation

        num_dims_list = [hidden_features] * n_layer  # [num_features * r for r in ratio]
        if n_layer > 1:
            num_dims_list[0] = 2 * hidden_features
        if n_layer > 3:
            num_dims_list[1] = 2 * hidden_features
        # layers
        layer_list = OrderedDict()
        for l in range(len(num_dims_list)):
            # set layer
            layer_list['conv{}'.format(l)] = nn.Conv2D(in_channels=num_dims_list[l - 1] if l > 0 else in_features,
                                                       out_channels=num_dims_list[l],
                                                       kernel_size=1,
                                                       bias_attr=False)
            if batch_norm:
                layer_list['norm{}'.format(l)] = nn.BatchNorm2D(num_features=num_dims_list[l])
            layer_list['relu{}'.format(l)] = nn.LeakyReLU()

            if dropout > 0:
                layer_list['drop{}'.format(l)] = nn.Dropout2D(p=dropout)

        layer_list['conv_out'] = nn.Conv2D(in_channels=num_dims_list[-1],
                                           out_channels=1,
                                           kernel_size=1)
        self.sim_network = nn.Sequential()
        for i in layer_list:
            self.sim_network.add_sublayer(i, layer_list[i])
Example #6
    def __init__(self,
                 in_channels,
                 num_classes,
                 backbone,
                 drop_prob,
                 proj_dim,
                 align_corners=False,
                 pretrained=None):
        super().__init__()
        self.in_channels = in_channels
        self.backbone = backbone
        self.num_classes = num_classes
        self.proj_dim = proj_dim
        self.align_corners = align_corners

        self.cls_head = nn.Sequential(
            layers.ConvBNReLU(
                in_channels, in_channels, kernel_size=3, stride=1, padding=1),
            nn.Dropout2D(drop_prob),
            nn.Conv2D(
                in_channels,
                num_classes,
                kernel_size=1,
                stride=1,
                bias_attr=False),
        )
        self.proj_head = ProjectionHead(
            dim_in=in_channels, proj_dim=self.proj_dim)

        self.pretrained = pretrained
        self.init_weight()
Example #7
    def __init__(self,
                 num_classes,
                 in_channels=3,
                 scale=1.0,
                 drop_prob=0.1,
                 pretrained=None):
        super().__init__()
        self.backbone = EESPNetBackbone(in_channels, drop_prob, scale)
        self.in_channels = self.backbone.out_channels
        self.proj_l4_c = layers.ConvBNPReLU(self.in_channels[3],
                                            self.in_channels[2],
                                            1,
                                            stride=1,
                                            bias_attr=False)
        psp_size = 2 * self.in_channels[2]
        self.eesp_psp = nn.Sequential(
            EESP(psp_size,
                 psp_size // 2,
                 stride=1,
                 branches=4,
                 kernel_size_maximum=7),
            PSPModule(psp_size // 2, psp_size // 2),
        )

        self.project_l3 = nn.Sequential(
            nn.Dropout2D(p=drop_prob),
            nn.Conv2D(psp_size // 2, num_classes, 1, 1, bias_attr=False),
        )
        self.act_l3 = BNPReLU(num_classes)
        self.project_l2 = layers.ConvBNPReLU(self.in_channels[1] + num_classes,
                                             num_classes,
                                             1,
                                             stride=1,
                                             bias_attr=False)
        self.project_l1 = nn.Sequential(
            nn.Dropout2D(p=drop_prob),
            nn.Conv2D(self.in_channels[0] + num_classes,
                      num_classes,
                      1,
                      1,
                      bias_attr=False),
        )

        self.pretrained = pretrained

        self.init_weight()
Example #8
    def __init__(self,
                 block,
                 depth,
                 num_classes=1000,
                 with_pool=True,
                 dropout=0.5):
        super(ResNet, self).__init__()
        layer_cfg = {
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3]
        }
        layers = layer_cfg[depth]
        self.num_classes = num_classes
        self.with_pool = with_pool
        self._norm_layer = nn.BatchNorm2D

        self.inplanes = 64
        self.dilation = 1
        self.bn0 = nn.BatchNorm2D(128)
        self.conv1 = nn.Conv2D(1,
                               self.inplanes,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias_attr=False)
        self.bn1 = self._norm_layer(self.inplanes)
        self.relu = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.drop1 = nn.Dropout2D(dropout)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.drop2 = nn.Dropout2D(dropout)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.drop3 = nn.Dropout2D(dropout)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.drop4 = nn.Dropout2D(dropout)
        self.drop = nn.Dropout(dropout)
        self.extra_fc = nn.Linear(512 * block.expansion, 1024 * 2)
        if with_pool:
            self.avgpool = nn.AdaptiveAvgPool2D((1, 1))
        if num_classes > 0:
            self.fc = nn.Linear(1024 * 2, num_classes)
Example #9
    def __init__(self,
                 num_classes,
                 in_channels,
                 reduction,
                 use_scale,
                 mode,
                 temperature,
                 concat_input=True,
                 enable_auxiliary_loss=True,
                 **kwargs):
        super(DNLHead, self).__init__()
        self.in_channels = in_channels[-1]
        self.concat_input = concat_input
        self.enable_auxiliary_loss = enable_auxiliary_loss
        inter_channels = self.in_channels // 4

        self.dnl_block = DisentangledNonLocal2D(in_channels=inter_channels,
                                                reduction=reduction,
                                                use_scale=use_scale,
                                                temperature=temperature,
                                                mode=mode)
        self.conv0 = layers.ConvBNReLU(in_channels=self.in_channels,
                                       out_channels=inter_channels,
                                       kernel_size=3,
                                       bias_attr=False)
        self.conv1 = layers.ConvBNReLU(in_channels=inter_channels,
                                       out_channels=inter_channels,
                                       kernel_size=3,
                                       bias_attr=False)
        self.cls = nn.Sequential(nn.Dropout2D(p=0.1),
                                 nn.Conv2D(inter_channels, num_classes, 1))
        self.aux = nn.Sequential(
            layers.ConvBNReLU(in_channels=1024,
                              out_channels=256,
                              kernel_size=3,
                              bias_attr=False), nn.Dropout2D(p=0.1),
            nn.Conv2D(256, num_classes, 1))
        if self.concat_input:
            self.conv_cat = layers.ConvBNReLU(self.in_channels +
                                              inter_channels,
                                              inter_channels,
                                              kernel_size=3,
                                              bias_attr=False)
Example #10
    def __init__(self,
                 in_channels,
                 out_channels,
                 internal_ratio=4,
                 return_indices=False,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super(DownsamplingBottleneck, self).__init__()

        self.return_indices = return_indices

        if internal_ratio <= 1 or internal_ratio > in_channels:
            raise RuntimeError(
                "Value out of range. Expected value in the "
                "interval (1, {0}], got internal_ratio={1}.".format(
                    in_channels, internal_ratio))

        internal_channels = in_channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        self.main_max1 = nn.MaxPool2D(2, stride=2, return_mask=return_indices)

        self.ext_conv1 = nn.Sequential(
            nn.Conv2D(in_channels,
                      internal_channels,
                      kernel_size=2,
                      stride=2,
                      bias_attr=bias), layers.SyncBatchNorm(internal_channels),
            activation())

        self.ext_conv2 = nn.Sequential(
            nn.Conv2D(internal_channels,
                      internal_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias_attr=bias), layers.SyncBatchNorm(internal_channels),
            activation())

        self.ext_conv3 = nn.Sequential(
            nn.Conv2D(internal_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=bias), layers.SyncBatchNorm(out_channels),
            activation())

        self.ext_regul = nn.Dropout2D(p=dropout_prob)

        self.out_activation = activation()
Example #11
    def __init__(self,
                 in_channels: int,
                 key_channels: int,
                 out_channels: int,
                 dropout_rate: float = 0.1):
        super().__init__()

        self.attention_block = ObjectAttentionBlock(in_channels, key_channels)
        self.conv1x1 = nn.Sequential(
            L.ConvBNReLU(2 * in_channels, out_channels, 1),
            nn.Dropout2D(dropout_rate))
Example #12
    def __init__(self, in_channels, r=2, s=4, k=4, dropout=0.2, **kwargs):
        super().__init__(in_channels)

        inter_channels = in_channels // r
        self.conv1 = nn.Sequential(nn.Conv2D(in_channels, inter_channels, 1),
                                   nn.BatchNorm2D(inter_channels),
                                   nn.ReLU())
        self.batransform = BATransform(inter_channels, s, k)
        self.conv2 = nn.Sequential(nn.Conv2D(inter_channels, in_channels, 1),
                                   nn.BatchNorm2D(in_channels),
                                   nn.ReLU())
        self.dropout = nn.Dropout2D(p=dropout)
Example #13
    def __init__(self,
                 in_channels,
                 key_channels,
                 out_channels,
                 dropout_rate=0.1):
        super().__init__()

        self.attention_block = ObjectAttentionBlock(in_channels, key_channels)
        self.dropout_rate = dropout_rate
        self.conv1x1 = nn.Sequential(
            layers.ConvBNReLU(2 * in_channels, out_channels, 1),
            nn.Dropout2D(dropout_rate))
Example #14
    def __init__(
        self,
        num_class=19,
        feature_strides=[4, 8, 16, 32],
        in_channels=[256, 256, 256, 256],
        channels=128,
        in_index=[0, 1, 2, 3],
        dropout_ratio=0.1,
        conv_cfg='Conv2D',
        input_transform='multiple_select',
        align_corners=False,
    ):
        super(FPNHead, self).__init__()
        assert len(feature_strides) == len(in_channels)
        assert min(feature_strides) == feature_strides[0]
        self.feature_strides = feature_strides
        self.in_channels = in_channels
        self.channels = channels
        self.in_index = in_index
        self.num_class = num_class
        self.conv_cfg = conv_cfg
        self.dropout_ratio = dropout_ratio
        self.input_transform = input_transform
        self.align_corners = align_corners
        self.scale_heads = nn.LayerList()

        for i in range(len(feature_strides)):
            head_length = max(
                1,
                int(np.log2(feature_strides[i]) - np.log2(feature_strides[0])))
            scale_head = []
            for k in range(head_length):
                scale_head.append(
                    ConvModule(
                        self.in_channels[i] if k == 0 else self.channels,
                        self.channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg))
                if feature_strides[i] != feature_strides[0]:
                    scale_head.append(
                        Upsample(scale_factor=2,
                                 mode='bilinear',
                                 align_corners=self.align_corners))
            self.scale_heads.append(nn.Sequential(*scale_head))

        self.conv_seg = nn.Conv2D(self.channels, self.num_class, kernel_size=1)

        if self.dropout_ratio is not None:
            self.dropout = nn.Dropout2D(self.dropout_ratio)
        else:
            self.dropout = None
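
head_length gives each branch one ConvModule per octave between its stride and the finest stride, with a 2x Upsample appended after every stage on the coarser branches. For the default strides it evaluates as follows (a quick check, nothing assumed beyond numpy):

import numpy as np

feature_strides = [4, 8, 16, 32]
head_lengths = [
    max(1, int(np.log2(s) - np.log2(feature_strides[0])))
    for s in feature_strides
]
print(head_lengths)             # [1, 1, 2, 3]
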
Example #15
    def __init__(self, num_classes, in_channels):
        super().__init__()
        in_channels = in_channels[-1]
        inter_channels = in_channels // 4

        self.channel_conv = layers.ConvBNReLU(in_channels, inter_channels, 3)
        self.position_conv = layers.ConvBNReLU(in_channels, inter_channels, 3)
        self.pam = PAM(inter_channels)
        self.cam = CAM()
        self.conv1 = layers.ConvBNReLU(inter_channels, inter_channels, 3)
        self.conv2 = layers.ConvBNReLU(inter_channels, inter_channels, 3)

        self.aux_head = nn.Sequential(nn.Dropout2D(0.1),
                                      nn.Conv2D(in_channels, num_classes, 1))

        self.aux_head_pam = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))

        self.aux_head_cam = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))

        self.cls_head = nn.Sequential(
            nn.Dropout2D(0.1), nn.Conv2D(inter_channels, num_classes, 1))
Example #16
    def __init__(self,
                 in_channels,
                 key_channels,
                 out_channels,
                 scale=1,
                 dropout=0.1,
                 norm_layer=nn.BatchNorm2D,
                 align_corners=True):
        super(SpatialOCR_Module, self).__init__()
        self.object_context_block = ObjectAttentionBlock2D(
            in_channels, key_channels, scale, norm_layer, align_corners)
        _in_channels = 2 * in_channels
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2D(_in_channels,
                      out_channels,
                      kernel_size=1,
                      padding=0,
                      bias_attr=False),
            nn.Sequential(norm_layer(out_channels), nn.ReLU()),
            nn.Dropout2D(dropout))
Example #17
    def __init__(self,
                 channels,
                 internal_ratio=4,
                 kernel_size=3,
                 padding=0,
                 dilation=1,
                 asymmetric=False,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super(RegularBottleneck, self).__init__()

        if internal_ratio <= 1 or internal_ratio > channels:
            raise RuntimeError(
                "Value out of range. Expected value in the "
                "interval (1, {0}], got internal_ratio={1}.".format(
                    channels, internal_ratio))

        internal_channels = channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        self.ext_conv1 = nn.Sequential(
            nn.Conv2D(channels,
                      internal_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=bias), layers.SyncBatchNorm(internal_channels),
            activation())

        if asymmetric:
            self.ext_conv2 = nn.Sequential(
                nn.Conv2D(internal_channels,
                          internal_channels,
                          kernel_size=(kernel_size, 1),
                          stride=1,
                          padding=(padding, 0),
                          dilation=dilation,
                          bias_attr=bias),
                layers.SyncBatchNorm(internal_channels), activation(),
                nn.Conv2D(internal_channels,
                          internal_channels,
                          kernel_size=(1, kernel_size),
                          stride=1,
                          padding=(0, padding),
                          dilation=dilation,
                          bias_attr=bias),
                layers.SyncBatchNorm(internal_channels), activation())
        else:
            self.ext_conv2 = nn.Sequential(
                nn.Conv2D(internal_channels,
                          internal_channels,
                          kernel_size=kernel_size,
                          stride=1,
                          padding=padding,
                          dilation=dilation,
                          bias_attr=bias),
                layers.SyncBatchNorm(internal_channels), activation())

        self.ext_conv3 = nn.Sequential(
            nn.Conv2D(internal_channels,
                      channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=bias), layers.SyncBatchNorm(channels),
            activation())

        self.ext_regul = nn.Dropout2D(p=dropout_prob)

        self.out_activation = activation()
Example #18
    def func_test_layer_str(self):
        module = nn.ELU(0.2)
        self.assertEqual(str(module), 'ELU(alpha=0.2)')

        module = nn.CELU(0.2)
        self.assertEqual(str(module), 'CELU(alpha=0.2)')

        module = nn.GELU(True)
        self.assertEqual(str(module), 'GELU(approximate=True)')

        module = nn.Hardshrink()
        self.assertEqual(str(module), 'Hardshrink(threshold=0.5)')

        module = nn.Hardswish(name="Hardswish")
        self.assertEqual(str(module), 'Hardswish(name=Hardswish)')

        module = nn.Tanh(name="Tanh")
        self.assertEqual(str(module), 'Tanh(name=Tanh)')

        module = nn.Hardtanh(name="Hardtanh")
        self.assertEqual(str(module),
                         'Hardtanh(min=-1.0, max=1.0, name=Hardtanh)')

        module = nn.PReLU(1, 0.25, name="PReLU", data_format="NCHW")
        self.assertEqual(
            str(module),
            'PReLU(num_parameters=1, data_format=NCHW, init=0.25, dtype=float32, name=PReLU)'
        )

        module = nn.ReLU()
        self.assertEqual(str(module), 'ReLU()')

        module = nn.ReLU6()
        self.assertEqual(str(module), 'ReLU6()')

        module = nn.SELU()
        self.assertEqual(
            str(module),
            'SELU(scale=1.0507009873554805, alpha=1.6732632423543772)')

        module = nn.LeakyReLU()
        self.assertEqual(str(module), 'LeakyReLU(negative_slope=0.01)')

        module = nn.Sigmoid()
        self.assertEqual(str(module), 'Sigmoid()')

        module = nn.Hardsigmoid()
        self.assertEqual(str(module), 'Hardsigmoid()')

        module = nn.Softplus()
        self.assertEqual(str(module), 'Softplus(beta=1, threshold=20)')

        module = nn.Softshrink()
        self.assertEqual(str(module), 'Softshrink(threshold=0.5)')

        module = nn.Softsign()
        self.assertEqual(str(module), 'Softsign()')

        module = nn.Swish()
        self.assertEqual(str(module), 'Swish()')

        module = nn.Tanhshrink()
        self.assertEqual(str(module), 'Tanhshrink()')

        module = nn.ThresholdedReLU()
        self.assertEqual(str(module), 'ThresholdedReLU(threshold=1.0)')

        module = nn.LogSigmoid()
        self.assertEqual(str(module), 'LogSigmoid()')

        module = nn.Softmax()
        self.assertEqual(str(module), 'Softmax(axis=-1)')

        module = nn.LogSoftmax()
        self.assertEqual(str(module), 'LogSoftmax(axis=-1)')

        module = nn.Maxout(groups=2)
        self.assertEqual(str(module), 'Maxout(groups=2, axis=1)')

        module = nn.Linear(2, 4, name='linear')
        self.assertEqual(
            str(module),
            'Linear(in_features=2, out_features=4, dtype=float32, name=linear)'
        )

        module = nn.Upsample(size=[12, 12])
        self.assertEqual(
            str(module),
            'Upsample(size=[12, 12], mode=nearest, align_corners=False, align_mode=0, data_format=NCHW)'
        )

        module = nn.UpsamplingNearest2D(size=[12, 12])
        self.assertEqual(
            str(module),
            'UpsamplingNearest2D(size=[12, 12], data_format=NCHW)')

        module = nn.UpsamplingBilinear2D(size=[12, 12])
        self.assertEqual(
            str(module),
            'UpsamplingBilinear2D(size=[12, 12], data_format=NCHW)')

        module = nn.Bilinear(in1_features=5, in2_features=4, out_features=1000)
        self.assertEqual(
            str(module),
            'Bilinear(in1_features=5, in2_features=4, out_features=1000, dtype=float32)'
        )

        module = nn.Dropout(p=0.5)
        self.assertEqual(str(module),
                         'Dropout(p=0.5, axis=None, mode=upscale_in_train)')

        module = nn.Dropout2D(p=0.5)
        self.assertEqual(str(module), 'Dropout2D(p=0.5, data_format=NCHW)')

        module = nn.Dropout3D(p=0.5)
        self.assertEqual(str(module), 'Dropout3D(p=0.5, data_format=NCDHW)')

        module = nn.AlphaDropout(p=0.5)
        self.assertEqual(str(module), 'AlphaDropout(p=0.5)')

        module = nn.Pad1D(padding=[1, 2], mode='constant')
        self.assertEqual(
            str(module),
            'Pad1D(padding=[1, 2], mode=constant, value=0.0, data_format=NCL)')

        module = nn.Pad2D(padding=[1, 0, 1, 2], mode='constant')
        self.assertEqual(
            str(module),
            'Pad2D(padding=[1, 0, 1, 2], mode=constant, value=0.0, data_format=NCHW)'
        )

        module = nn.ZeroPad2D(padding=[1, 0, 1, 2])
        self.assertEqual(str(module),
                         'ZeroPad2D(padding=[1, 0, 1, 2], data_format=NCHW)')

        module = nn.Pad3D(padding=[1, 0, 1, 2, 0, 0], mode='constant')
        self.assertEqual(
            str(module),
            'Pad3D(padding=[1, 0, 1, 2, 0, 0], mode=constant, value=0.0, data_format=NCDHW)'
        )

        module = nn.CosineSimilarity(axis=0)
        self.assertEqual(str(module), 'CosineSimilarity(axis=0, eps=1e-08)')

        module = nn.Embedding(10, 3, sparse=True)
        self.assertEqual(str(module), 'Embedding(10, 3, sparse=True)')

        module = nn.Conv1D(3, 2, 3)
        self.assertEqual(str(module),
                         'Conv1D(3, 2, kernel_size=[3], data_format=NCL)')

        module = nn.Conv1DTranspose(2, 1, 2)
        self.assertEqual(
            str(module),
            'Conv1DTranspose(2, 1, kernel_size=[2], data_format=NCL)')

        module = nn.Conv2D(4, 6, (3, 3))
        self.assertEqual(str(module),
                         'Conv2D(4, 6, kernel_size=[3, 3], data_format=NCHW)')

        module = nn.Conv2DTranspose(4, 6, (3, 3))
        self.assertEqual(
            str(module),
            'Conv2DTranspose(4, 6, kernel_size=[3, 3], data_format=NCHW)')

        module = nn.Conv3D(4, 6, (3, 3, 3))
        self.assertEqual(
            str(module),
            'Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)')

        module = nn.Conv3DTranspose(4, 6, (3, 3, 3))
        self.assertEqual(
            str(module),
            'Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)')

        module = nn.PairwiseDistance()
        self.assertEqual(str(module), 'PairwiseDistance(p=2.0)')

        module = nn.InstanceNorm1D(2)
        self.assertEqual(str(module),
                         'InstanceNorm1D(num_features=2, epsilon=1e-05)')

        module = nn.InstanceNorm2D(2)
        self.assertEqual(str(module),
                         'InstanceNorm2D(num_features=2, epsilon=1e-05)')

        module = nn.InstanceNorm3D(2)
        self.assertEqual(str(module),
                         'InstanceNorm3D(num_features=2, epsilon=1e-05)')

        module = nn.GroupNorm(num_channels=6, num_groups=6)
        self.assertEqual(
            str(module),
            'GroupNorm(num_groups=6, num_channels=6, epsilon=1e-05)')

        module = nn.LayerNorm([2, 2, 3])
        self.assertEqual(
            str(module),
            'LayerNorm(normalized_shape=[2, 2, 3], epsilon=1e-05)')

        module = nn.BatchNorm1D(1)
        self.assertEqual(
            str(module),
            'BatchNorm1D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCL)'
        )

        module = nn.BatchNorm2D(1)
        self.assertEqual(
            str(module),
            'BatchNorm2D(num_features=1, momentum=0.9, epsilon=1e-05)')

        module = nn.BatchNorm3D(1)
        self.assertEqual(
            str(module),
            'BatchNorm3D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCDHW)'
        )

        module = nn.SyncBatchNorm(2)
        self.assertEqual(
            str(module),
            'SyncBatchNorm(num_features=2, momentum=0.9, epsilon=1e-05)')

        module = nn.LocalResponseNorm(size=5)
        self.assertEqual(
            str(module),
            'LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1.0)')

        module = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'AvgPool1D(kernel_size=2, stride=2, padding=0)')

        module = nn.AvgPool2D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'AvgPool2D(kernel_size=2, stride=2, padding=0)')

        module = nn.AvgPool3D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'AvgPool3D(kernel_size=2, stride=2, padding=0)')

        module = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'MaxPool1D(kernel_size=2, stride=2, padding=0)')

        module = nn.MaxPool2D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'MaxPool2D(kernel_size=2, stride=2, padding=0)')

        module = nn.MaxPool3D(kernel_size=2, stride=2, padding=0)
        self.assertEqual(str(module),
                         'MaxPool3D(kernel_size=2, stride=2, padding=0)')

        module = nn.AdaptiveAvgPool1D(output_size=16)
        self.assertEqual(str(module), 'AdaptiveAvgPool1D(output_size=16)')

        module = nn.AdaptiveAvgPool2D(output_size=3)
        self.assertEqual(str(module), 'AdaptiveAvgPool2D(output_size=3)')

        module = nn.AdaptiveAvgPool3D(output_size=3)
        self.assertEqual(str(module), 'AdaptiveAvgPool3D(output_size=3)')

        module = nn.AdaptiveMaxPool1D(output_size=16, return_mask=True)
        self.assertEqual(
            str(module), 'AdaptiveMaxPool1D(output_size=16, return_mask=True)')

        module = nn.AdaptiveMaxPool2D(output_size=3, return_mask=True)
        self.assertEqual(str(module),
                         'AdaptiveMaxPool2D(output_size=3, return_mask=True)')

        module = nn.AdaptiveMaxPool3D(output_size=3, return_mask=True)
        self.assertEqual(str(module),
                         'AdaptiveMaxPool3D(output_size=3, return_mask=True)')

        module = nn.SimpleRNNCell(16, 32)
        self.assertEqual(str(module), 'SimpleRNNCell(16, 32)')

        module = nn.LSTMCell(16, 32)
        self.assertEqual(str(module), 'LSTMCell(16, 32)')

        module = nn.GRUCell(16, 32)
        self.assertEqual(str(module), 'GRUCell(16, 32)')

        module = nn.PixelShuffle(3)
        self.assertEqual(str(module), 'PixelShuffle(upscale_factor=3)')

        module = nn.SimpleRNN(16, 32, 2)
        self.assertEqual(
            str(module),
            'SimpleRNN(16, 32, num_layers=2\n  (0): RNN(\n    (cell): SimpleRNNCell(16, 32)\n  )\n  (1): RNN(\n    (cell): SimpleRNNCell(32, 32)\n  )\n)'
        )

        module = nn.LSTM(16, 32, 2)
        self.assertEqual(
            str(module),
            'LSTM(16, 32, num_layers=2\n  (0): RNN(\n    (cell): LSTMCell(16, 32)\n  )\n  (1): RNN(\n    (cell): LSTMCell(32, 32)\n  )\n)'
        )

        module = nn.GRU(16, 32, 2)
        self.assertEqual(
            str(module),
            'GRU(16, 32, num_layers=2\n  (0): RNN(\n    (cell): GRUCell(16, 32)\n  )\n  (1): RNN(\n    (cell): GRUCell(32, 32)\n  )\n)'
        )

        module1 = nn.Sequential(
            ('conv1', nn.Conv2D(1, 20, 5)), ('relu1', nn.ReLU()),
            ('conv2', nn.Conv2D(20, 64, 5)), ('relu2', nn.ReLU()))
        self.assertEqual(
            str(module1),
            'Sequential(\n  '\
            '(conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n  '\
            '(relu1): ReLU()\n  '\
            '(conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n  '\
            '(relu2): ReLU()\n)'
        )

        module2 = nn.Sequential(
            nn.Conv3DTranspose(4, 6, (3, 3, 3)),
            nn.AvgPool3D(kernel_size=2, stride=2, padding=0),
            nn.Tanh(name="Tanh"), module1, nn.Conv3D(4, 6, (3, 3, 3)),
            nn.MaxPool3D(kernel_size=2, stride=2, padding=0), nn.GELU(True))
        self.assertEqual(
            str(module2),
            'Sequential(\n  '\
            '(0): Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n  '\
            '(1): AvgPool3D(kernel_size=2, stride=2, padding=0)\n  '\
            '(2): Tanh(name=Tanh)\n  '\
            '(3): Sequential(\n    (conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n    (relu1): ReLU()\n'\
            '    (conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n    (relu2): ReLU()\n  )\n  '\
            '(4): Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n  '\
            '(5): MaxPool3D(kernel_size=2, stride=2, padding=0)\n  '\
            '(6): GELU(approximate=True)\n)'
        )
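
Every string asserted above is the layer's repr, which paddle.nn.Layer assembles from the layer's extra_repr() plus the reprs of its sublayers. Custom layers can opt into the same printing convention; a minimal sketch (the Scaler class is invented for illustration, extra_repr is the hook, assuming current Paddle behavior):

import paddle.nn as nn

class Scaler(nn.Layer):
    def __init__(self, factor=2.0):
        super().__init__()
        self.factor = factor

    def forward(self, x):
        return x * self.factor

    def extra_repr(self):
        return 'factor={}'.format(self.factor)

print(Scaler(3.0))              # Scaler(factor=3.0)
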
Example #19
    def __init__(self,
                 num_class,
                 fpn_inplanes,
                 channels,
                 dropout_ratio=0.1,
                 fpn_dim=256,
                 enable_auxiliary_loss=False,
                 align_corners=False):
        super(PFPNHead, self).__init__()
        self.enable_auxiliary_loss = enable_auxiliary_loss
        self.align_corners = align_corners
        self.lateral_convs = nn.LayerList()
        self.fpn_out = nn.LayerList()

        for fpn_inplane in fpn_inplanes:
            self.lateral_convs.append(
                nn.Sequential(nn.Conv2D(fpn_inplane, fpn_dim, 1),
                              layers.SyncBatchNorm(fpn_dim), nn.ReLU()))
            self.fpn_out.append(
                nn.Sequential(
                    layers.ConvBNReLU(fpn_dim, fpn_dim, 3, bias_attr=False)))

        self.scale_heads = nn.LayerList()
        for index in range(len(fpn_inplanes)):
            head_length = max(
                1,
                int(np.log2(fpn_inplanes[index]) - np.log2(fpn_inplanes[0])))
            scale_head = nn.LayerList()
            for head_index in range(head_length):
                scale_head.append(
                    layers.ConvBNReLU(
                        fpn_dim,
                        channels,
                        3,
                        padding=1,
                    ))
                if fpn_inplanes[index] != fpn_inplanes[0]:
                    scale_head.append(
                        nn.Upsample(scale_factor=2,
                                    mode='bilinear',
                                    align_corners=align_corners))
            self.scale_heads.append(nn.Sequential(*scale_head))

        if dropout_ratio:
            self.dropout = nn.Dropout2D(dropout_ratio)
            if self.enable_auxiliary_loss:
                self.dsn = nn.Sequential(
                    layers.ConvBNReLU(fpn_inplanes[2],
                                      fpn_inplanes[2],
                                      3,
                                      padding=1), nn.Dropout2D(dropout_ratio),
                    nn.Conv2D(fpn_inplanes[2], num_class, kernel_size=1))
        else:
            self.dropout = None
            if self.enable_auxiliary_loss:
                self.dsn = nn.Sequential(
                    layers.ConvBNReLU(fpn_inplanes[2],
                                      fpn_inplanes[2],
                                      3,
                                      padding=1),
                    nn.Conv2D(fpn_inplanes[2], num_class, kernel_size=1))

        self.conv_last = nn.Sequential(
            layers.ConvBNReLU(len(fpn_inplanes) * fpn_dim,
                              fpn_dim,
                              3,
                              bias_attr=False),
            nn.Conv2D(fpn_dim, num_class, kernel_size=1))
        self.conv_seg = nn.Conv2D(channels, num_class, kernel_size=1)