Example #1
 def __init__(self, ch_in, ch_out):
     super(ConvBlock, self).__init__()
     self.conv = nn.Sequential(
         nn.Conv2D(ch_in, ch_out, kernel_size=3, stride=1, padding=1),
         nn.BatchNorm2D(ch_out), nn.ReLU(),
         nn.Conv2D(ch_out, ch_out, kernel_size=3, stride=1, padding=1),
         nn.BatchNorm2D(ch_out), nn.ReLU())
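A minimal usage sketch for ConvBlock (input shape is illustrative; only the self.conv Sequential defined above is exercised, so nothing is assumed about the class's forward method):

import paddle

block = ConvBlock(ch_in=3, ch_out=64)
x = paddle.randn([1, 3, 32, 32])   # NCHW input, size chosen for illustration
y = block.conv(x)                  # two Conv3x3 + BN + ReLU stages; padding=1 keeps H and W
print(y.shape)                     # [1, 64, 32, 32]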
Example #2
 def __init__(self, num_classes=16, max_points=1024):
     super(VFE_Clas, self).__init__()
     self.vfe = VFE(max_points=max_points)
     self.fc = nn.Sequential(nn.Linear(max_points, 512),
                             nn.ReLU(), nn.Linear(512, 256),
                             nn.ReLU(), nn.Dropout(p=0.7),
                             nn.Linear(256, num_classes))
Example #3
    def __init__(self, block, depth, class_dim=1000, with_pool=True):
        super(RedNet, self).__init__(block=block,
                                     depth=50,
                                     num_classes=class_dim,
                                     with_pool=with_pool)
        layer_cfg = {
            26: [1, 2, 4, 1],
            38: [2, 3, 5, 2],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
        }
        layers = layer_cfg[depth]

        self.conv1 = None
        self.bn1 = None
        self.relu = None
        self.inplanes = 64
        self.class_dim = class_dim

        self.stem = nn.Sequential(
            nn.Sequential(
                (
                    "conv",
                    nn.Conv2D(
                        in_channels=3,
                        out_channels=self.inplanes // 2,
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        bias_attr=False,
                    ),
                ),
                ("bn", nn.BatchNorm2D(self.inplanes // 2)),
                ("activate", nn.ReLU()),
            ),
            Involution(self.inplanes // 2, 3, 1),
            nn.BatchNorm2D(self.inplanes // 2),
            nn.ReLU(),
            nn.Sequential(
                (
                    "conv",
                    nn.Conv2D(
                        in_channels=self.inplanes // 2,
                        out_channels=self.inplanes,
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        bias_attr=False,
                    ),
                ),
                ("bn", nn.BatchNorm2D(self.inplanes)),
                ("activate", nn.ReLU()),
            ),
        )

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
Example #4
def batch_relu_conv3d(in_channels,
                      out_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bn3d=True,
                      conv_param_attr=nn.initializer.KaimingNormal(),
                      conv_bias_attr=False,
                      bn_param_attr=None,
                      bn_bias_attr=None):
    if bn3d:
        # 3D batchnorm + relu + convolutional layer
        return nn.Sequential(
            nn.BatchNorm3D(num_features=in_channels), nn.ReLU(),
            nn.Conv3D(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      padding=padding,
                      stride=stride,
                      weight_attr=conv_param_attr,
                      bias_attr=conv_bias_attr))
    else:
        # 3D relu + convolutional layer
        return nn.Sequential(
            nn.ReLU(),
            nn.Conv3D(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      padding=padding,
                      stride=stride,
                      weight_attr=conv_param_attr,
                      bias_attr=conv_bias_attr))
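A hedged usage sketch for the factory above (channel counts and the input volume are illustrative, not from the source):

import paddle

block = batch_relu_conv3d(in_channels=8, out_channels=16)   # BN3D -> ReLU -> Conv3D
x = paddle.randn([2, 8, 16, 32, 32])                         # NCDHW volume
y = block(x)
print(y.shape)   # [2, 16, 16, 32, 32]; the default kernel 3 / stride 1 / padding 1 keeps D, H, W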
Example #5
    def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
        m = []
        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?
            for _ in range(int(math.log(scale, 2))):
                m.append(conv(n_feats, 4 * n_feats, 3, bias))
                m.append(nn.PixelShuffle(2))
                if bn: m.append(nn.BatchNorm2D(n_feats))

                if act == 'relu':
                    m.append(nn.ReLU())
                elif act == 'prelu':
                    m.append(nn.PReLU(n_feats))

        elif scale == 3:
            m.append(conv(n_feats, 9 * n_feats, 3, bias))
            m.append(nn.PixelShuffle(3))
            if bn: m.append(nn.BatchNorm2D(n_feats))

            if act == 'relu':
                m.append(nn.ReLU())
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
        else:
            raise NotImplementedError

        super(Upsampler, self).__init__(*m)
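A usage sketch for Upsampler. The super() call shows it subclasses nn.Sequential, so the instance is directly callable; the default_conv helper below is hypothetical, supplied only to satisfy the conv(n_feats, out_feats, kernel_size, bias) call signature used above:

import paddle
import paddle.nn as nn

def default_conv(in_channels, out_channels, kernel_size, bias=True):
    # hypothetical conv factory matching how `conv` is called above
    return nn.Conv2D(in_channels, out_channels, kernel_size,
                     padding=kernel_size // 2, bias_attr=bias)

up = Upsampler(default_conv, scale=4, n_feats=64)   # scale=4 -> two conv + PixelShuffle(2) stages
x = paddle.randn([1, 64, 24, 24])
print(up(x).shape)                                  # [1, 64, 96, 96]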
Example #6
 def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
     super(BasicBlock, self).__init__()
     self.bn1 = nn.BatchNorm2D(in_planes)
     self.relu1 = nn.ReLU()
     self.conv1 = nn.Conv2D(in_planes,
                            out_planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            weight_attr=nn.initializer.KaimingNormal())
     self.bn2 = nn.BatchNorm2D(out_planes)
     self.relu2 = nn.ReLU()
     self.conv2 = nn.Conv2D(out_planes,
                            out_planes,
                            kernel_size=3,
                            stride=1,
                            padding=1,
                            weight_attr=nn.initializer.KaimingNormal())
     self.droprate = dropRate
     self.equalInOut = (in_planes == out_planes)
     self.convShortcut = (not self.equalInOut) and nn.Conv2D(
         in_planes,
         out_planes,
         kernel_size=1,
         stride=stride,
         padding=0,
         weight_attr=nn.initializer.KaimingNormal()) or None
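An illustrative shape check for the pre-activation block above (values chosen so the 1x1 shortcut convolution is created):

import paddle

blk = BasicBlock(in_planes=16, out_planes=32, stride=2, dropRate=0.3)
x = paddle.randn([4, 16, 32, 32])
print(blk.conv1(blk.relu1(blk.bn1(x))).shape)   # [4, 32, 16, 16] after the strided 3x3 conv
print(blk.convShortcut is not None)             # True, because in_planes != out_planes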
Example #7
    def __init__(self,
                 num_layers,
                 emb_dim,
                 drop_ratio=0.5,
                 JK="last",
                 residual=False,
                 gnn_type='gin'):
        '''
            emb_dim (int): node embedding dimensionality
        '''

        super(GNN_node_Virtualnode, self).__init__()
        self.num_layers = num_layers
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual

        if self.num_layers < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ### set the initial virtual node embedding to 0.
        #  self.virtualnode_embedding = paddle.nn.Embedding(1, emb_dim)
        self.virtualnode_embedding = self.create_parameter(
            shape=[1, emb_dim],
            dtype='float32',
            default_initializer=nn.initializer.Constant(value=0.0))

        ### List of GNNs
        self.convs = []
        ### batch norms applied to node embeddings
        self.batch_norms = []

        ### List of MLPs to transform virtual node at every layer
        self.mlp_virtualnode_list = []

        for layer in range(num_layers):
            if gnn_type == 'gin':
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == 'gcn':
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))

            self.batch_norms.append(paddle.nn.BatchNorm1D(emb_dim))

        for layer in range(num_layers - 1):
            self.mlp_virtualnode_list.append(
                nn.Sequential(nn.Linear(emb_dim, emb_dim),
                              nn.BatchNorm1D(emb_dim), nn.ReLU(),
                              nn.Linear(emb_dim, emb_dim),
                              nn.BatchNorm1D(emb_dim), nn.ReLU()))

        self.pool = gnn.GraphPool(pool_type="sum")

        self.convs = nn.LayerList(self.convs)
        self.batch_norms = nn.LayerList(self.batch_norms)
        self.mlp_virtualnode_list = nn.LayerList(self.mlp_virtualnode_list)
Example #8
 def __init__(self, input_channels, output_channels, stride=1):
     super(ResidualBlock, self).__init__()
     self.input_channels = input_channels
     self.output_channels = output_channels
     self.stride = stride
     self.bn1 = nn.BatchNorm2D(input_channels)
     self.relu = nn.ReLU()
     self.conv1 = nn.Conv2D(input_channels,
                            output_channels // 4,
                            1,
                            1,
                            bias_attr=False)
     self.bn2 = nn.BatchNorm2D(output_channels // 4)
     self.relu = nn.ReLU()
     self.conv2 = nn.Conv2D(output_channels // 4,
                            output_channels // 4,
                            3,
                            stride,
                            padding=1,
                            bias_attr=False)
     self.bn3 = nn.BatchNorm2D(output_channels // 4)
     self.relu = nn.ReLU()
     self.conv3 = nn.Conv2D(output_channels // 4,
                            output_channels,
                            1,
                            1,
                            bias_attr=False)
     self.conv4 = nn.Conv2D(input_channels,
                            output_channels,
                            1,
                            stride,
                            bias_attr=False)
Example #9
    def __init__(self, in_channels, nclass):
        super().__init__()
        self.nclass = nclass
        inter_channels = in_channels // 4
        self.inp = paddle.zeros(shape=(nclass, 300), dtype='float32')
        self.inp = paddle.create_parameter(
            shape=self.inp.shape,
            dtype=str(self.inp.numpy().dtype),
            default_initializer=paddle.nn.initializer.Assign(self.inp))
        self.inp.stop_gradient = True

        self.fc1 = nn.Sequential(nn.Linear(300, 128), nn.BatchNorm1D(128),
                                 nn.ReLU())
        self.fc2 = nn.Sequential(nn.Linear(128, 256), nn.BatchNorm1D(256),
                                 nn.ReLU())
        self.conv5 = layers.ConvBNReLU(in_channels,
                                       inter_channels,
                                       3,
                                       padding=1,
                                       bias_attr=False,
                                       stride=1)

        self.gloru = GlobalReasonUnit(in_channels=inter_channels,
                                      num_state=256,
                                      num_node=84,
                                      nclass=nclass)
        self.conv6 = nn.Sequential(nn.Dropout(0.1),
                                   nn.Conv2D(inter_channels, nclass, 1))
Example #10
    def make_layers(self, cfg, data_dict, batch_norm=False) -> nn.Sequential:
        layers = []
        in_channels = 3
        block = 1
        number = 1
        for v in cfg:
            if v == 'M':
                layers += [nn.MaxPool2D(kernel_size=2, stride=2)]
                block += 1
                number = 1
            else:
                conv2d = nn.Conv2D(in_channels, v, kernel_size=3, padding=1)
                """ set value """
                weight = paddle.to_tensor(
                    self.get_conv_filter(data_dict, f'conv{block}_{number}'))
                weight = weight.transpose((3, 2, 0, 1))
                bias = paddle.to_tensor(
                    self.get_bias(data_dict, f'conv{block}_{number}'))
                conv2d.weight.set_value(weight)
                conv2d.bias.set_value(bias)
                number += 1
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2D(v), nn.ReLU()]
                else:
                    layers += [conv2d, nn.ReLU()]
                in_channels = v

        return nn.Sequential(*layers)
Example #11
 def __init__(self, input_size, output_size):
     super(SimpleNet, self).__init__()
     self.linear1 = nn.Linear(input_size, output_size)
     self.relu1 = nn.ReLU()
     self.linear2 = nn.Linear(input_size, output_size)
     self.relu2 = nn.ReLU()
     self.linear3 = nn.Linear(input_size, output_size)
Example #12
    def __init__(self, model_config):
        """
        Initialization
        """
        super(MolTransModel, self).__init__()
        # Basic config
        self.model_config = model_config
        self.drug_max_seq = model_config['drug_max_seq']
        self.target_max_seq = model_config['target_max_seq']
        self.emb_size = model_config['emb_size']
        self.dropout_ratio = model_config['dropout_ratio']
        self.input_drug_dim = model_config['input_drug_dim']
        self.input_target_dim = model_config['input_target_dim']
        self.layer_size = model_config['layer_size']
        self.gpus = 1

        # Model config
        self.interm_size = model_config['interm_size']
        self.num_attention_heads = model_config['num_attention_heads']
        self.attention_dropout_ratio = model_config['attention_dropout_ratio']
        self.hidden_dropout_ratio = model_config['hidden_dropout_ratio']
        self.flatten_dim = model_config['flatten_dim']
        self.hidden_size = model_config['emb_size']

        # Enhanced embeddings
        self.drug_emb = EnhancedEmbedding(self.input_drug_dim, self.emb_size,
                                          self.drug_max_seq,
                                          self.dropout_ratio)
        self.target_emb = EnhancedEmbedding(self.input_target_dim,
                                            self.emb_size, self.target_max_seq,
                                            self.dropout_ratio)
        # Encoder module
        self.encoder = EncoderModule(self.layer_size, self.hidden_size,
                                     self.interm_size,
                                     self.num_attention_heads,
                                     self.attention_dropout_ratio,
                                     self.hidden_dropout_ratio)
        # Cross information
        self.interaction_cnn = nn.Conv2D(1, 3, 3, padding=1)  # Conv2D
        #self.involution = Involution2D(in_channel=1, out_channel=3) # Involution2D

        # Decoder module
        self.decoder = nn.Sequential(
            nn.Linear(self.flatten_dim, 512),
            nn.ReLU(),

            # nn.LayerNorm(512),
            LayerNorm(512),
            # nn.BatchNorm(512),
            nn.Linear(512, 64),
            nn.ReLU(),
            # nn.LeakyReLU(),

            # nn.LayerNorm(64),
            LayerNorm(64),
            # nn.BatchNorm(64),
            nn.Linear(64, 32),
            nn.ReLU(),
            # nn.LeakyReLU(),
            nn.Linear(32, 1))
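For reference, a hypothetical model_config covering every key the constructor reads; the values below are placeholders, not taken from the source:

model_config = {
    'input_drug_dim': 23532, 'input_target_dim': 16693,
    'drug_max_seq': 50, 'target_max_seq': 545,
    'emb_size': 384, 'dropout_ratio': 0.1,
    'layer_size': 2, 'interm_size': 1536,
    'num_attention_heads': 12,
    'attention_dropout_ratio': 0.1,
    'hidden_dropout_ratio': 0.1,
    'flatten_dim': 78192,
}
model = MolTransModel(model_config)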
Example #13
 def __init__(self,
              in_ch,
              out_ch,
              kernel_size,
              stride,
              expansion_factor,
              bn_momentum=0.1):
     super(_InvertedResidual, self).__init__()
     assert stride in [1, 2]
     assert kernel_size in [3, 5]
     mid_ch = in_ch * expansion_factor
     self.apply_residual = (in_ch == out_ch and stride == 1)
     self.layers = nn.Sequential(
         # Pointwise
         nn.Conv2D(in_ch, mid_ch, 1, bias_attr=False),
         nn.BatchNorm2D(mid_ch, momentum=bn_momentum),
         nn.ReLU(),
         # Depthwise
         nn.Conv2D(mid_ch,
                   mid_ch,
                   kernel_size,
                   padding=kernel_size // 2,
                   stride=stride,
                   groups=mid_ch,
                   bias_attr=False),
         nn.BatchNorm2D(mid_ch, momentum=bn_momentum),
         nn.ReLU(),
         # Linear pointwise. Note that there's no activation.
         nn.Conv2D(mid_ch, out_ch, 1, bias_attr=False),
         nn.BatchNorm2D(out_ch, momentum=bn_momentum))
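A quick shape check for the inverted-residual block (dimensions are illustrative; the Sequential is called directly, so no forward method is assumed):

import paddle

blk = _InvertedResidual(in_ch=16, out_ch=24, kernel_size=3, stride=2,
                        expansion_factor=6)
x = paddle.randn([1, 16, 56, 56])
print(blk.layers(x).shape)      # [1, 24, 28, 28]; stride=2 halves H and W
print(blk.apply_residual)       # False, since in_ch != out_ch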
Example #14
    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2D(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  kernel_size=3,
                                  stride=1,
                                  padding=1,
                                  bias_attr=False),
                        self.norm_layer(num_channels_cur_layer[i]),
                        nn.ReLU()))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2D(inchannels, outchannels,
                                  kernel_size=3, stride=2, padding=1, bias_attr=False),
                        self.norm_layer(outchannels),
                        nn.ReLU()))
                transition_layers.append(nn.Sequential(*conv3x3s))

        return nn.LayerList(transition_layers)
Example #15
    def __init__(self, in_channels, out_channels, size=(8, 8)):
        super(AttentionModule_stage3_cifar, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.middle_2r_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.conv1_1_blocks = nn.Sequential(
            nn.BatchNorm2D(out_channels), nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.BatchNorm2D(out_channels),
            nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
Example #16
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size,
              stride=1,
              padding=0,
              dilation=1,
              groups=1,
              relu=True,
              bn=True):
     super(BasicConv, self).__init__()
     self.out_channels = out_planes
     if bn:
         self.conv = nn.Conv2D(in_planes,
                               out_planes,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               dilation=dilation,
                               groups=groups,
                               bias_attr=None)
         self.bn = nn.BatchNorm2D(out_planes, epsilon=1e-5, momentum=0.01)
         self.relu = nn.ReLU() if relu else None
     else:
         self.conv = nn.Conv2D(in_planes,
                               out_planes,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               dilation=dilation,
                               groups=groups,
                               bias_attr=None)
         self.bn = None
         self.relu = nn.ReLU() if relu else None
Example #17
 def __init__(self, num_classes):
     super(ResidualAttentionModel_92_32input_update, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2D(3,
                   32,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias_attr=False), nn.BatchNorm2D(32), nn.ReLU())  # 32*32
     self.residual_block1 = ResidualBlock(32, 128)  # 32*32
     self.attention_module1 = AttentionModule_stage1_cifar(
         128, 128, size1=(32, 32), size2=(16, 16))  # 32*32
     self.residual_block2 = ResidualBlock(128, 256, 2)  # 16*16
     self.attention_module2 = AttentionModule_stage2_cifar(
         256, 256, size=(16, 16))  # 16*16
     self.attention_module2_2 = AttentionModule_stage2_cifar(
         256, 256, size=(16, 16))  # 16*16 # tbq add
     self.residual_block3 = ResidualBlock(256, 512, 2)  # 4*4
     self.attention_module3 = AttentionModule_stage3_cifar(512, 512)  # 8*8
     self.attention_module3_2 = AttentionModule_stage3_cifar(
         512, 512)  # 8*8 # tbq add
     self.attention_module3_3 = AttentionModule_stage3_cifar(
         512, 512)  # 8*8 # tbq add
     self.residual_block4 = ResidualBlock(512, 1024)  # 8*8
     self.residual_block5 = ResidualBlock(1024, 1024)  # 8*8
     self.residual_block6 = ResidualBlock(1024, 1024)  # 8*8
     self.mpool2 = nn.Sequential(nn.BatchNorm2D(1024), nn.ReLU(),
                                 nn.AvgPool2D(kernel_size=8))
     self.fc = nn.Linear(1024, num_classes)
Example #18
 def __init__(self, num_classes):
     super(ResidualAttentionModel_448input, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2D(3,
                   64,
                   kernel_size=7,
                   stride=2,
                   padding=3,
                   bias_attr=False), nn.BatchNorm2D(64), nn.ReLU())
     self.mpool1 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
     # tbq add
     # 112*112
     self.residual_block0 = ResidualBlock(64, 128)
     self.attention_module0 = AttentionModule_stage0(128, 128)
     # tbq add end
     self.residual_block1 = ResidualBlock(128, 256, 2)
     # 56*56
     self.attention_module1 = AttentionModule_stage1(256, 256)
     self.residual_block2 = ResidualBlock(256, 512, 2)
     self.attention_module2 = AttentionModule_stage2(512, 512)
     self.attention_module2_2 = AttentionModule_stage2(512, 512)  # tbq add
     self.residual_block3 = ResidualBlock(512, 1024, 2)
     self.attention_module3 = AttentionModule_stage3(1024, 1024)
     self.attention_module3_2 = AttentionModule_stage3(1024,
                                                       1024)  # tbq add
     self.attention_module3_3 = AttentionModule_stage3(1024,
                                                       1024)  # tbq add
     self.residual_block4 = ResidualBlock(1024, 2048, 2)
     self.residual_block5 = ResidualBlock(2048, 2048)
     self.residual_block6 = ResidualBlock(2048, 2048)
     self.mpool2 = nn.Sequential(nn.BatchNorm2D(2048), nn.ReLU(),
                                 nn.AvgPool2D(kernel_size=7, stride=1))
     self.fc = nn.Linear(2048, num_classes)
Example #19
 def __init__(self,
              inplanes: int,
              planes: int,
              stride: int = 2,
              norm_layer: nn.Layer = nn.BatchNorm2D):
     super(UpBottleneck, self).__init__()
     self.expansion = 4
     self.residual_layer = UpsampleConvLayer(inplanes,
                                             planes * self.expansion,
                                             kernel_size=1,
                                             stride=1,
                                             upsample=stride)
     conv_block = []
     conv_block += [
         norm_layer(inplanes),
         nn.ReLU(),
         nn.Conv2D(inplanes, planes, kernel_size=1, stride=1)
     ]
     conv_block += [
         norm_layer(planes),
         nn.ReLU(),
         UpsampleConvLayer(planes,
                           planes,
                           kernel_size=3,
                           stride=1,
                           upsample=stride)
     ]
     conv_block += [
         norm_layer(planes),
         nn.ReLU(),
         nn.Conv2D(planes, planes * self.expansion, kernel_size=1, stride=1)
     ]
     self.conv_block = nn.Sequential(*conv_block)
Example #20
 def __init__(self, num_classes=751):
     super(Net, self).__init__()
     # 3 128 64
     self.conv = nn.Sequential(
         nn.Conv2D(3, 64, 3, stride=1, padding=1),
         nn.BatchNorm2D(64),
         nn.ReLU(),
         # nn.Conv2d(32,32,3,stride=1,padding=1),
         # nn.BatchNorm2d(32),
         # nn.ReLU(inplace=True),
         nn.MaxPool2D(3, 2, padding=1),
     )
     # 32 64 32
     self.layer1 = make_layers(64, 64, 2, False)
     # 32 64 32
     self.layer2 = make_layers(64, 128, 2, True)
     # 64 32 16
     self.layer3 = make_layers(128, 256, 2, True)
     # 128 16 8
     self.layer4 = make_layers(256, 512, 2, True)
     # 256 8 4
     self.avgpool = nn.AvgPool2D((8, 4), 1)
     # 256 1 1
     self.classifier = nn.Sequential(
         nn.Linear(512, 256),
         nn.BatchNorm1D(256),
         nn.ReLU(),
         nn.Dropout(),
         nn.Linear(256, num_classes),
     )
Example #21
    def __init__(self, num_convs=0, in_channels=2048, out_channels=256):
        super(MaskFeat, self).__init__()
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.out_channels = out_channels
        fan_conv = out_channels * 3 * 3
        fan_deconv = out_channels * 2 * 2

        mask_conv = nn.Sequential()
        for i in range(self.num_convs):
            conv_name = 'mask_inter_feat_{}'.format(i + 1)
            mask_conv.add_sublayer(
                conv_name,
                nn.Conv2D(in_channels=in_channels if i == 0 else out_channels,
                          out_channels=out_channels,
                          kernel_size=3,
                          padding=1,
                          weight_attr=paddle.ParamAttr(
                              initializer=KaimingNormal(fan_in=fan_conv))))
            mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
        mask_conv.add_sublayer(
            'conv5_mask',
            nn.Conv2DTranspose(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=2,
                stride=2,
                weight_attr=paddle.ParamAttr(initializer=KaimingNormal(
                    fan_in=fan_deconv))))
        mask_conv.add_sublayer('conv5_mask' + 'act', nn.ReLU())
        self.upsample = mask_conv
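An illustrative shape check for MaskFeat with num_convs=0 (assuming the snippet's own imports, paddle and KaimingNormal, are in scope):

import paddle

feat = MaskFeat(num_convs=0, in_channels=2048, out_channels=256)
x = paddle.randn([1, 2048, 14, 14])
print(feat.upsample(x).shape)   # [1, 256, 28, 28]; the 2x2 stride-2 transposed conv doubles H and W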
Example #22
 def __init__(self, backbone):
     super(FMFModel, self).__init__()
     self.backbone = backbone
     self.se1, self.se2, self.se3, self.se4, self.se5 = SEModule(
         64), SEModule(64), SEModule(64), SEModule(64), SEModule(64)
     self.squeeze5 = nn.Sequential(nn.Conv2D(2048, 64, 3, 1, 1),
                                   nn.BatchNorm2D(64), nn.ReLU())
     self.squeeze4 = nn.Sequential(nn.Conv2D(1024, 64, 3, 1, 1),
                                   nn.BatchNorm2D(64), nn.ReLU())
     self.squeeze3 = nn.Sequential(nn.Conv2D(512, 64, 3, 1, 1),
                                   nn.BatchNorm2D(64), nn.ReLU())
     self.squeeze2 = nn.Sequential(nn.Conv2D(256, 64, 3, 1, 1),
                                   nn.BatchNorm2D(64), nn.ReLU())
     self.squeeze1 = nn.Sequential(nn.Conv2D(64, 64, 3, 1, 1),
                                   nn.BatchNorm2D(64), nn.ReLU())
     self.fa1, self.fa2, self.fa3, self.fa4, self.fa5 = SSM(), \
                                                        SSM(), \
                                                        SSM(), \
                                                        SSM(), \
                                                        SSM()
     self.FMF1, self.FMF2 = FMFMs(), FMFMs()
     self.FMF3 = FMFMs()
     self.mso = Progressive_fusion_module()
     self.linear = nn.Conv2D(64, 1, 3, 1, 1)
     for p in self.backbone.parameters():
         p.optimize_attr['learning_rate'] /= 10.0
Example #23
    def __init__(self, num_classes, backbone, BatchNorm):
        super(Decoder, self).__init__()
        if backbone == 'resnet' or backbone == 'drn' or backbone == 'resnet_edge':
            low_level_inplanes = 256
        elif backbone == 'xception':
            low_level_inplanes = 128
        elif backbone == 'mobilenet':
            low_level_inplanes = 24
        else:
            raise NotImplementedError

        self.conv1 = nn.Conv2D(low_level_inplanes, 48, 1, bias_attr=False)
        self.bn1 = BatchNorm(48)
        self.relu = nn.ReLU()
        self.last_conv = nn.Sequential(
            nn.Conv2D(304,
                      256,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias_attr=False), BatchNorm(256), nn.ReLU(),
            nn.Sequential(),
            nn.Conv2D(256,
                      256,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias_attr=False), BatchNorm(256), nn.ReLU(),
            nn.Sequential())
        self._init_weight()
Example #24
    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            # nn.Conv2d(
                            #     num_channels_pre_layer[i],
                            #     num_channels_cur_layer[i],
                            #     3, 1, 1, bias=False
                            # ),
                            nn.Conv2D(num_channels_pre_layer[i],
                                      num_channels_cur_layer[i],
                                      3,
                                      1,
                                      1,
                                      weight_attr=weight_attr_conv,
                                      bias_attr=False),
                            # nn.BatchNorm2d(num_channels_cur_layer[i]),
                            nn.BatchNorm2D(num_channels_cur_layer[i],
                                           weight_attr=weight_attr_bn,
                                           bias_attr=bias_attr_constant_0),
                            # nn.ReLU(inplace=True)
                            nn.ReLU()))
                else:
                    # transition_layers.append(None)
                    transition_layers.append(equalmap())
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i-num_branches_pre else inchannels
                    conv3x3s.append(
                        nn.Sequential(
                            # nn.Conv2d(
                            #     inchannels, outchannels, 3, 2, 1, bias=False
                            # ),
                            nn.Conv2D(inchannels,
                                      outchannels,
                                      3,
                                      2,
                                      1,
                                      weight_attr=weight_attr_conv,
                                      bias_attr=False),
                            # nn.BatchNorm2d(outchannels),
                            nn.BatchNorm2D(outchannels,
                                           weight_attr=weight_attr_bn,
                                           bias_attr=bias_attr_constant_0),
                            # nn.ReLU(inplace=True)
                            nn.ReLU()))
                transition_layers.append(nn.Sequential(*conv3x3s))

        # return nn.ModuleList(transition_layers)
        return nn.LayerList(transition_layers)
Example #25
 def __init__(self,
              in_channels,
              out_channels,
              is_batchnorm,
              num_conv=2,
              kernel_size=3,
              stride=1,
              padding=1):
     super(UnetConv2D, self).__init__()
     self.num_conv = num_conv
     for i in range(num_conv):
         conv = (nn.Sequential(nn.Conv2D(in_channels, out_channels, kernel_size, stride, padding),
                               nn.BatchNorm(out_channels),
                               nn.ReLU()) \
                 if is_batchnorm else \
                 nn.Sequential(nn.Conv2D(in_channels, out_channels, kernel_size, stride, padding),
                               nn.ReLU()))
         setattr(self, 'conv%d' % (i + 1), conv)
         in_channels = out_channels
     # initialise the blocks
     for children in self.children():
          children.weight_attr = paddle.framework.ParamAttr(
              initializer=paddle.nn.initializer.KaimingNormal())
          children.bias_attr = paddle.framework.ParamAttr(
              initializer=paddle.nn.initializer.KaimingNormal())
Example #26
    def __init__(self, in_channels, out_channels, size=(8, 8)):
        super(AttentionModule_stage2_cifar, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.mpool1 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)  # 4*4

        self.middle_2r_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.interpolation1 = nn.UpsamplingBilinear2D(size=size)  # 8*8

        self.conv1_1_blocks = nn.Sequential(
            nn.BatchNorm2D(out_channels), nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.BatchNorm2D(out_channels),
            nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
Example #27
 def __init__(self,
              gate_channel,
              reduction_ratio=16,
              dilation_conv_num=2,
              dilation_val=4):
     super(SpatialGate, self).__init__()
     self.gate_s = nn.Sequential()
     self.gate_s.add_sublayer(
         'gate_s_conv_reduce0',
         nn.Conv2D(gate_channel,
                   gate_channel // reduction_ratio,
                   kernel_size=1))
     self.gate_s.add_sublayer(
         'gate_s_bn_reduce0',
         nn.BatchNorm2D(gate_channel // reduction_ratio))
     self.gate_s.add_sublayer('gate_s_relu_reduce0', nn.ReLU())
     for i in range(dilation_conv_num):
          self.gate_s.add_sublayer(
              'gate_s_conv_di_%d' % i,
              nn.Conv2D(gate_channel // reduction_ratio,
                        gate_channel // reduction_ratio,
                        kernel_size=3,
                        padding=dilation_val,
                        dilation=dilation_val))
         self.gate_s.add_sublayer(
             'gate_s_bn_di_%d' % i,
             nn.BatchNorm2D(gate_channel // reduction_ratio))
         self.gate_s.add_sublayer('gate_s_relu_di_%d' % i, nn.ReLU())
     self.gate_s.add_sublayer(
         'gate_s_conv_final',
         nn.Conv2D(gate_channel // reduction_ratio, 1, kernel_size=1))
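An illustrative pass through the spatial gate (sizes are made up; with padding equal to the dilation, the 3x3 dilated convs preserve H and W):

import paddle

gate = SpatialGate(gate_channel=64)
x = paddle.randn([2, 64, 28, 28])
print(gate.gate_s(x).shape)     # [2, 1, 28, 28] single-channel spatial attention map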
Example #28
    def __init__(self, inp, oup, stride=1):
        super(ResidualBlock, self).__init__()

        self.block = nn.Sequential(
            ConvDw(inp, oup, 3, stride=stride),
            nn.Conv2D(in_channels=oup,
                      out_channels=oup,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      groups=oup,
                      bias_attr=False),
            nn.BatchNorm2D(num_features=oup, epsilon=1e-05, momentum=0.1),
            nn.ReLU(),
            nn.Conv2D(in_channels=oup,
                      out_channels=oup,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias_attr=False),
            nn.BatchNorm2D(num_features=oup, epsilon=1e-05, momentum=0.1),
        )
        if inp == oup:
            self.residual = None
        else:
            self.residual = nn.Sequential(
                nn.Conv2D(in_channels=inp,
                          out_channels=oup,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias_attr=False),
                nn.BatchNorm2D(num_features=oup, epsilon=1e-05, momentum=0.1),
            )
        self.relu = nn.ReLU()
Example #29
    def __init__(self,
                 block,
                 layers,
                 num_filters,
                 feature_dim,
                 encoder_type='SAP',
                 n_mels=40,
                 log_input=True,
                 **kwargs):
        super(ResNetSE, self).__init__()

        print('Embedding size is %d, encoder %s.' %
              (feature_dim, encoder_type))

        self.inplanes = num_filters[0]
        self.encoder_type = encoder_type
        self.n_mels = n_mels
        self.log_input = log_input

        self.conv1 = nn.Conv2D(1,
                               num_filters[0],
                               kernel_size=3,
                               stride=1,
                               padding=1)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm2D(num_filters[0])

        self.layer1 = self._make_layer(block, num_filters[0], layers[0])
        self.layer2 = self._make_layer(block,
                                       num_filters[1],
                                       layers[1],
                                       stride=(2, 2))
        self.layer3 = self._make_layer(block,
                                       num_filters[2],
                                       layers[2],
                                       stride=(2, 2))
        self.layer4 = self._make_layer(block,
                                       num_filters[3],
                                       layers[3],
                                       stride=(2, 2))

        outmap_size = int(self.n_mels / 8)

        self.attention = nn.Sequential(
            nn.Conv1D(num_filters[3] * outmap_size, 128, kernel_size=1),
            nn.ReLU(),
            nn.BatchNorm1D(128),
            nn.Conv1D(128, num_filters[3] * outmap_size, kernel_size=1),
            nn.Softmax(axis=2),
        )

        if self.encoder_type == "SAP":
            out_dim = num_filters[3] * outmap_size
        elif self.encoder_type == "ASP":
            out_dim = num_filters[3] * outmap_size * 2
        else:
            raise ValueError('Undefined encoder')

        self.fc = nn.Linear(out_dim, feature_dim)
Example #30
 def __init__(self):
     super(LeNetDygraph, self).__init__()
     self.features = nn.Sequential(nn.Conv2D(1, 6, 3, stride=1, padding=1),
                                   nn.ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2),
                                   nn.Conv2D(6, 16, 5, stride=1, padding=0),
                                   nn.ReLU(),
                                   paddle.fluid.dygraph.Pool2D(2, 'max', 2))
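paddle.fluid.dygraph.Pool2D is a legacy API; below is a sketch of the same feature stack written against the current paddle.nn pooling layers (an assumed equivalent, not the source's code):

import paddle.nn as nn

features = nn.Sequential(
    nn.Conv2D(1, 6, 3, stride=1, padding=1), nn.ReLU(),
    nn.MaxPool2D(kernel_size=2, stride=2),
    nn.Conv2D(6, 16, 5, stride=1, padding=0), nn.ReLU(),
    nn.MaxPool2D(kernel_size=2, stride=2))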