Example #1
    def __init__(self):
        super(ASPP, self).__init__()

        in_planes = 2048
        dilations = [1, 6, 12, 18]

        # all ASPP branches output feature maps with 256 channels
        self.aspp1 = _ASPPModule(in_planes, planes=256, kernel_size=1, padding=0, dilation=dilations[0])
        self.aspp2 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[1], dilation=dilations[1])
        self.aspp3 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[2], dilation=dilations[2])
        self.aspp4 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[3], dilation=dilations[3])

        # perform global average pooling on the last feature map of the backbone
        # the batch size must be greater than 1, otherwise BatchNorm raises an
        # error when computing statistics over the 1x1 pooled map in training mode
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                             nn.Conv2d(in_planes, 256, 1, stride=1, bias=False),
                                             nn.BatchNorm2d(256),
                                             nn.ReLU())

        self.p1 = nn.AdaptiveAvgPool2d(1)
        self.p2 = nn.Conv2d(in_planes, 256, 1, stride=1, bias=False)
        self.p3 = nn.BatchNorm2d(256)

        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self._init_weight()
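
A note on the channel math: each of the five branches above outputs 256 channels, so their concatenation carries 5 * 256 = 1280 channels, which is where conv1's input width of 1280 comes from. A minimal forward sketch in the usual DeepLab v3+ style (the snippet's forward is not shown; the bilinear upsampling of the pooled branch is an assumption):

    # assumes: import torch; import torch.nn.functional as F
    def forward(self, x):
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)                    # [N, 256, 1, 1]
        x5 = F.interpolate(x5, size=x4.size()[2:],
                           mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)      # [N, 1280, H, W]
        x = self.relu(self.bn1(self.conv1(x)))          # back down to 256 channels
        return self.dropout(x)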
Example #2
 def __init__(self):
     super().__init__()
     self.conv2d_1 = nn.Conv2d(1,6,5,padding=2)
     self.avgpool2d = nn.AvgPool2d(2,stride=2)
     self.conv2d_2 = nn.Conv2d(6,16,5)
     self.flatten = nn.Flatten()
     self.sig = nn.Sigmoid()
     self.linear_1 = nn.Linear(16*5*5, 120)
     self.linear_2 = nn.Linear(120, 84)
     self.linear_3 = nn.Linear(84, 10)
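
For a 28x28 input (MNIST-sized, an assumption) the shapes line up with linear_1's 16*5*5: conv2d_1 with padding=2 keeps 28x28, the 2x2 average pool halves it to 14x14, conv2d_2 (5x5, no padding) yields 10x10, and a second pool gives 5x5. A quick smoke test, with LeNet as a hypothetical name for the enclosing class and the classic sigmoid ordering assumed:

    import torch

    net = LeNet()
    x = torch.randn(1, 1, 28, 28)
    h = net.avgpool2d(net.sig(net.conv2d_1(x)))   # [1, 6, 14, 14]
    h = net.avgpool2d(net.sig(net.conv2d_2(h)))   # [1, 16, 5, 5]
    h = net.flatten(h)                            # [1, 400] == [1, 16*5*5]
    out = net.linear_3(net.sig(net.linear_2(net.sig(net.linear_1(h)))))
    print(out.shape)                              # torch.Size([1, 10])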
Example #3
    def __init__(self, block, layers, num_classes=10):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.initdata = nn.Conv2d(1,
                                  64,
                                  kernel_size=7,
                                  stride=1,
                                  padding=3,
                                  bias=False)
        self.bn0 = nn.BatchNorm2d(64)
        self.relu0 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self.make_layers(block, 64, layers[0])
        self.layer2 = self.make_layers(block, 128, layers[1], stride=1)
        self.layer3 = self.make_layers(block, 256, layers[2], stride=1)
        self.layer4 = self.make_layers(block, 512, layers[3], stride=2)

        self.avg = nn.AvgPool2d(7, stride=1)
        self.full = nn.Linear(512 * block.expansion, num_classes)
        self.sigmoid = nn.Sigmoid()

        for m in self.modules():
            # isinstance() checks whether an object is an instance of a given
            # class (similar in spirit to type(), but it also matches subclasses)
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight,
                                     mode='fan_out',
                                     nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
Example #4
    def __init__(self, num_classes):
        super(Decoder, self).__init__()
        low_level_in_planes = 256

        self.conv1 = nn.Conv2d(low_level_in_planes, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(48)
        self.relu = nn.ReLU()
        self.last_conv = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1,
                      bias=False),  # 304 = 256 (ASPP output) + 48 (low-level features)
            nn.BatchNorm2d(256), nn.ReLU(),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
                      bias=False), nn.BatchNorm2d(256), nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1))
        self._init_weight()
Example #5
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=1,
                     stride=stride,
                     bias=False)
Example #6
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=1,
                     bias=False)
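
Both helpers preserve spatial size at stride=1 (the 3x3 version thanks to padding=1), which is what lets residual blocks add their inputs back in. A quick check:

    import torch

    x = torch.randn(2, 64, 56, 56)
    print(conv1x1(64, 256)(x).shape)            # torch.Size([2, 256, 56, 56])
    print(conv3x3(64, 64)(x).shape)             # torch.Size([2, 64, 56, 56])
    print(conv3x3(64, 128, stride=2)(x).shape)  # torch.Size([2, 128, 28, 28])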
Example #7
    def __init__(self):
        super().__init__()

        self.relu = nn.ReLU()
        self.avgpool2d = nn.AvgPool2d(2, stride=2)

        # input stem
        self.conv2d_1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)
        self.batchnorm2d = nn.BatchNorm2d(6)

        # middle residual block
        self.conv2d_2 = nn.Conv2d(6, 6, kernel_size=3, padding=1)

        # output head
        self.conv2d_3 = nn.Conv2d(6, 6, 5)
        self.flatten = nn.Flatten()
        self.sig = nn.Sigmoid()
        self.linear_1 = nn.Linear(6 * 5 * 5, 64)
        self.linear_2 = nn.Linear(64, 10)
Example #8
 def __init__(self, inplane, plane, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplane, plane, kernel_size=1, bias=False)
     self.bn1 = nn.BatchNorm2d(plane)
     self.conv2 = nn.Conv2d(plane,
                            plane,
                            kernel_size=3,
                            padding=1,
                            stride=stride,
                            bias=False)
     self.bn2 = nn.BatchNorm2d(plane)
     self.conv3 = nn.Conv2d(plane,
                            self.expansion * plane,
                            kernel_size=1,
                            bias=False)
     self.bn3 = nn.BatchNorm2d(self.expansion * plane)
     self.downsample = downsample
     self.stride = stride
     self.relu3 = nn.ReLU(inplace=True)
Example #9
    def __init__(self, rnn_size, embedding_size, input_size, output_size,
                 grids_width, grids_height, dropout_par, device):

        super(VPTLSTM, self).__init__()
        # parameter initialization
        self.device = device
        self.rnn_size = rnn_size  # hidden size, defaults to 128
        self.embedding_size = embedding_size  # spatial-coordinate embedding size (64): each state is a 64-dim vector
        self.input_size = input_size  # input size (6), the length of the feature vector
        self.output_size = output_size  # output size (5)
        self.grids_width = grids_width
        self.grids_height = grids_height
        self.dropout_par = dropout_par

        # network layer initialization
        # inputs: embedded_input, hidden_states
        self.cell = nn.LSTMCell(2 * self.embedding_size, self.rnn_size)

        # input embedding layer: maps a vector of length input_size to embedding_size
        self.input_embedding_layer = nn.Linear(self.input_size,
                                               self.embedding_size)

        # input:  [vehicle_num, grids_height, grids_width, rnn_size],       e.g. [26, 39, 5, 128]
        # output: [vehicle_num, grids_height-12, grids_width-4, rnn_size//4], e.g. [26, 27, 1, 32]
        self.social_tensor_conv1 = nn.Conv2d(in_channels=self.rnn_size,
                                             out_channels=self.rnn_size // 2,
                                             kernel_size=(5, 3),
                                             stride=(2, 1))
        self.social_tensor_conv2 = nn.Conv2d(in_channels=self.rnn_size // 2,
                                             out_channels=self.rnn_size // 4,
                                             kernel_size=(5, 3),
                                             stride=1)
        self.social_tensor_embed = nn.Linear(
            (self.grids_height - 15) * (self.grids_width - 4) *
            self.rnn_size // 4, self.embedding_size)

        # output layer: maps the hidden_state of length rnn_size to the output size (5)
        self.output_layer = nn.Linear(self.rnn_size, self.output_size)

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(self.dropout_par)
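
The tensor shapes quoted in the comments above can be double-checked with the standard Conv2d output-size formula, out = floor((in + 2*padding - dilation*(kernel - 1) - 1) / stride) + 1. A small helper (illustrative only, not part of the model):

    import math

    def conv2d_out(size, kernel, stride=1, padding=0, dilation=1):
        """Spatial output size of nn.Conv2d along one dimension."""
        return math.floor((size + 2 * padding - dilation * (kernel - 1) - 1) / stride) + 1

    print(conv2d_out(224, kernel=7, stride=2, padding=3))  # 112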
Example #10
    def __init__(self, in_channels, key_channels, out_channels, scale=1, dropout=0.1, bn_type=None):
        super(SpatialOCR_Module, self).__init__()

        self.object_context_block = ObjectAttentionBlock(in_channels, key_channels, scale, bn_type)
        _in_channels = 2 * in_channels

        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(_in_channels, out_channels, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout)
        )
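
The _in_channels = 2 * in_channels suggests the forward pass concatenates the attention context with the original features channel-wise before conv_bn_dropout; a sketch of that step (the actual forward is not shown):

    def forward(self, feats, proxy_feats):
        context = self.object_context_block(feats, proxy_feats)
        # fuse the object context with the input features along the channel axis
        return self.conv_bn_dropout(torch.cat([context, feats], dim=1))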
Example #11
 def __init__(self,
              in_planes,
              planes,
              stride=1,
              downsample=None,
              dilation=1):
     super(Bottleneck, self).__init__()
     self.conv1 = conv1x1(in_planes, planes)
     self.bn1 = nn.BatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            dilation=dilation,
                            padding=dilation,
                            bias=False)
     self.bn2 = nn.BatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = nn.BatchNorm2d(planes * self.expansion)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #12
    def __init__(self, inp_dim, out_dim):
        """
        residual block
        :param inp_dim: input dimension
        :param out_dim: output dimension
        """
        super(Residual, self).__init__()
        # the channel must be at least 1
        out_dim_half = max(1, int(out_dim / 2))

        self.conv1 = nn.Conv2d(inp_dim, out_dim_half, 1, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_dim_half)
        self.relu = nn.ReLU()

        self.conv2 = nn.Conv2d(out_dim_half, out_dim_half, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_dim_half)

        self.conv3 = nn.Conv2d(out_dim_half, out_dim, 1, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_dim)

        self.skip_layer = nn.Conv2d(inp_dim, out_dim, 1, 1, bias=False)
        self.skip_layer_bn = nn.BatchNorm2d(out_dim)
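
Only __init__ is shown; a typical forward consistent with these layers (the exact ordering is an assumption):

    def forward(self, x):
        identity = self.skip_layer_bn(self.skip_layer(x))  # 1x1 projection to out_dim
        out = self.relu(self.bn1(self.conv1(x)))           # 1x1, squeeze to out_dim_half
        out = self.relu(self.bn2(self.conv2(out)))         # 3x3, padding 1 keeps H and W
        out = self.bn3(self.conv3(out))                    # 1x1, expand to out_dim
        return self.relu(out + identity)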
Example #13
    def __init__(self, in_planes, planes, kernel_size, padding, dilation):
        """
        One single ASPP module
        :param in_planes: input channels
        :param planes: output channels
        :param kernel_size: kernel size in conv
        :param padding: padding
        :param dilation: dilation
        """
        super(_ASPPModule, self).__init__()
        self.atrous_conv = nn.Conv2d(in_planes, planes, kernel_size=kernel_size,
                                     stride=1, padding=padding, dilation=dilation, bias=False)
        self.bn = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU()

        self._init_weight()
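
With padding equal to dilation, the 3x3 atrous branches keep the spatial size of their input, which is what allows Example #1 to concatenate all five branches. A quick check using the layers directly (the module's forward is not shown):

    import torch

    m = _ASPPModule(in_planes=2048, planes=256, kernel_size=3, padding=12, dilation=12)
    x = torch.randn(2, 2048, 32, 32)
    print(m.relu(m.bn(m.atrous_conv(x))).shape)  # torch.Size([2, 256, 32, 32])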
Example #14
 def make_layers(self, block, planes, num_blocks, stride=1):
     downsample = None
     layers = []
     if stride != 1 or self.inplanes != block.expansion * planes:
         downsample = nn.Sequential(
             nn.Conv2d(self.inplanes,
                       block.expansion * planes,
                       kernel_size=1,
                       stride=stride,
                       bias=False),
             nn.BatchNorm2d(block.expansion * planes))
     layers.append(
         block(self.inplanes, planes, stride=stride, downsample=downsample))
     self.inplanes = planes * block.expansion
     for i in range(1, num_blocks):
         layers.append(block(self.inplanes, planes))
     return nn.Sequential(*layers)
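
Each call builds one ResNet stage: only the first block may change stride or channel count (hence the optional 1x1 downsample projection), after which self.inplanes becomes planes * block.expansion for the remaining blocks. Typical usage inside a constructor (assuming a block with expansion = 4, such as the Bottleneck of Example #8; Example #3 uses stride=1 in its middle stages instead):

    # inside ResNet.__init__, starting from self.inplanes = 64
    self.layer1 = self.make_layers(block, 64, layers[0])             # 64 -> 256 channels
    self.layer2 = self.make_layers(block, 128, layers[1], stride=2)  # 256 -> 512, halves H and W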
Example #15
    def __init__(self) -> None:
        super(MaskBranch, self).__init__()
        self.conv1 = Residual(256, 256)
        self.conv2 = Residual(256, 256)
        self.up16to8 = nn.UpsamplingBilinear2d(scale_factor=2)

        self.conv3 = Residual(256, 128)
        self.up8to4 = nn.UpsamplingBilinear2d(scale_factor=2)

        self.conv4 = Residual(128, 64)
        self.up4to2 = nn.UpsamplingBilinear2d(scale_factor=2)

        self.conv5 = Residual(64, 64)
        self.up2to1 = nn.UpsamplingBilinear2d(scale_factor=2)

        self.conv6 = Residual(64, 1)
        self.conv11 = nn.Conv2d(1, 1, kernel_size=1, stride=1)
Example #16
    def _make_multi_grid_layer(self,
                               block,
                               planes,
                               blocks,
                               stride=1,
                               dilation=1):
        """
        Multi-grid unit
        :param block: Bottleneck
        :param planes:
        :param blocks:
        :param stride:
        :param dilation:
        :return:
        """
        downsample = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_planes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = list()
        layers.append(
            block(self.in_planes,
                  planes,
                  stride,
                  dilation=blocks[0] * dilation,
                  downsample=downsample))
        self.in_planes = planes * block.expansion
        for i in range(1, len(blocks)):
            layers.append(
                block(self.in_planes,
                      planes,
                      stride=1,
                      dilation=blocks[i] * dilation))

        return nn.Sequential(*layers)
Example #17
    def __init__(self):
        super(VectorBranch, self).__init__()
        output_channel = regular_config.num_keypoint * 2

        self.conv1 = Residual(256, 256)
        self.conv2 = Residual(256, 256)
        self.up16to8 = nn.UpsamplingBilinear2d(scale_factor=2)

        self.up8to4 = nn.UpsamplingBilinear2d(scale_factor=2)
        self.conv3 = Residual(256, 128)
        self.conv4 = Residual(128, 64)

        self.up4to2 = nn.UpsamplingBilinear2d(scale_factor=2)
        self.conv5 = Residual(64, 64)

        self.up2to1 = nn.UpsamplingBilinear2d(scale_factor=2)
        self.conv6 = Residual(64, output_channel)

        self.conv11 = nn.Conv2d(output_channel,
                                output_channel,
                                kernel_size=1,
                                stride=1)
Example #18
    def __init__(self, block, layers, BatchNorm=None):
        super(ResNet, self).__init__()
        if BatchNorm is None:
            BatchNorm = nn.BatchNorm2d
        self._BatchNorm = BatchNorm

        # resnet head
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3,
                               self.in_planes,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = BatchNorm(self.in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # middle
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilation=1)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilation=1)
        blocks = [1, 2, 4]
        self.layer4 = self._make_multi_grid_layer(block,
                                                  512,
                                                  blocks=blocks,
                                                  stride=1,
                                                  dilation=2)
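
With blocks = [1, 2, 4] and dilation = 2, the three layer4 bottlenecks receive dilations 2, 4 and 8 (blocks[i] * dilation in Example #16), while stride=1 keeps the overall output stride at 16 (stem /4, layer2 /2, layer3 /2). A hypothetical instantiation (the depths [3, 4, 6] are an assumption, matching the first three stages of ResNet-50; Bottleneck as in Example #11):

    backbone = ResNet(Bottleneck, layers=[3, 4, 6])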
Example #19
 def __init__(self, in_channels, key_channels, scale=1, bn_type=None):
     super(ObjectAttentionBlock, self).__init__()
     self.scale = scale
     self.in_channels = in_channels
     self.key_channels = key_channels
     self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
     self.f_pixel = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, kernel_size=1, stride=1,
                   padding=0, bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True),
         nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels, kernel_size=1, stride=1,
                   padding=0, bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True)
     )
     self.f_object = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True),
         nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True)
     )
     self.f_down = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True)
     )
     self.f_up = nn.Sequential(
         nn.Conv2d(in_channels=self.key_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
          nn.BatchNorm2d(self.in_channels),  # must match the conv's out_channels (in_channels), not key_channels
         nn.ReLU(inplace=True)
     )
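
Only __init__ is shown; a sketch of the usual query/key/value forward for this kind of object-attention block, following the standard OCR formulation (an assumption, not the snippet's own forward):

    def forward(self, x, proxy):
        n, _, h, w = x.size()
        if self.scale > 1:
            x = self.pool(x)
        query = self.f_pixel(x).view(n, self.key_channels, -1).permute(0, 2, 1)   # [N, HW, C_k]
        key = self.f_object(proxy).view(n, self.key_channels, -1)                 # [N, C_k, M]
        value = self.f_down(proxy).view(n, self.key_channels, -1).permute(0, 2, 1)
        sim = torch.softmax((self.key_channels ** -0.5) * torch.matmul(query, key), dim=-1)
        context = torch.matmul(sim, value).permute(0, 2, 1).contiguous()
        context = context.view(n, self.key_channels, *x.size()[2:])
        context = self.f_up(context)  # back to in_channels
        if self.scale > 1:
            context = torch.nn.functional.interpolate(
                context, size=(h, w), mode='bilinear', align_corners=True)
        return context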
Example #20
 def __init__(self):
     super(CNNNet, self).__init__()
     # nn.Conv2d also requires out_channels and kernel_size; the values
     # below are placeholders (an assumption) that make the layer valid
     self.cnn_layer = nn.Sequential(
         nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3))
     self.fc_layer = nn.Sequential()  # an empty Sequential acts as the identity
Example #21
 def __init__(self):
     nn.Module.__init__(self)
     self.cn1 = nn.Conv2d(3, 3, kernel_size=3, stride=1)
     self.mp1 = nn.MaxPool2d(kernel_size=2,
                             stride=2,
                             return_indices=True)
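
return_indices=True makes the pooling layer return both the pooled tensor and the argmax indices, which nn.MaxUnpool2d can later use to invert the pooling (common in encoder-decoder architectures such as SegNet). A quick sketch:

    import torch
    import torch.nn as nn

    pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
    unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)

    x = torch.randn(1, 3, 8, 8)
    y, idx = pool(x)        # y: [1, 3, 4, 4]; idx has the same shape as y
    x_rec = unpool(y, idx)  # [1, 3, 8, 8], zero everywhere except the max positions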
Example #22
 def __init__(self):
     nn.Module.__init__(self)
     self.cn1 = nn.Conv2d(3, 3, kernel_size=3, stride=1)
     self.mp1 = nn.Conv2d(3, 3, kernel_size=2, stride=2)  # a stride-2 conv standing in for the max pool of Example #21
Example #23
 def __init__(self, num_class=10):
     super(VGG16, self).__init__()
     self.feature = modules.Sequential(
          # 1
         modules.Conv2d(3, 64, kernel_size=3, padding=1),
         modules.BatchNorm2d(64),
         modules.ReLU(True),
         #2
         modules.Conv2d(64, 64, kernel_size=3, padding=1),
         modules.BatchNorm2d(64),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #3
         modules.Conv2d(64, 128, kernel_size=3, padding=1),
         modules.BatchNorm2d(128),
         modules.ReLU(True),
         # modules.MaxPool2d(kernel_size=2,stride=2),
         #4
         modules.Conv2d(128, 128, kernel_size=3, padding=1),
         modules.BatchNorm2d(128),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #5
         modules.Conv2d(128, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         #6
         modules.Conv2d(256, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         #7
         modules.Conv2d(256, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #8
         modules.Conv2d(256, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #9
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #10
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #11
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #12
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #13
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         modules.AvgPool2d(kernel_size=1, stride=1),
     )
      # fully connected layers
      self.classifier = modules.Sequential(
          # 14
         modules.Linear(512, 4096),
         modules.ReLU(True),
         modules.Dropout(),
         #15
         modules.Linear(4096, 4096),
         modules.ReLU(True),
         modules.Dropout(),
         #16
         modules.Linear(4096, num_class),
     )
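
The classifier starts at 512 features, so the output of feature must be flattened to [N, 512] first; with five stride-2 max pools this matches a 32x32 input (CIFAR-10-sized, an assumption; the final 1x1 AvgPool2d is a no-op). A forward along those lines (the snippet's forward is not shown):

    import torch

    net = VGG16(num_class=10)
    x = torch.randn(2, 3, 32, 32)
    feats = net.feature(x)                  # [2, 512, 1, 1]
    out = net.classifier(feats.flatten(1))  # [2, 10]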
Example #24
        x_out = self.spatial(x_compress)
        scale = torch.sigmoid(x_out)
        # broadcasting [None, chn, 2048] * [None, 1, 2048]=>[None, chn, 2048]
        return x * scale


class CBAM(nn.Module):
    def __init__(self, gate_channels, reduction_ratio=16):
        super(CBAM, self).__init__()
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio)
        self.SpatialGate = SpatialGate()

    def forward(self, x):
        """
        :param x: [None, chn, 2048]
        :return:  [None, chn, 2048]
        """
        x_out = self.ChannelGate(x)  # [None, chn, 2048]
        x_out = self.SpatialGate(x_out)
        return x_out


if __name__ == '__main__':
    l1 = nn.Conv2d(10, 12, kernel_size=2)
    l2 = nn.AdaptiveAvgPool1d(1)
    for name, parameter in l2.named_parameters():
        print(name, ':', parameter.size())
    # print(l1.parameters().shape)
    # print(l1.named_parameters())
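
Note that nn.AdaptiveAvgPool1d has no learnable parameters, so the loop above prints nothing; iterating over l1.named_parameters() instead would print the Conv2d weight of shape [12, 10, 2, 2] and its bias of shape [12].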