Example #1
0
    def __init__(self, in_channels: int, out_channels: int, index: int):
        """SSH-style context module: three parallel conv branches.

        ssh_3x3 produces half of out_channels directly; ssh_dimred feeds the
        ssh_5x5 and ssh_7x7 branches, each producing a quarter of out_channels
        (the branches are presumably concatenated in forward() — confirm there).

        Args:
            in_channels: channels of the incoming feature map.
            out_channels: total channels after fusing the branches; should be
                divisible by 4.
            index: identifier of this module within the network (stored only).
        """
        super(SSH, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        # Floor division instead of int(x / 2): same result for the
        # non-negative channel counts used here, without the float round-trip.
        # ('quater' spelling kept — the attribute name is part of the
        # interface used by the rest of the class.)
        self.half_out_channels = out_channels // 2
        self.quater_out_channels = self.half_out_channels // 2
        self.index = index

        # direct 3x3 branch -> half of the output channels
        self.ssh_3x3 = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.half_out_channels, kernel_size=3, stride=1, padding=1)
        )

        # shared dimensionality-reduction conv feeding the two stacked branches
        self.ssh_dimred = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.quater_out_channels, kernel_size=3, stride=1, padding=1),
            nn.ReLU()
        )

        # 5x5-equivalent branch: one extra 3x3 conv on top of ssh_dimred
        self.ssh_5x5 = nn.Sequential(
            nn.Conv2d(in_channels=self.quater_out_channels, out_channels=self.quater_out_channels, kernel_size=3, stride=1, padding=1)
        )

        # 7x7-equivalent branch: two extra 3x3 convs on top of ssh_dimred
        self.ssh_7x7 = nn.Sequential(
            nn.Conv2d(in_channels=self.quater_out_channels, out_channels=self.quater_out_channels, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=self.quater_out_channels, out_channels=self.quater_out_channels, kernel_size=3, stride=1, padding=1)
        )

        self.out_relu = nn.ReLU()
Example #2
0
    def __init__(self, in_planes, out_planes, stride=1, scale=0.1, map_reduce=8, vision=1, groups=1):
        """Receptive Field Block (RFB) module.

        Three branches share a 1x1 reduce followed by 3x3 conv(s) and differ in
        the dilation of their final 3x3 conv (vision+1 / +2 / +4).  Their
        outputs (6 * inter_planes channels total) go through ConvLinear; the
        1x1 shortcut, eltadd and relu are presumably used in forward() to fuse
        the result with the input — confirm there.

        Args:
            in_planes: input channels.
            out_planes: output channels.
            stride: spatial stride of each branch and the shortcut.
            scale: residual scaling factor (stored for use outside __init__).
            map_reduce: channel reduction ratio for the internal width.
            vision: base dilation offset for the three branches.
            groups: grouped-conv factor forwarded to BasicConv.
        """
        super(BasicRFB, self).__init__()
        self.scale = scale
        self.out_channels = out_planes
        # internal width after the 1x1 channel reduction
        inter_planes = in_planes // map_reduce

        # branch 0: smallest receptive field (dilation vision + 1)
        self.branch0 = nn.Sequential(
            BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
            BasicConv(inter_planes, 2 * inter_planes, kernel_size=(3, 3), stride=stride, padding=(1, 1), groups=groups),
            BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1, padding=vision + 1, dilation=vision + 1, relu=False, groups=groups)
        )
        # branch 1: dilation vision + 2
        self.branch1 = nn.Sequential(
            BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
            BasicConv(inter_planes, 2 * inter_planes, kernel_size=(3, 3), stride=stride, padding=(1, 1), groups=groups),
            BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1, padding=vision + 2, dilation=vision + 2, relu=False, groups=groups)
        )
        # branch 2: deeper stack, dilation vision + 4
        self.branch2 = nn.Sequential(
            BasicConv(in_planes, inter_planes, kernel_size=1, stride=1, groups=groups, relu=False),
            BasicConv(inter_planes, (inter_planes // 2) * 3, kernel_size=3, stride=1, padding=1, groups=groups),
            BasicConv((inter_planes // 2) * 3, 2 * inter_planes, kernel_size=3, stride=stride, padding=1, groups=groups),
            BasicConv(2 * inter_planes, 2 * inter_planes, kernel_size=3, stride=1, padding=vision + 4, dilation=vision + 4, relu=False, groups=groups)
        )

        # fuse the concatenated branch outputs back down to out_planes
        self.ConvLinear = BasicConv(6 * inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
        self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)
        self.relu = nn.ReLU(inplace=False)
        # project-local elementwise-add module (not part of torch.nn)
        self.eltadd = nn.EltAdd()
Example #3
0
 def multibox(self, num_classes):
     """Build the loc/conf detection heads for the four source models.

     Per source: (model, anchors) = (model3, 3), (model4, 2), (model5, 2),
     (model6, 3).  Each anchor predicts 14 regression values (presumably box
     plus landmark offsets — confirm against the loss) and num_classes scores.

     Returns:
         (loc_layers, conf_layers) as two nn.Sequential stacks.
     """
     sources = [(self.model3, 3), (self.model4, 2),
                (self.model5, 2), (self.model6, 3)]
     loc_layers = []
     conf_layers = []
     for model, n_anchors in sources:
         loc_layers.append(nn.Conv2d(model.out_channels, n_anchors * 14, kernel_size=3, padding=1, bias=True))
         conf_layers.append(nn.Conv2d(model.out_channels, n_anchors * num_classes, kernel_size=3, padding=1, bias=True))
     return nn.Sequential(*loc_layers), nn.Sequential(*conf_layers)
Example #4
0
    def build_conv_block(self,
                         in_channels:  int,
                         out_channels: int,
                         kernel_size:  int = 3,
                         stride:       int = 1,
                         padding:      int = 1,
                         n_conv:       int = 2,
                         with_pool:    bool = False):
        """Build a VGG-style block: optional 2x2 max-pool, then n_conv
        Conv2d+ReLU pairs (the first maps in->out channels, the rest out->out).

        Args:
            in_channels: channels entering the first conv.
            out_channels: channels produced by every conv in the block.
            kernel_size: kernel size shared by all convs.
            stride: stride shared by all convs.
            padding: padding shared by all convs.
            n_conv: total number of conv layers (>= 1).
            with_pool: prepend nn.MaxPool2d(2, 2) when True.

        Returns:
            nn.Sequential with the assembled layers.
        """
        layers = []

        if with_pool:
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))

        # convx_1: the only conv that changes the channel count
        layers += [
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
            nn.ReLU()
        ]
        # convx_2 .. convx_(n_conv): identical out->out pairs
        # (loop variable was unused; extend layers directly instead of via a temp list)
        for _ in range(1, n_conv):
            layers += [
                nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
                nn.ReLU()
            ]

        # return as sequential
        return nn.Sequential(*layers)
Example #5
0
 def multibox(self, num_classes):
     """Build SSD-style loc/conf head stacks for three feature maps.

     Per-map (in_channels, anchors): (128, 21), (256, 1), (256, 1).  Every
     anchor predicts 4 box offsets and num_classes class scores.

     Returns:
         (loc_layers, conf_layers) as two nn.Sequential stacks.
     """
     head_specs = [(128, 21), (256, 1), (256, 1)]
     loc_layers = []
     conf_layers = []
     for channels, anchors in head_specs:
         loc_layers.append(
             nn.Conv2d(channels, anchors * 4, kernel_size=3, padding=1))
         conf_layers.append(
             nn.Conv2d(channels, anchors * num_classes, kernel_size=3, padding=1))
     return nn.Sequential(*loc_layers), nn.Sequential(*conf_layers)
Example #6
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_sizes,
                 strides=None,
                 paddings=None,
                 with_pool=True):
        """Stack of Conv2d+ReLU layers, optionally preceded by 2x2 max-pooling.

        Args:
            in_channels: per-layer input channel counts.
            out_channels: per-layer output channel counts (same length).
            kernel_sizes: per-layer kernel sizes (same length).
            strides: per-layer strides; defaults to 1 for every layer.
            paddings: per-layer paddings; defaults to 0 for every layer.
            with_pool: store a MaxPool2d(2, 2) in self.pool when True.
        """
        super(Conv_Block, self).__init__()
        assert len(in_channels) == len(out_channels)
        assert len(out_channels) == len(kernel_sizes)
        if strides is not None:
            assert len(kernel_sizes) == len(strides)
        else:
            # Conv2d's own default stride; previously leaving strides=None
            # crashed with a TypeError when strides[i] was indexed below.
            strides = [1] * len(kernel_sizes)
        if paddings is None:
            # Conv2d's own default padding; same None-indexing crash as above.
            paddings = [0] * len(kernel_sizes)

        self.pool = None
        if with_pool:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

        groups = len(in_channels)
        convs = []
        for i in range(groups):
            convs.append(
                nn.Conv2d(in_channels=in_channels[i],
                          out_channels=out_channels[i],
                          kernel_size=kernel_sizes[i],
                          stride=strides[i],
                          padding=paddings[i]))
            convs.append(nn.ReLU(inplace=True))
        self.feature = nn.Sequential(*convs)
Example #7
0
    def __init__(self):
        """LFFD v1: stem backbone plus eight detection branches (det1..det8).

        Only the modules are declared here; how the taps, strided convs and
        DetBlocks are wired together is defined in forward().
        """
        super(LFFDv1, self).__init__()

        # stem: two stride-2 convs (total downsample x4) + three residual blocks
        self.backbone = nn.Sequential(
            nn.Conv2d(in_channels=3,
                      out_channels=64,
                      kernel_size=3,
                      stride=2,
                      padding=0),  # downsample by 2
            nn.ReLU(),
            nn.Conv2d(in_channels=64,
                      out_channels=64,
                      kernel_size=3,
                      stride=2,
                      padding=0),  # downsample by 2
            ResBlock(64),
            ResBlock(64),
            ResBlock(64))

        # det_out=True presumably exposes an extra tap in ResBlock.forward — confirm there
        self.rb1 = ResBlock(64, det_out=True)
        self.det1 = DetBlock(64)

        self.relu_conv10 = nn.ReLU()
        self.conv11 = nn.Conv2d(in_channels=64,
                                out_channels=64,
                                kernel_size=3,
                                stride=2,
                                padding=0)  # downsample by 2
        self.rb2 = ResBlock(64)
        self.det2 = DetBlock(64)

        self.rb3 = ResBlock(64, det_out=True)
        self.det3 = DetBlock(64)

        self.relu_conv15 = nn.ReLU()
        # downsample by 2 and widen 64 -> 128 channels
        self.conv16 = nn.Conv2d(in_channels=64,
                                out_channels=128,
                                kernel_size=3,
                                stride=2,
                                padding=0)
        self.rb4 = ResBlock(128)
        # NOTE(review): det4 is built for 64 channels although conv16/rb4
        # carry 128 — confirm the wiring in forward()
        self.det4 = DetBlock(64)

        self.relu_conv18 = nn.ReLU()
        self.conv19 = nn.Conv2d(in_channels=128,
                                out_channels=128,
                                kernel_size=3,
                                stride=2,
                                padding=0)  # downsample by 2
        self.rb5 = ResBlock(128)
        self.det5 = DetBlock(128)

        self.rb6 = ResBlock(128, det_out=True)
        self.det6 = DetBlock(128)

        self.rb7 = ResBlock(128, det_out=True)
        self.det7 = DetBlock(128)

        self.relu_conv25 = nn.ReLU()
        self.det8 = DetBlock(128)
Example #8
0
def _conv_dw(in_channels, out_channels, stride):
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=stride, padding=1, groups=in_channels, bias=False),
        nn.BatchNorm2d(in_channels),
        nn.ReLU(inplace=True),
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True)
    )
Example #9
0
def conv_dw(inp, oup, stride, leaky=0.1):
    """Depthwise-separable conv block with LeakyReLU activations.

    3x3 depthwise conv + BN + LeakyReLU, then 1x1 pointwise conv + BN +
    LeakyReLU; both convs are bias-free.
    """
    layers = []
    layers.append(nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False))
    layers.append(nn.BatchNorm2d(inp))
    layers.append(nn.LeakyReLU(negative_slope=leaky, inplace=True))
    layers.append(nn.Conv2d(inp, oup, 1, 1, 0, bias=False))
    layers.append(nn.BatchNorm2d(oup))
    layers.append(nn.LeakyReLU(negative_slope=leaky, inplace=True))
    return nn.Sequential(*layers)
Example #10
0
    def __init__(self):
        """LFFD v2: stem backbone plus five detection branches (det1..det5).

        Each relu/conv pair downsamples by 2 before a residual block and its
        DetBlock; the actual wiring is defined in forward().
        """
        super(LFFDv2, self).__init__()

        # stem: two stride-2 convs (total downsample x4) + three residual blocks
        self.backbone = nn.Sequential(
            nn.Conv2d(in_channels=3,
                      out_channels=64,
                      kernel_size=3,
                      stride=2,
                      padding=0),  # downsample by 2
            nn.ReLU(),
            nn.Conv2d(in_channels=64,
                      out_channels=64,
                      kernel_size=3,
                      stride=2,
                      padding=0),  # downsample by 2
            ResBlock(64),
            ResBlock(64),
            ResBlock(64))

        self.relu_conv8 = nn.ReLU()
        self.conv9 = nn.Conv2d(in_channels=64,
                               out_channels=64,
                               kernel_size=3,
                               stride=2,
                               padding=0)  # downsample by 2
        self.rb1 = ResBlock(64)
        self.det1 = DetBlock(64)

        self.relu_conv11 = nn.ReLU()
        self.conv12 = nn.Conv2d(in_channels=64,
                                out_channels=64,
                                kernel_size=3,
                                stride=2,
                                padding=0)  # downsample by 2
        self.rb2 = ResBlock(64)
        self.det2 = DetBlock(64)

        self.relu_conv14 = nn.ReLU()
        # widens 64 -> 128 channels while downsampling
        self.conv15 = nn.Conv2d(in_channels=64,
                                out_channels=128,
                                kernel_size=3,
                                stride=2,
                                padding=0)  # downsample by 2
        self.rb3 = ResBlock(128)
        # NOTE(review): det3 is built for 64 channels although conv15/rb3
        # carry 128 — confirm the wiring in forward()
        self.det3 = DetBlock(64)

        self.relu_conv17 = nn.ReLU()
        self.conv18 = nn.Conv2d(in_channels=128,
                                out_channels=128,
                                kernel_size=3,
                                stride=2,
                                padding=0)  # downsample by 2
        self.rb4 = ResBlock(128)
        self.det4 = DetBlock(128)

        self.relu_conv20 = nn.ReLU()
        self.det5 = DetBlock(128)
Example #11
0
    def __init__(self, mode='slim'):
        """Ultra-Light-Fast-Generic (ULFG) face detector, SSD-style.

        Args:
            mode: 'slim' keeps the plain depthwise backbone; 'rfb' swaps
                backbone stage 7 for a BasicRFB context block.
        """
        super(ULFG, self).__init__()
        self.mode = mode

        # base width = 16 channels; trailing comments give the feature-map
        # resolution for a 320x240 input
        self.base_channel = 8 * 2
        self.backbone = nn.Sequential(
            _conv_bn(3, self.base_channel, 2),  # 160*120
            _conv_dw(self.base_channel, self.base_channel * 2, 1),
            _conv_dw(self.base_channel * 2, self.base_channel * 2, 2),  # 80*60
            _conv_dw(self.base_channel * 2, self.base_channel * 2, 1),
            _conv_dw(self.base_channel * 2, self.base_channel * 4, 2),  # 40*30
            _conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
            _conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
            _conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
            _conv_dw(self.base_channel * 4, self.base_channel * 8, 2),  # 20*15
            _conv_dw(self.base_channel * 8, self.base_channel * 8, 1),
            _conv_dw(self.base_channel * 8, self.base_channel * 8, 1),
            _conv_dw(self.base_channel * 8, self.base_channel * 16, 2),  # 10*8
            _conv_dw(self.base_channel * 16, self.base_channel * 16, 1)
        )
        # receptive-field enhancement variant: replace stage 7 in place
        if self.mode == 'rfb':
            self.backbone[7] = BasicRFB(self.base_channel * 4, self.base_channel * 4, stride=1, scale=1.0)

        # backbone indices whose outputs feed the detection heads
        # (presumably consumed by an SSD-style forward/wrapper — confirm)
        self.source_layer_indexes = [8, 11, 13]
        self.extras = nn.Sequential(
            nn.Conv2d(in_channels=self.base_channel * 16, out_channels=self.base_channel * 4, kernel_size=1),
            nn.ReLU(),
            _seperable_conv2d(in_channels=self.base_channel * 4, out_channels=self.base_channel * 16, kernel_size=3, stride=2, padding=1),
            nn.ReLU()
        )
        # box-regression heads: out_channels = anchors * 4 offsets
        self.regression_headers = nn.ModuleList([
            _seperable_conv2d(in_channels=self.base_channel * 4, out_channels=3 * 4, kernel_size=3, padding=1),
            _seperable_conv2d(in_channels=self.base_channel * 8, out_channels=2 * 4, kernel_size=3, padding=1),
            _seperable_conv2d(in_channels=self.base_channel * 16, out_channels=2 * 4, kernel_size=3, padding=1),
            nn.Conv2d(in_channels=self.base_channel * 16, out_channels=3 * 4, kernel_size=3, padding=1)
        ])
        # classification heads: out_channels = anchors * 2 classes
        self.classification_headers = nn.ModuleList([
            _seperable_conv2d(in_channels=self.base_channel * 4, out_channels=3 * 2, kernel_size=3, padding=1),
            _seperable_conv2d(in_channels=self.base_channel * 8, out_channels=2 * 2, kernel_size=3, padding=1),
            _seperable_conv2d(in_channels=self.base_channel * 16, out_channels=2 * 2, kernel_size=3, padding=1),
            nn.Conv2d(in_channels=self.base_channel * 16, out_channels=3 * 2, kernel_size=3, padding=1)
        ])
        self.softmax = nn.Softmax(dim=2)
Example #12
0
    def _make_layer(self, block, planes, blocks, stride=1):
        """Assemble one ResNet stage of `blocks` residual units.

        The first unit may downsample (stride != 1) and/or widen the channel
        count, in which case it gets a 1x1-conv + BN projection shortcut.
        Updates self.inplanes to the stage's output width as a side effect.
        """
        shortcut = None
        needs_projection = stride != 1 or self.inplanes != planes * block.expansion
        if needs_projection:
            shortcut = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        stage = [block(self.inplanes, planes, stride, shortcut)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stage.append(block(self.inplanes, planes))

        return nn.Sequential(*stage)
Example #13
0
def make_layers(cfg, batch_norm=False):
    """Build a VGG-style feature extractor from a layer spec.

    Each entry of cfg is either 'M' (2x2 max-pool) or an int giving the
    output channels of a 3x3 conv (padding 1), followed by ReLU and an
    optional BatchNorm in between.  Input starts at 3 channels.
    """
    layers = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(spec))
        layers.append(nn.ReLU(inplace=True))
        channels = spec
    return nn.Sequential(*layers)
Example #14
0
 def __init__(self):
     """MobileNetV1-style backbone in three stages (width-reduced, 8..256 ch).

     The trailing numbers in the comments track the cumulative receptive
     field after each layer.  The average pool is left commented out; fc
     expects a 256-channel feature vector.
     """
     super(MobileNetV1, self).__init__()
     # stage 1: stem conv + depthwise blocks, downsampling x8 overall
     self.stage1 = nn.Sequential(
         conv_bn(3, 8, 2, leaky=0.1),  # 3
         conv_dw(8, 16, 1),  # 7
         conv_dw(16, 32, 2),  # 11
         conv_dw(32, 32, 1),  # 19
         conv_dw(32, 64, 2),  # 27
         conv_dw(64, 64, 1),  # 43
     )
     # stage 2: one downsample then five width-preserving blocks
     self.stage2 = nn.Sequential(
         conv_dw(64, 128, 2),  # 43 + 16 = 59
         conv_dw(128, 128, 1),  # 59 + 32 = 91
         conv_dw(128, 128, 1),  # 91 + 32 = 123
         conv_dw(128, 128, 1),  # 123 + 32 = 155
         conv_dw(128, 128, 1),  # 155 + 32 = 187
         conv_dw(128, 128, 1),  # 187 + 32 = 219
     )
     # stage 3: final downsample to 256 channels
     self.stage3 = nn.Sequential(
         conv_dw(128, 256, 2),  # 219 +3 2 = 241
         conv_dw(256, 256, 1),  # 241 + 64 = 301
     )
     # self.avg = nn.AdaptiveAvgPool2d((1,1))
     self.fc = nn.Linear(256, 1000)
Example #15
0
def upsample(in_channels, out_channels):
    """Depthwise 3x3 conv + pointwise 1x1 conv + BatchNorm + ReLU.

    NOTE(review): despite the name, no spatial upsampling happens here — the
    original author noted F.interpolate should be used; presumably the caller
    pairs this block with an interpolation step.
    """
    depthwise = nn.Conv2d(in_channels=in_channels,
                          out_channels=in_channels,
                          kernel_size=(3, 3),
                          stride=1,
                          padding=1,
                          groups=in_channels,
                          bias=False)
    pointwise = nn.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=False)
    return nn.Sequential(depthwise, pointwise,
                         nn.BatchNorm2d(out_channels), nn.ReLU())
Example #16
0
    def __init__(self, inp, oup, stride, expand_ratio, model_type=32):
        """Inverted residual block built around a depthwise 3x3 conv.

        Args:
            inp: input channels.
            oup: output channels.
            stride: stride of the depthwise conv (must be 1 or 2).
            expand_ratio: hidden-width multiplier; 1 skips the 1x1 expansion.
            model_type: when 32, a PReLU is appended after the projection
                conv (otherwise the bottleneck stays linear).
        """
        super(InvertedResidual_dwc, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        # residual add only when spatial size and channel width are preserved
        self.use_res_connect = self.stride == 1 and inp == oup

        # assembled as a plain list first; converted to nn.Sequential below
        self.conv = []

        if expand_ratio == 1:
            # no expansion: depthwise 3x3 straight on the input
            # (hidden_dim == inp in this branch)
            self.conv.append(
                nn.Conv2d(inp,
                          hidden_dim,
                          kernel_size=(3, 3),
                          stride=stride,
                          padding=1,
                          groups=hidden_dim))
            self.conv.append(nn.BatchNorm2d(hidden_dim))
            self.conv.append(nn.PReLU())
            # 1x1 projection to the output width
            self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
            self.conv.append(nn.BatchNorm2d(oup))
            if model_type == 32:
                self.conv.append(nn.PReLU())
        else:
            # 1x1 expand -> depthwise 3x3 -> 1x1 project
            self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False))
            self.conv.append(nn.BatchNorm2d(hidden_dim))
            self.conv.append(nn.PReLU())
            self.conv.append(
                nn.Conv2d(hidden_dim,
                          hidden_dim,
                          kernel_size=(3, 3),
                          stride=stride,
                          padding=1,
                          groups=hidden_dim))
            self.conv.append(nn.BatchNorm2d(hidden_dim))
            self.conv.append(nn.PReLU())
            self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
            self.conv.append(nn.BatchNorm2d(oup))
            if model_type == 32:
                self.conv.append(nn.PReLU())

        self.conv = nn.Sequential(*self.conv)
        if self.use_res_connect:
            # project-local elementwise-add module (not part of torch.nn)
            self.eltadd = nn.EltAdd()
Example #17
0
    def __init__(self, channels, det_out=False):
        """Residual block: two 3x3 convs (channel count preserved) + skip add.

        Args:
            channels: input and output channel count.
            det_out: stored flag; presumably makes forward() also expose a
                detection tap — confirm there.
        """
        super(ResBlock, self).__init__()

        self.channels = channels
        self.det_out = det_out

        self.relu = nn.ReLU()
        # conv -> ReLU -> conv; self.relu / self.eltadd are presumably applied
        # around the skip connection in forward()
        self.block = nn.Sequential(
            nn.Conv2d(in_channels=self.channels,
                      out_channels=self.channels,
                      kernel_size=3,
                      stride=1,
                      padding=1), nn.ReLU(),
            nn.Conv2d(in_channels=self.channels,
                      out_channels=self.channels,
                      kernel_size=3,
                      stride=1,
                      padding=1))

        # project-local elementwise-add module (not part of torch.nn)
        self.eltadd = nn.EltAdd()
Example #18
0
 def build_conv_block(self,
                      in_channels: int,
                      out_channels: int,
                      kernel_size: int = 3,
                      stride: int = 1,
                      padding: int = 1,
                      dilation: int = 1,
                      n_conv: int = 2,
                      with_pool: bool = False):
     """Assemble n_conv Conv2d+ReLU pairs, optionally ending in a 2x2 max-pool.

     Only the first conv applies `dilation` (and maps in->out channels); the
     remaining convs are out->out with Conv2d's default dilation of 1.
     """
     block = [
         nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding,
                   dilation),
         nn.ReLU(),
     ]
     for _ in range(n_conv - 1):
         block.append(nn.Conv2d(out_channels, out_channels, kernel_size,
                                stride, padding))
         block.append(nn.ReLU())
     if with_pool:
         block.append(nn.MaxPool2d(2, 2))
     return nn.Sequential(*block)
Example #19
0
def conv_bn_no_relu(inp, oup, stride):
    """3x3 conv (padding 1, bias-free) followed by BatchNorm, no activation."""
    conv = nn.Conv2d(inp, oup, 3, stride, 1, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(oup))
Example #20
0
def conv_bn(inp, oup, stride, k_size=3):
    """Conv + BatchNorm + PReLU.

    NOTE(review): padding is fixed at 1 regardless of k_size — confirm this is
    intended for kernel sizes other than 3.
    """
    modules = [
        nn.Conv2d(inp, oup, k_size, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.PReLU(),
    ]
    return nn.Sequential(*modules)
Example #21
0
    def __init__(self, in_planes=256, out_planes=256):
        """Receptive Field Enhancement (RFE) module.

        Four parallel bottleneck branches at quarter width, each structured as
        1x1 reduce -> asymmetric conv -> 1x1, using kernels 1x5, 5x1, 1x3 and
        3x1 respectively.  Their concatenation (4 * inter = in_planes channels)
        is fused by cated_conv; eltadd presumably adds the input back in
        forward() — confirm there.
        """
        super(RFE, self).__init__()
        self.out_channels = out_planes
        # quarter-width bottleneck channels
        self.inter_channels = int(in_planes / 4)

        # branch 0: horizontal 1x5 kernel
        self.branch0 = nn.Sequential(
            nn.Conv2d(in_planes,
                      self.inter_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0), nn.ReLU(inplace=True),
            nn.Conv2d(self.inter_channels,
                      self.inter_channels,
                      kernel_size=(1, 5),
                      stride=1,
                      padding=(0, 2)), nn.ReLU(inplace=True),
            nn.Conv2d(self.inter_channels,
                      self.inter_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0), nn.ReLU(inplace=True))
        # branch 1: vertical 5x1 kernel
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_planes,
                      self.inter_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0), nn.ReLU(inplace=True),
            nn.Conv2d(self.inter_channels,
                      self.inter_channels,
                      kernel_size=(5, 1),
                      stride=1,
                      padding=(2, 0)), nn.ReLU(inplace=True),
            nn.Conv2d(self.inter_channels,
                      self.inter_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0), nn.ReLU(inplace=True))
        # branch 2: horizontal 1x3 kernel
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_planes,
                      self.inter_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0), nn.ReLU(inplace=True),
            nn.Conv2d(self.inter_channels,
                      self.inter_channels,
                      kernel_size=(1, 3),
                      stride=1,
                      padding=(0, 1)), nn.ReLU(inplace=True),
            nn.Conv2d(self.inter_channels,
                      self.inter_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0), nn.ReLU(inplace=True))
        # branch 3: vertical 3x1 kernel
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_planes,
                      self.inter_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0), nn.ReLU(inplace=True),
            nn.Conv2d(self.inter_channels,
                      self.inter_channels,
                      kernel_size=(3, 1),
                      stride=1,
                      padding=(1, 0)), nn.ReLU(inplace=True),
            nn.Conv2d(self.inter_channels,
                      self.inter_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0), nn.ReLU(inplace=True))
        # fuse the concatenated branch outputs back to out_planes
        self.cated_conv = nn.Sequential(
            nn.Conv2d(in_planes,
                      out_planes,
                      kernel_size=1,
                      stride=1,
                      padding=0), nn.ReLU(inplace=True))

        # project-local elementwise-add module (not part of torch.nn)
        self.eltadd = nn.EltAdd()
Example #22
0
    def __init__(self):
        """DSFD (Dual Shot Face Detector) on a ResNet-152 backbone.

        Fixed input size 640, two classes (face / background).  Layers 1-4
        come from torchvision's resnet152; layers 5-6 are extra strided conv
        stages.  On the six feature maps it builds FPN lateral/smooth convs,
        FEM context modules and pa_multibox detection heads; the wiring is
        defined in forward().
        """
        super(DSFD, self).__init__()
        self.size = 640
        self.num_classes = 2

        ######
        # build backbone
        ######
        resnet152 = vision.models.resnet152()
        self.layer1 = nn.Sequential(resnet152.conv1, resnet152.bn1,
                                    resnet152.relu, resnet152.maxpool,
                                    resnet152.layer1)
        self.layer2 = nn.Sequential(resnet152.layer2)
        self.layer3 = nn.Sequential(resnet152.layer3)
        self.layer4 = nn.Sequential(resnet152.layer4)
        # extra stage 5: 2048 -> 512, downsample by 2
        self.layer5 = nn.Sequential(*[
            nn.Conv2d(2048, 512, kernel_size=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=2),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True)
        ])
        # extra stage 6: 512 -> 256, downsample by 2
        self.layer6 = nn.Sequential(*[
            nn.Conv2d(
                512,
                128,
                kernel_size=1,
            ),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        ])

        ######
        # dsfd specific layers
        ######
        # channel widths of the six source feature maps (layer1..layer6)
        output_channels = [256, 512, 1024, 2048, 512, 256]
        # fpn
        fpn_in = output_channels

        # lateral 1x1 convs map a deeper map's width down to the next
        # shallower map's width for top-down fusion
        self.latlayer3 = nn.Conv2d(fpn_in[3],
                                   fpn_in[2],
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
        self.latlayer2 = nn.Conv2d(fpn_in[2],
                                   fpn_in[1],
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
        self.latlayer1 = nn.Conv2d(fpn_in[1],
                                   fpn_in[0],
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)

        # width-preserving 1x1 smoothing convs applied after fusion
        self.smooth3 = nn.Conv2d(fpn_in[2],
                                 fpn_in[2],
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.smooth2 = nn.Conv2d(fpn_in[1],
                                 fpn_in[1],
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.smooth1 = nn.Conv2d(fpn_in[0],
                                 fpn_in[0],
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)

        self.upsample = nn.Upsample(scale_factor=2,
                                    mode='bilinear',
                                    align_corners=False)

        # project-local elementwise-multiply module (not part of torch.nn)
        self.eltmul = nn.EltMul()

        # fem: one Feature Enhance Module per source map
        cpm_in = output_channels
        self.cpm3_3 = FEM(cpm_in[0])
        self.cpm4_3 = FEM(cpm_in[1])
        self.cpm5_3 = FEM(cpm_in[2])
        self.cpm7 = FEM(cpm_in[3])
        self.cpm6_2 = FEM(cpm_in[4])
        self.cpm7_2 = FEM(cpm_in[5])

        # pa: one anchor per source map
        cfg_mbox = [1, 1, 1, 1, 1, 1]
        head = pa_multibox(output_channels, cfg_mbox, self.num_classes)

        # detection head
        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        self.softmax = nn.Softmax(dim=-1)
Example #23
0
def conv_1x1_bn(inp, oup):
    """Pointwise 1x1 conv (bias-free) + BatchNorm + PReLU."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.PReLU(),
    ]
    return nn.Sequential(*layers)
Example #24
0
    def __init__(self,
                 embedding_size=128,
                 input_size=224,
                 width_mult=1.,
                 model_type=32):
        """MobileNetV2-style feature extractor built from InvertedResidual_dwc.

        Args:
            embedding_size: not referenced in this constructor (kept for
                caller compatibility).
            input_size: not referenced in this constructor (kept for caller
                compatibility).
            width_mult: channel-width multiplier applied to every stage.
            model_type: per-stage channel count; values other than 32 rewrite
                the stage table (and shift the downsampling from stage 3 to
                stage 2).  Also forwarded to every block.
        """
        super(MobileNetV2, self).__init__()
        block_dwc = InvertedResidual_dwc
        input_channel = 64
        last_channel = 256
        interverted_residual_setting = [
            # t, c, n, s  (expansion, channels, repeats, stride on last repeat)
            [1, 32, 1, 1],  # depthwise conv for first row
            [2, 32, 2, 1],
            [4, 32, 2, 1],
            [2, 32, 2, 2],
            [4, 32, 5, 1],
            [2, 32, 2, 2],
            [2, 32, 6, 2],
        ]
        if model_type != 32:
            # retarget every stage to model_type channels and move the
            # downsampling stride from stage index 3 to stage index 2
            for idx, irs in enumerate(interverted_residual_setting):
                irs[1] = model_type
                if idx == 2:
                    irs[3] = 2
                if idx == 3:
                    irs[3] = 1

        # building first layer
        input_channel = int(input_channel * width_mult)
        self.last_channel = int(
            last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]

        # building inverted residual blocks.
        # NOTE: the original code branched on a stage counter (cnt > 1) but
        # both branches were byte-for-byte identical, so the branch and the
        # counter are removed; behavior is unchanged.
        for t, c, n, s in interverted_residual_setting:
            output_channel = int(c * width_mult)
            for i in range(n):
                # stride s only on the last repeat of the stage
                # ("reduce the featuremap in the last")
                stride = s if i == n - 1 else 1
                self.features.append(
                    block_dwc(input_channel,
                              output_channel,
                              stride,
                              expand_ratio=t,
                              model_type=model_type))
                input_channel = output_channel

        # building last several layers
        self.features.append(gated_conv1x1(input_channel, self.last_channel))

        # make it nn.Sequential
        self.features_sequential = nn.Sequential(*self.features)
Example #25
0
    def __init__(self):
        """Selective Refinement Network (SRN) face detector.

        ResNet-style backbone (Bottleneck blocks, [3, 4, 6, 3] layout) plus
        two extra strided conv stages, FPN-style lateral/top-down convs, and
        shared cls/box subnets with RFE modules.  The wiring of the pyramid
        and the two-step refinement is defined in forward().
        """
        super(SRN, self).__init__()

        # backbone: Bottleneck x [3, 4, 6, 3]
        block = Bottleneck
        layers = [3, 4, 6, 3]
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # extra strided stages beyond the ResNet trunk
        self.layer5 = nn.Conv2d(2048, 1024, kernel_size=3, stride=2, padding=1)
        self.layer6 = nn.Conv2d(1024, 256, kernel_size=3, stride=2, padding=1)

        # FPN lateral 1x1 convs mapping backbone maps to 256 channels
        self.c5_lateral = nn.Conv2d(2048,
                                    256,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0)
        self.c4_lateral = nn.Conv2d(1024,
                                    256,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0)
        self.c3_lateral = nn.Conv2d(512,
                                    256,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0)
        self.c2_lateral = nn.Conv2d(256,
                                    256,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0)

        # project-local elementwise-add module (not part of torch.nn)
        self.eltadd = nn.EltAdd()
        self.upsample = nn.Upsample(scale_factor=2,
                                    mode='bilinear',
                                    align_corners=False)

        # pyramid convs: p7/p6 downsample (stride 2), p5..p2 smooth (stride 1)
        self.p7_conv = nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1)
        self.p6_conv = nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1)
        self.p5_conv = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.p4_conv = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.p3_conv = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.p2_conv = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)

        # per-level 1x1 convs on the backbone (first-step) maps
        self.c7_conv = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
        self.c6_conv = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
        self.c5_conv = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0)
        self.c4_conv = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
        self.c3_conv = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
        self.c2_conv = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)

        # subnet_first stage
        num_anchors = 2 * 1
        # shared classification tower with an RFE context module in the middle
        self.cls_subnet = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            RFE(256, 256),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
        )
        # shared box-regression tower, same layout as the cls tower
        self.box_subnet = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            RFE(256, 256),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
        )
        # heads: 1 score and 4 box offsets per anchor
        self.cls_subnet_pred = nn.Conv2d(256,
                                         num_anchors * 1,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0)
        self.box_subnet_pred = nn.Conv2d(256,
                                         num_anchors * 4,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0)

        self.sigmoid = nn.Sigmoid()
Example #26
0
def conv_bn(inp, oup, stride=1, leaky=0):
    """3x3 conv (padding 1, bias-free) + BatchNorm + LeakyReLU."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*layers)
Example #27
0
def _conv_bn(in_channels, out_channels, stride):
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
        nn.BatchNorm2d(num_features=out_channels),
        nn.ReLU(inplace=True)
    )
Example #28
0
def _seperable_conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0):
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, groups=in_channels, stride=stride, padding=padding),
        nn.ReLU(),
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1)
    )