Example #1
    def __init__(self, in_channels: int, out_channels: int, index: int):
        super(SSH, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.half_out_channels = int(out_channels / 2)
        self.quater_out_channels = int(self.half_out_channels / 2)
        self.index = index
        
        self.ssh_3x3 = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.half_out_channels, kernel_size=3, stride=1, padding=1)
        )

        self.ssh_dimred = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.quater_out_channels, kernel_size=3, stride=1, padding=1),
            nn.ReLU()
        )

        self.ssh_5x5 = nn.Sequential(
            nn.Conv2d(in_channels=self.quater_out_channels, out_channels=self.quater_out_channels, kernel_size=3, stride=1, padding=1)
        )

        self.ssh_7x7 = nn.Sequential(
            nn.Conv2d(in_channels=self.quater_out_channels, out_channels=self.quater_out_channels, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=self.quater_out_channels, out_channels=self.quater_out_channels, kernel_size=3, stride=1, padding=1)
        )

        self.out_relu = nn.ReLU()
Example #2
    def __init__(self):
        super(LFFDv1, self).__init__()

        self.backbone = nn.Sequential(
            nn.Conv2d(in_channels=3,
                      out_channels=64,
                      kernel_size=3,
                      stride=2,
                      padding=0),  # downsample by 2
            nn.ReLU(),
            nn.Conv2d(in_channels=64,
                      out_channels=64,
                      kernel_size=3,
                      stride=2,
                      padding=0),  # downsample by 2
            ResBlock(64),
            ResBlock(64),
            ResBlock(64))

        self.rb1 = ResBlock(64, det_out=True)
        self.det1 = DetBlock(64)

        self.relu_conv10 = nn.ReLU()
        self.conv11 = nn.Conv2d(in_channels=64,
                                out_channels=64,
                                kernel_size=3,
                                stride=2,
                                padding=0)
        self.rb2 = ResBlock(64)
        self.det2 = DetBlock(64)

        self.rb3 = ResBlock(64, det_out=True)
        self.det3 = DetBlock(64)

        self.relu_conv15 = nn.ReLU()
        self.conv16 = nn.Conv2d(in_channels=64,
                                out_channels=128,
                                kernel_size=3,
                                stride=2,
                                padding=0)
        self.rb4 = ResBlock(128)
        self.det4 = DetBlock(64)

        self.relu_conv18 = nn.ReLU()
        self.conv19 = nn.Conv2d(in_channels=128,
                                out_channels=128,
                                kernel_size=3,
                                stride=2,
                                padding=0)
        self.rb5 = ResBlock(128)
        self.det5 = DetBlock(128)

        self.rb6 = ResBlock(128, det_out=True)
        self.det6 = DetBlock(128)

        self.rb7 = ResBlock(128, det_out=True)
        self.det7 = DetBlock(128)

        self.relu_conv25 = nn.ReLU()
        self.det8 = DetBlock(128)
Example #3
    def __init__(self):
        super(NN, self).__init__()

        self.conv = nn.Conv2d(3, 64, 3, 1, 1)
        self.headers = nn.ModuleList(
            [nn.Conv2d(64, 4, 3, 1, 1),
             nn.Conv2d(64, 2, 3, 1, 1)])
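
A minimal shape check for the two-head network above. This is a hedged sketch, assuming NN and torch are importable; since the snippet defines no forward, the registered submodules are called directly:

import torch

net = NN()
x = torch.randn(1, 3, 32, 32)
feat = net.conv(x)                             # 3x3 conv, padding 1 keeps spatial size: [1, 64, 32, 32]
box, cls = [head(feat) for head in net.headers]
print(box.shape, cls.shape)                    # [1, 4, 32, 32] and [1, 2, 32, 32]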
Example #4
 def __init__(self, input_channels, output_channels):
     super(DeepHeadModule, self).__init__()
     self._input_channels = input_channels
     self._output_channels = output_channels
     self._mid_channels = min(self._input_channels, 256)
     #print(self._mid_channels)
     self.conv1 = nn.Conv2d(self._input_channels,
                            self._mid_channels,
                            kernel_size=3,
                            dilation=1,
                            stride=1,
                            padding=1)
     self.conv2 = nn.Conv2d(self._mid_channels,
                            self._mid_channels,
                            kernel_size=3,
                            dilation=1,
                            stride=1,
                            padding=1)
     self.conv3 = nn.Conv2d(self._mid_channels,
                            self._mid_channels,
                            kernel_size=3,
                            dilation=1,
                            stride=1,
                            padding=1)
     self.conv4 = nn.Conv2d(self._mid_channels,
                            self._output_channels,
                            kernel_size=1,
                            dilation=1,
                            stride=1,
                            padding=0)
     self.relu = nn.ReLU(inplace=True)
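
The constructor above only registers layers; the forward pass is not part of the snippet. A plausible wiring, stated as an assumption, chains the three 3x3 convolutions with the shared ReLU and ends with the 1x1 projection:

def forward(self, x):
    # assumed wiring: three 3x3 conv+ReLU stages, then the 1x1 output projection
    out = self.relu(self.conv1(x))
    out = self.relu(self.conv2(out))
    out = self.relu(self.conv3(out))
    return self.conv4(out)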
Example #5
    def _pyramidbox(self):
        self.ssh_conv3_l2norm = nn.L2Norm2d(512, 10)
        self.ssh_conv4_l2norm = nn.L2Norm2d(512, 8)
        self.ssh_conv5_l2norm = nn.L2Norm2d(512, 5)

        self.SSHchannels = [512, 512, 512, 512, 512, 512]
        loc = []
        conf = []
        for i in range(6):
            loc.append(
                nn.Conv2d(self.SSHchannels[i],
                          8,
                          kernel_size=3,
                          stride=1,
                          padding=1))
            if i == 0:
                conf.append(
                    nn.Conv2d(self.SSHchannels[i],
                              8,
                              kernel_size=3,
                              stride=1,
                              padding=1))
            else:
                conf.append(
                    nn.Conv2d(self.SSHchannels[i],
                              6,
                              kernel_size=3,
                              stride=1,
                              padding=1))
        self.mbox_loc = nn.ModuleList(loc)
        self.mbox_conf = nn.ModuleList(conf)

        self.softmax = nn.Softmax(dim=-1)
Example #6
    def build_conv_block(self,
                         in_channels:  int,
                         out_channels: int,
                         kernel_size:  int = 3,
                         stride:       int = 1,
                         padding:      int = 1,
                         n_conv:       int = 2,
                         with_pool:    bool = False):
        layers = []

        if with_pool:
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))

        # convx_1
        layers += [
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
            nn.ReLU()
        ]
        # convx_2 -> convx_(n_conv)
        for i in range(1, n_conv):
            add_layers = [
                nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
                nn.ReLU()
            ]
            layers += add_layers

        # return as sequential
        return nn.Sequential(*layers)
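
A hedged usage sketch for the helper above. It never touches self, so it can be exercised directly with None in place of the instance; torch is assumed to be installed and build_conv_block to be in scope:

import torch

# VGG-style stage: 2x2 max-pool, then two 3x3 conv+ReLU pairs (64 -> 128 channels)
stage = build_conv_block(None, in_channels=64, out_channels=128, n_conv=2, with_pool=True)
x = torch.randn(1, 64, 32, 32)
print(stage(x).shape)   # torch.Size([1, 128, 16, 16]) -- the pool halves the spatial size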
Example #7
    def build_detect_head(self):
        bbox_pred = []
        cls_score = []
        for oc in self.ssh_out_channels:
            bbox_pred += [ nn.Conv2d(in_channels=oc, out_channels=8, kernel_size=1, stride=1, padding=0) ]
            cls_score += [ nn.Conv2d(in_channels=oc, out_channels=4, kernel_size=1, stride=1, padding=0) ]

        return nn.ModuleList(bbox_pred), nn.ModuleList(cls_score)
Example #8
def conv_dw(inp, oup, stride, leaky=0.1):
    return nn.Sequential(
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    )
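
A minimal usage sketch of the depthwise-separable block above, assuming conv_dw is in scope and torch is installed (shapes are purely illustrative):

import torch

block = conv_dw(32, 64, stride=2)   # depthwise 3x3 (stride 2) + pointwise 1x1, 32 -> 64 channels
x = torch.randn(1, 32, 56, 56)
print(block(x).shape)               # torch.Size([1, 64, 28, 28])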
Example #9
 def build_det_head(self):
     bbox_head = []
     cls_head = []
     for i in range(3):
         bbox_head += [
             nn.Conv2d(128, 4, kernel_size=1, stride=1, padding=0)
         ]
         cls_head += [nn.Conv2d(128, 2, kernel_size=1, stride=1, padding=0)]
     return nn.ModuleList(bbox_head), nn.ModuleList(cls_head)
Example #10
def _conv_dw(in_channels, out_channels, stride):
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=stride, padding=1, groups=in_channels, bias=False),
        nn.BatchNorm2d(in_channels),
        nn.ReLU(inplace=True),
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True)
    )
Example #11
    def __init__(self):
        super(SSH, self).__init__()

        # backbone
        self.vgg16 = nn.ModuleList(make_layers(vgg_cfgs['D']))

        # SSH - M3
        self.M3 = M_Module(512, 256, 128)
        self.M3_bbox_pred = nn.Conv2d(512, 8, 1, 1, 0)
        self.M3_cls_score = nn.Conv2d(512, 4, 1, 1, 0)
        self.M3_cls_score_softmax = nn.Softmax(dim=1)
        # SSH - M2
        self.M2 = M_Module(512, 256, 128)
        self.M2_bbox_pred = nn.Conv2d(512, 8, 1, 1, 0)
        self.M2_cls_score = nn.Conv2d(512, 4, 1, 1, 0)
        self.M2_cls_score_softmax = nn.Softmax(dim=1)
        # SSH - M1
        self.conv4_128 = nn.Conv2d(512, 128, 1, 1, 0)
        self.conv4_128_relu = nn.ReLU(inplace=True)
        self.conv5_128 = nn.Conv2d(512, 128, 1, 1, 0)
        self.conv5_128_relu = nn.ReLU(inplace=True)
        self.conv5_128_up = nn.ConvTranspose2d(128, 128, 4, 2, 1, groups=128, bias=False)
        self.eltadd = nn.EltAdd()
        self.conv4_fuse_final = nn.Conv2d(128, 128, 3, 1, 1)
        self.conv4_fuse_final_relu = nn.ReLU(inplace=True)
        self.M1 = M_Module(128, 128, 64)
        self.M1_bbox_pred = nn.Conv2d(256, 8, 1, 1, 0)
        self.M1_cls_score = nn.Conv2d(256, 4, 1, 1, 0)
        self.M1_cls_score_softmax = nn.Softmax(dim=1)
Example #12
 def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True):
     super(BasicConv, self).__init__()
     self.out_channels = out_planes
     if bn:
         self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False)
         self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)
         self.relu = nn.ReLU(inplace=True) if relu else None
     else:
         self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=True)
         self.bn = None
         self.relu = nn.ReLU(inplace=True) if relu else None
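
The snippet shows only the constructor; the usual forward for this pattern, given here as an assumption, applies whichever of the optional submodules were created:

def forward(self, x):
    x = self.conv(x)
    if self.bn is not None:       # bn is None when the block was built with bn=False
        x = self.bn(x)
    if self.relu is not None:     # relu is None when relu=False
        x = self.relu(x)
    return x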
Example #13
    def __init__(self, inc=128, outc=128):
        super(gated_conv1x1, self).__init__()
        self.inp = int(inc / 2)
        self.oup = int(outc / 2)
        self.conv1x1_1 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=False)
        self.gate_1 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=True)
        self.sigmoid1 = nn.Sigmoid()
        self.eltmul1 = nn.EltMul()

        self.conv1x1_2 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=False)
        self.gate_2 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=True)
        self.sigmoid2 = nn.Sigmoid()
        self.eltmul2 = nn.EltMul()
Example #14
def upsample(in_channels, out_channels):  # should use F.interpolate
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels,
                  out_channels=in_channels,
                  kernel_size=(3, 3),
                  stride=1,
                  padding=1,
                  groups=in_channels,
                  bias=False),
        nn.Conv2d(in_channels=in_channels,
                  out_channels=out_channels,
                  kernel_size=1,
                  stride=1,
                  padding=0,
                  bias=False), nn.BatchNorm2d(out_channels), nn.ReLU())
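
The block above only remaps channels with a depthwise 3x3 plus a pointwise 1x1; the 2x spatial resize its name implies is apparently done separately, and the inline comment points at F.interpolate. A hedged wiring sketch, assuming upsample is in scope:

import torch
import torch.nn.functional as F

up = upsample(64, 32)
x = torch.randn(1, 64, 10, 10)
y = up(F.interpolate(x, scale_factor=2, mode='nearest'))   # 2x resize, then channel remap
print(y.shape)                                             # torch.Size([1, 32, 20, 20])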
Example #15
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_sizes,
                 strides=None,
                 paddings=None,
                 with_pool=True):
        super(Conv_Block, self).__init__()
        assert len(in_channels) == len(out_channels)
        assert len(out_channels) == len(kernel_sizes)
        if strides is not None:
            assert len(kernel_sizes) == len(strides)

        self.pool = None
        if with_pool:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

        groups = len(in_channels)
        convs = []
        for i in range(groups):
            convs.append(
                nn.Conv2d(in_channels=in_channels[i],
                          out_channels=out_channels[i],
                          kernel_size=kernel_sizes[i],
                          stride=strides[i],
                          padding=paddings[i]))
            convs.append(nn.ReLU(inplace=True))
        self.feature = nn.Sequential(*convs)
Example #16
 def __init__(self, inchannels=512, num_anchors=3):
     super(LandmarkHead, self).__init__()
     self.conv1x1 = nn.Conv2d(inchannels,
                              num_anchors * 10,
                              kernel_size=(1, 1),
                              stride=1,
                              padding=0)
Example #17
 def __init__(self, in_channels, out_channels, kernel_size, stride, padding, **kwargs):
     super(ConvBNReLU, self).__init__()
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=True, **kwargs)
     self.bn = nn.BatchNorm2d(out_channels)
     self.relu = nn.ReLU(inplace=True)
Example #18
 def __init__(self, input_channels, output_channels):
     super(DeepHeadModule, self).__init__()
     self._input_channels = input_channels
     self._mid_channels = 16
     self._output_channels = output_channels
     self.conv1 = BasicConv2d(self._input_channels, self._mid_channels, kernel_size=1, dilation=1, stride=1, padding=0)
     self.conv2 = BasicConv2d(self._mid_channels, self._mid_channels, kernel_size=3, dilation=1, stride=1, padding=1)
     self.conv3 = nn.Conv2d(self._mid_channels, self._output_channels, kernel_size=1, dilation=1, stride=1, padding=0)
Example #19
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = nn.BatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = nn.BatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = nn.BatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
     self.eltadd = nn.EltAdd()
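
This mirrors the standard ResNet bottleneck, except the residual sum goes through nn.EltAdd, which is not part of stock torch.nn and is presumably a project-specific element-wise-add wrapper. A forward sketch following the usual ResNet wiring (an assumption; the source shows only the constructor):

def forward(self, x):
    identity = x
    out = self.relu(self.bn1(self.conv1(x)))
    out = self.relu(self.bn2(self.conv2(out)))
    out = self.bn3(self.conv3(out))
    if self.downsample is not None:
        identity = self.downsample(x)
    out = self.eltadd(out, identity)   # assumed signature: element-wise add of two tensors
    return self.relu(out)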
Example #20
    def __init__(self):
        super(light_DSFD, self).__init__()

        self.conv1 = CRelu(3, 32, kernel_size=7, stride=4, padding=3)
        self.conv3 = CRelu(64, 64, kernel_size=5, stride=2, padding=2)

        self.inception1 = Inception2d(64)
        self.inception2 = Inception2d(64)
        self.inception3 = Inception2d(128)
        self.inception4 = Inception2d(128)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.conv5_1 = BasicConv2d(128, 128, kernel_size=1, stride=1, padding=0)
        self.conv5_2 = BasicConv2d(128, 256, kernel_size=3, stride=2, padding=1)
        self.conv6_1 = BasicConv2d(256, 128, kernel_size=1, stride=1, padding=0)
        self.conv6_2 = BasicConv2d(128, 256, kernel_size=3, stride=2, padding=1)

        fpn_in = [64, 64, 128, 128, 256, 256]
        cpm_in = [64, 64, 64, 64, 64, 64]
        fpn_channel = 64
        cpm_channels = 64
        output_channels = cpm_in

        # fpn
        self.smooth3 = nn.Conv2d(fpn_channel, fpn_channel, kernel_size=1, stride=1, padding=0)
        self.smooth2 = nn.Conv2d(fpn_channel, fpn_channel, kernel_size=1, stride=1, padding=0)
        self.smooth1 = nn.Conv2d(fpn_channel, fpn_channel, kernel_size=1, stride=1, padding=0)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        self.eltmul = nn.EltMul()

        self.latlayer6 = nn.Conv2d(fpn_in[5], fpn_channel, kernel_size=1, stride=1, padding=0)
        self.latlayer5 = nn.Conv2d(fpn_in[4], fpn_channel, kernel_size=1, stride=1, padding=0)
        self.latlayer4 = nn.Conv2d(fpn_in[3], fpn_channel, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = nn.Conv2d(fpn_in[2], fpn_channel, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(fpn_in[1], fpn_channel, kernel_size=1, stride=1, padding=0)
        self.latlayer1 = nn.Conv2d(fpn_in[0], fpn_channel, kernel_size=1, stride=1, padding=0)

        # cpm
        self.cpm1 = Inception2d(cpm_in[0])
        self.cpm2 = Inception2d(cpm_in[1])
        self.cpm3 = Inception2d(cpm_in[2])
        self.cpm4 = Inception2d(cpm_in[3])
        self.cpm5 = Inception2d(cpm_in[4])
        self.cpm6 = Inception2d(cpm_in[5])

        face_head = face_multibox(output_channels, [1, 1, 1, 1, 1, 1], 2, cpm_channels)
        self.loc = nn.ModuleList(face_head[0])
        self.conf = nn.ModuleList(face_head[1])

        self.softmax = nn.Softmax(dim=-1)
Example #21
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=1,
                     bias=False)
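
A quick standalone check of the helper above (assuming torch is installed and conv3x3 is in scope):

import torch

conv = conv3x3(16, 32, stride=2)
x = torch.randn(1, 16, 64, 64)
print(conv(x).shape)   # torch.Size([1, 32, 32, 32]) -- padding=1 keeps the halving exact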
Example #22
    def __init__(self, in_channels, out_channels_left, out_channels_right):
        super(M_Module, self).__init__()

        inc = in_channels
        ocl, ocr = out_channels_left, out_channels_right

        # left branch
        self.ssh_3x3 = nn.Conv2d(inc, ocl, 3, 1, 1)
        # right branch
        self.ssh_dimred = nn.Conv2d(inc, ocr, 3, 1, 1)
        self.ssh_dimred_relu = nn.ReLU(inplace=True)
        self.ssh_5x5 = nn.Conv2d(ocr, ocr, 3, 1, 1)
        self.ssh_7x7_1 = nn.Conv2d(ocr, ocr, 3, 1, 1)
        self.ssh_7x7_1_relu = nn.ReLU(inplace=True)
        self.ssh_7x7 = nn.Conv2d(ocr, ocr, 3, 1, 1)

        self.ssh_output_relu = nn.ReLU(inplace=True)
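
The layer names follow the SSH context module: a 3x3 branch on the left and a dimension-reduced 5x5/7x7 branch (built from stacked 3x3 convs) on the right. A forward sketch under that assumption; the source shows only the constructor, and torch must be imported for the concatenation:

def forward(self, x):
    out_3x3 = self.ssh_3x3(x)
    dimred = self.ssh_dimred_relu(self.ssh_dimred(x))
    out_5x5 = self.ssh_5x5(dimred)
    out_7x7 = self.ssh_7x7(self.ssh_7x7_1_relu(self.ssh_7x7_1(dimred)))
    # concatenate the branches along channels: ocl + ocr + ocr
    return self.ssh_output_relu(torch.cat([out_3x3, out_5x5, out_7x7], dim=1))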
Example #23
 def __init__(self, inchannels=512, num_anchors=3):
     super(ClassHead, self).__init__()
     self.num_anchors = num_anchors
     self.conv1x1 = nn.Conv2d(inchannels,
                              self.num_anchors * 2,
                              kernel_size=(1, 1),
                              stride=1,
                              padding=0)
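
A shape check for the classification head above, assuming ClassHead is importable; since no forward is defined in the snippet, the 1x1 convolution is called directly:

import torch

head = ClassHead(inchannels=512, num_anchors=3)
feat = torch.randn(1, 512, 20, 20)
print(head.conv1x1(feat).shape)   # torch.Size([1, 6, 20, 20]) -- num_anchors * 2 scores per cell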
Example #24
    def __init__(self, up_from_channels, up_to_channels):
        super(LFPN, self).__init__()

        self.conv1 = nn.Conv2d(up_from_channels, up_to_channels, kernel_size=1)
        self.conv1_relu = nn.ReLU(inplace=True)

        self.upsampling = nn.ConvTranspose2d(up_to_channels,
                                             up_to_channels,
                                             kernel_size=4,
                                             stride=2,
                                             padding=1,
                                             groups=up_to_channels,
                                             bias=False)

        self.conv2 = nn.Conv2d(up_to_channels, up_to_channels, kernel_size=1)
        self.conv2_relu = nn.ReLU(inplace=True)
        self.eltmul = nn.EltMul()
Example #25
    def __init__(self, inp, oup, stride, expand_ratio, model_type=32):
        super(InvertedResidual_dwc, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        self.conv = []

        if expand_ratio == 1:
            self.conv.append(
                nn.Conv2d(inp,
                          hidden_dim,
                          kernel_size=(3, 3),
                          stride=stride,
                          padding=1,
                          groups=hidden_dim))
            self.conv.append(nn.BatchNorm2d(hidden_dim))
            self.conv.append(nn.PReLU())
            self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
            self.conv.append(nn.BatchNorm2d(oup))
            if model_type == 32:
                self.conv.append(nn.PReLU())
        else:
            self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False))
            self.conv.append(nn.BatchNorm2d(hidden_dim))
            self.conv.append(nn.PReLU())
            self.conv.append(
                nn.Conv2d(hidden_dim,
                          hidden_dim,
                          kernel_size=(3, 3),
                          stride=stride,
                          padding=1,
                          groups=hidden_dim))
            self.conv.append(nn.BatchNorm2d(hidden_dim))
            self.conv.append(nn.PReLU())
            self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
            self.conv.append(nn.BatchNorm2d(oup))
            if model_type == 32:
                self.conv.append(nn.PReLU())

        self.conv = nn.Sequential(*self.conv)
        if self.use_res_connect:
            self.eltadd = nn.EltAdd()
Example #26
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1):
        super(IdentityBlock, self).__init__()

        out_channels_1, out_channels_2, out_channels_3 = out_channels//4, out_channels//4, out_channels

        self.conv1 = nn.Conv2d(in_channels, out_channels_1, kernel_size=(1, 1))
        self.bn1 = nn.BatchNorm2d(out_channels_1)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(out_channels_1, out_channels_2, kernel_size=(kernel_size, kernel_size), padding=(padding, padding), dilation=(dilation, dilation))
        self.bn2 = nn.BatchNorm2d(out_channels_2)
        self.relu2 = nn.ReLU(inplace=True)

        self.conv3 = nn.Conv2d(out_channels_2, out_channels_3, kernel_size=(1, 1))
        self.bn3 = nn.BatchNorm2d(out_channels_3)

        self.eltadd = nn.EltAdd()
        self.relu_f = nn.ReLU(inplace=True)
Example #27
 def multibox(self, num_classes):
     loc_layers = []
     conf_layers = []
     loc_layers += [nn.Conv2d(self.model3.out_channels, 3 * 14, kernel_size=3, padding=1, bias=True)]
     conf_layers += [nn.Conv2d(self.model3.out_channels, 3 * num_classes, kernel_size=3, padding=1, bias=True)]
     loc_layers += [nn.Conv2d(self.model4.out_channels, 2 * 14, kernel_size=3, padding=1, bias=True)]
     conf_layers += [nn.Conv2d(self.model4.out_channels, 2 * num_classes, kernel_size=3, padding=1, bias=True)]
     loc_layers += [nn.Conv2d(self.model5.out_channels, 2 * 14, kernel_size=3, padding=1, bias=True)]
     conf_layers += [nn.Conv2d(self.model5.out_channels, 2 * num_classes, kernel_size=3, padding=1, bias=True)]
     loc_layers += [nn.Conv2d(self.model6.out_channels, 3 * 14, kernel_size=3, padding=1, bias=True)]
     conf_layers += [nn.Conv2d(self.model6.out_channels, 3 * num_classes, kernel_size=3, padding=1, bias=True)]
     return nn.Sequential(*loc_layers), nn.Sequential(*conf_layers)
Example #28
    def __init__(self, mode='slim'):
        super(ULFG, self).__init__()
        self.mode = mode

        self.base_channel = 8 * 2
        self.backbone = nn.Sequential(
            _conv_bn(3, self.base_channel, 2),  # 160*120
            _conv_dw(self.base_channel, self.base_channel * 2, 1),
            _conv_dw(self.base_channel * 2, self.base_channel * 2, 2),  # 80*60
            _conv_dw(self.base_channel * 2, self.base_channel * 2, 1),
            _conv_dw(self.base_channel * 2, self.base_channel * 4, 2),  # 40*30
            _conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
            _conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
            _conv_dw(self.base_channel * 4, self.base_channel * 4, 1),
            _conv_dw(self.base_channel * 4, self.base_channel * 8, 2),  # 20*15
            _conv_dw(self.base_channel * 8, self.base_channel * 8, 1),
            _conv_dw(self.base_channel * 8, self.base_channel * 8, 1),
            _conv_dw(self.base_channel * 8, self.base_channel * 16, 2),  # 10*8
            _conv_dw(self.base_channel * 16, self.base_channel * 16, 1)
        )
        if self.mode == 'rfb':
            self.backbone[7] = BasicRFB(self.base_channel * 4, self.base_channel * 4, stride=1, scale=1.0)

        self.source_layer_indexes = [8, 11, 13]
        self.extras = nn.Sequential(
            nn.Conv2d(in_channels=self.base_channel * 16, out_channels=self.base_channel * 4, kernel_size=1),
            nn.ReLU(),
            _seperable_conv2d(in_channels=self.base_channel * 4, out_channels=self.base_channel * 16, kernel_size=3, stride=2, padding=1),
            nn.ReLU()
        )
        self.regression_headers = nn.ModuleList([
            _seperable_conv2d(in_channels=self.base_channel * 4, out_channels=3 * 4, kernel_size=3, padding=1),
            _seperable_conv2d(in_channels=self.base_channel * 8, out_channels=2 * 4, kernel_size=3, padding=1),
            _seperable_conv2d(in_channels=self.base_channel * 16, out_channels=2 * 4, kernel_size=3, padding=1),
            nn.Conv2d(in_channels=self.base_channel * 16, out_channels=3 * 4, kernel_size=3, padding=1)
        ])
        self.classification_headers = nn.ModuleList([
            _seperable_conv2d(in_channels=self.base_channel * 4, out_channels=3 * 2, kernel_size=3, padding=1),
            _seperable_conv2d(in_channels=self.base_channel * 8, out_channels=2 * 2, kernel_size=3, padding=1),
            _seperable_conv2d(in_channels=self.base_channel * 16, out_channels=2 * 2, kernel_size=3, padding=1),
            nn.Conv2d(in_channels=self.base_channel * 16, out_channels=3 * 2, kernel_size=3, padding=1)
        ])
        self.softmax = nn.Softmax(dim=2)
Example #29
    def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=False, add_relu=True, add_bn=True, eps=1e-5):
        super(ConvBlock, self).__init__()

        self.conv = nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding, bias=bias)
        self.relu = None
        self.bn = None

        if add_relu:
            self.relu = nn.ReLU()
        if add_bn:
            self.bn = nn.BatchNorm2d(out_channel, eps=eps)
Example #30
    def __init__(self, in_channels):
        super(CPM, self).__init__()
        # residual
        self.branch1 = Conv_BN(in_channels, 1024, 1, 1, 0, act=None)
        self.branch2a = Conv_BN(in_channels, 256, 1, 1, 0, act='relu')
        self.branch2b = Conv_BN(256, 256, 3, 1, 1, act='relu')
        self.branch2c = Conv_BN(256, 1024, 1, 1, 0, act=None)
        self.eltadd = nn.EltAdd()
        self.rescomb_relu = nn.ReLU(inplace=True)

        # ssh
        self.ssh_1_conv = nn.Conv2d(1024, 256, 3, 1, 1)
        self.ssh_dimred_conv = nn.Conv2d(1024, 128, 3, 1, 1)
        self.ssh_dimred_relu = nn.ReLU(inplace=True)
        self.ssh_2_conv = nn.Conv2d(128, 128, 3, 1, 1)
        self.ssh_3a_conv = nn.Conv2d(128, 128, 3, 1, 1)
        self.ssh_3a_relu = nn.ReLU(inplace=True)
        self.ssh_3b_conv = nn.Conv2d(128, 128, 3, 1, 1)

        self.concat_relu = nn.ReLU(inplace=True)
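
The constructor combines a bottleneck-style residual branch with an SSH-style context branch; the forward is not shown. A sketch of the presumed wiring (the residual output feeds the context branch, whose three outputs concatenate to 256 + 128 + 128 = 512 channels; nn.EltAdd is again assumed to be a project-specific element-wise add):

def forward(self, x):
    # residual combine: 1x1 shortcut vs. 1x1 -> 3x3 -> 1x1 bottleneck (assumed wiring)
    shortcut = self.branch1(x)
    bottleneck = self.branch2c(self.branch2b(self.branch2a(x)))
    res = self.rescomb_relu(self.eltadd(shortcut, bottleneck))
    # SSH-style context branch on the combined 1024-channel feature
    ssh1 = self.ssh_1_conv(res)
    dimred = self.ssh_dimred_relu(self.ssh_dimred_conv(res))
    ssh2 = self.ssh_2_conv(dimred)
    ssh3 = self.ssh_3b_conv(self.ssh_3a_relu(self.ssh_3a_conv(dimred)))
    return self.concat_relu(torch.cat([ssh1, ssh2, ssh3], dim=1))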