Example #1
    def __init__(self, channels):
        super(ResBlock, self).__init__()
        assert channels % 2 == 0  # channels must be even so they can be halved
        mid_channels = channels // 2

        self.conv1 = ConvModule(channels, mid_channels, kernel_size=1, stride=1, padding=0, bias=False,
                                norm='BatchNorm2d', activation='ReLU')
        self.conv2 = ConvModule(mid_channels, channels, kernel_size=3, stride=1, padding=1, bias=False,
                                norm='BatchNorm2d', activation='ReLU')
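
The constructor above is only half of the block; a minimal forward pass for this kind of Darknet-53 residual block would apply the 1x1-reduce / 3x3-expand pair and add the input back. This is a sketch under that assumption, not necessarily the original repository's code:

    def forward(self, x):
        # Shapes are preserved (stride 1, channels restored by conv2), so the
        # skip connection needs no projection. Assumed implementation.
        out = self.conv1(x)
        out = self.conv2(out)
        return x + out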
Example #2
 def make_lateral_conv(self, in_c, out_c):
     # Classic YOLOv3 detection block: five convolutions alternating
     # 1x1 (reduce to out_c) and 3x3 (expand to out_c2 = 2 * out_c), ending on out_c.
     out_c2 = out_c * 2
     return nn.Sequential(
         ConvModule(in_c, out_c, kernel_size=1, stride=1,
                    padding=0, dilation=1, groups=1, bias=False, norm='BatchNorm2d', activation='ReLU'),
         ConvModule(out_c, out_c2, kernel_size=3, stride=1,
                    padding=1, dilation=1, groups=1, bias=False, norm='BatchNorm2d', activation='ReLU'),
         ConvModule(out_c2, out_c, kernel_size=1, stride=1,
                    padding=0, dilation=1, groups=1, bias=False, norm='BatchNorm2d', activation='ReLU'),
         ConvModule(out_c, out_c2, kernel_size=3, stride=1,
                    padding=1, dilation=1, groups=1, bias=False, norm='BatchNorm2d', activation='ReLU'),
         ConvModule(out_c2, out_c, kernel_size=1, stride=1,
                    padding=0, dilation=1, groups=1, bias=False, norm='BatchNorm2d', activation='ReLU'),
     )
Example #3
 def _make_layer_v3(self, in_places, places, block, stride):
     # Darknet-53 stage: one strided 3x3 conv to downsample, followed by
     # `block` residual blocks at the new channel width.
     layers = []
     layers.append(ConvModule(in_places, places, kernel_size=3, stride=stride, padding=1, bias=False,
                              norm='BatchNorm2d', activation='ReLU'))
     for _ in range(block):
         layers.append(ResBlock(places))
     return nn.Sequential(*layers)
Example #4
    def __init__(self, subtype='darknet53', out_stages=[2, 3, 4], backbone_path=None):
        super(Darknet, self).__init__()
        self.out_stages = out_stages
        self.backbone_path = backbone_path

        self.out_channels = [64, 128, 256, 512, 1024]
        self.conv1 = ConvModule(3, 32, kernel_size=3, stride=1, padding=1, bias=False,
                                norm='BatchNorm2d', activation='ReLU')

        if subtype == 'darknet53':
            self.layer1 = self._make_layer_v3(32, 64, 1, 2)
            self.layer2 = self._make_layer_v3(64, 128, 2, 2)
            self.layer3 = self._make_layer_v3(128, 256, 8, 2)
            self.layer4 = self._make_layer_v3(256, 512, 8, 2)
            self.layer5 = self._make_layer_v3(512, 1024, 4, 2)
        elif subtype == 'darknet19':
            self.layer1 = self._make_layer_v2(32, 64, 1, 2)
            self.layer2 = self._make_layer_v2(64, 128, 3, 2)
            self.layer3 = self._make_layer_v2(128, 256, 3, 2)
            self.layer4 = self._make_layer_v2(256, 512, 5, 2)
            self.layer5 = self._make_layer_v2(512, 1024, 5, 2)
        else:
            raise NotImplementedError

        # Keep only the channel counts of the requested (contiguous) output stages.
        self.out_channels = self.out_channels[self.out_stages[0]:self.out_stages[-1] + 1]

        if self.backbone_path:
            # Load pretrained weights for this backbone from the given checkpoint.
            self.load_state_dict(torch.load(self.backbone_path))
        else:
            self.init_weights()
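
A plausible forward for this backbone, assuming the usual pattern of running the stem and the five stages in order and returning the features whose stage index appears in out_stages (stage i maps to index i - 1), would look like this sketch:

    def forward(self, x):
        x = self.conv1(x)
        output = []
        for i in range(1, 6):
            x = getattr(self, f'layer{i}')(x)
            # layer{i} corresponds to index i - 1 in out_stages / out_channels.
            if i - 1 in self.out_stages:
                output.append(x)
        return tuple(output)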
Example #5
 def _make_layer_v2(self, in_places, places, block, stride):
     layers = []
     # Darknet-19 stage: 2x2 max-pool to downsample, then `block` convolutions
     # alternating 3x3 (expand) and 1x1 (reduce); channel widths swap each step.
     layers.append(nn.MaxPool2d(kernel_size=2, stride=stride))
     for i in range(block):
         layers.append(ConvModule(in_places, places, kernel_size=1 if i % 2 else 3, stride=1, padding=(i + 1) % 2, bias=False,
                                  norm='BatchNorm2d', activation='ReLU'))
         # Swap so the next conv reverses the channel direction.
         in_places, places = places, in_places
     return nn.Sequential(*layers)
Example #6
    def __init__(self, in_channels=[256, 512, 1024],
                 out_channels=[128, 256, 512]):
        super(YOLOV3Neck, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels

        for i, (in_c, out_c) in enumerate(
                zip(reversed(self.in_channels), reversed(self.out_channels))):
            if i > 0:
                self.add_module(
                    f'conv{i}',
                    ConvModule(in_c, out_c, kernel_size=1, stride=1, padding=0,
                               dilation=1, groups=1, bias=False,
                               norm='BatchNorm2d', activation='ReLU'))
            # in_c + out_c: high-level features are upsampled and concatenated
            # with lower-level backbone features before the lateral convs.
            self.add_module(f'lateral_conv{i}',
                            self.make_lateral_conv(in_c + out_c if i > 0 else in_c, out_c))
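
The module names suggest the standard YOLOv3 top-down flow: run the deepest feature through lateral_conv0, then repeatedly reduce with conv{i}, upsample, concatenate with the next shallower backbone feature, and apply lateral_conv{i}. The following is a sketch under those assumptions (and assuming `import torch` and `import torch.nn.functional as F`):

    def forward(self, feats):
        # feats are assumed ordered shallow -> deep, e.g. (C3, C4, C5).
        assert len(feats) == len(self.in_channels)
        outs = []
        x = self.lateral_conv0(feats[-1])
        outs.append(x)
        for i in range(1, len(feats)):
            x = getattr(self, f'conv{i}')(x)            # 1x1 channel reduction
            x = F.interpolate(x, scale_factor=2, mode='nearest')
            x = torch.cat([x, feats[-1 - i]], dim=1)    # concat with shallower feature
            x = getattr(self, f'lateral_conv{i}')(x)
            outs.append(x)
        return tuple(outs)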
Example #7
    def __init__(self,
                 num_classes=80,
                 in_channels=[128, 256, 512],
                 out_channels=[256, 512, 1024],
                 featmap_strides=[8, 16, 32],
                 num_anchors=3):
        super(YOLOV3Head, self).__init__()
        # Check params
        assert (len(in_channels) == len(out_channels) == len(featmap_strides))
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.featmap_strides = featmap_strides
        # The number of anchors per level is usually the same
        # (SSD-style detectors are the main exception).
        self.num_anchors = num_anchors
        # Attributes per anchor in the prediction map: box (4) + objectness (1) + num_classes.
        self.num_attrib = 5 + self.num_classes

        for i, (in_c,
                out_c) in enumerate(zip(self.in_channels, self.out_channels)):
            self.add_module(
                f'conv_bridge{i}',
                ConvModule(in_c,
                           out_c,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           dilation=1,
                           groups=1,
                           bias=False,
                           norm='BatchNorm2d',
                           activation='ReLU'))
            self.add_module(
                f'conv_pred{i}',
                nn.Conv2d(out_c, self.num_anchors * self.num_attrib, 1))
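
Given the modules registered above, a forward pass presumably applies the 3x3 bridge conv and then the 1x1 prediction conv per feature level. This is a sketch under the assumption that the inputs are ordered like in_channels:

    def forward(self, feats):
        assert len(feats) == len(self.in_channels)
        pred_maps = []
        for i, x in enumerate(feats):
            x = getattr(self, f'conv_bridge{i}')(x)
            # Each location predicts num_anchors * (5 + num_classes) values.
            pred_maps.append(getattr(self, f'conv_pred{i}')(x))
        return tuple(pred_maps)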