Example #1
 def __init__(self, num_classes=1):
     super(RetinaNet, self).__init__()
     self.num_anchors = 7 * 2  # vertical offset -> *2
     self.num_classes = num_classes
     self.fpn = FPN50()
     self.loc_head = self._make_head(self.num_anchors * 8)
     self.cls_head = self._make_head(self.num_anchors * self.num_classes)
Example #2
 def __init__(self, num_classes=80):
     super(RetinaNet, self).__init__()
     self.fpn = FPN50()
     self.num_classes = num_classes
     self.loc_head = self._make_head(self.num_anchors * 4)
     self.cls_head = self._make_head(self.num_anchors * self.num_classes)
     self.focal_loss = FocalLoss()
Example #3
 def __init__(self, num_classes=500, num_anchors=9):
     super().__init__()
     self.fpn = FPN50()
     self.num_classes = num_classes
     self.num_anchors = num_anchors
     self.loc_head = self._make_head(self.num_anchors * 4)
     self.cls_head = self._make_head(self.num_anchors * self.num_classes)
Example #4
 def __init__(self, num_classes=20, num_anchors=9, backbone='resnet50'):
     super(RetinaNet, self).__init__()
     if backbone == 'resnet50':
         self.fpn = FPN50()
     elif backbone == 'resnet101':
         self.fpn = FPN101()
      else:
          raise ValueError('Invalid backbone network: %s' % backbone)
     self.num_classes = num_classes
     self.num_anchors = num_anchors
     self.loc_head = self._make_head(self.num_anchors * 4)
     self.cls_head = self._make_head(self.num_anchors * self.num_classes)
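Several of the RetinaNet variants above call self._make_head(...) without showing it, and most of them read self.num_anchors without assigning it in __init__; in these implementations it is usually a class-level attribute (e.g. num_anchors = 9). As a minimal sketch only, assuming 256-channel FPN feature maps and the common four conv + ReLU blocks (both assumptions, not shown in the excerpts):

 def _make_head(self, out_planes):
     # sketch: a small stack of 3x3 conv + ReLU blocks on 256-channel FPN features,
     # ending with a conv that emits out_planes channels per spatial location
     layers = []
     for _ in range(4):
         layers.append(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1))
         layers.append(nn.ReLU(True))
     layers.append(nn.Conv2d(256, out_planes, kernel_size=3, stride=1, padding=1))
     return nn.Sequential(*layers)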
Example #5
 def __init__(self, num_classes=9):
     super(RetinaNet, self).__init__()
     self.fpn = FPN50()
     self.num_classes = num_classes
     self.classifier1 = nn.Conv2d(3,
                                  18,
                                  kernel_size=3,
                                  padding=1,
                                  bias=False)
     self.classifier2 = nn.Conv2d(108,
                                  3,
                                  kernel_size=3,
                                  padding=1,
                                  bias=False)
     self.loc_head = self._make_head(self.num_anchors * 4)
     self.cls_head = self._make_head(self.num_anchors * self.num_classes)
Example #6
    def __init__(self, num_classes):
        super(FPNSSD512, self).__init__()
        self.num_classes = num_classes
        self.extractor = FPN50()
        self.loc_layers = nn.ModuleList()
        self.cls_layers = nn.ModuleList()

        in_channels = 256
        num_anchors = (4, 6, 6, 6, 6, 4, 4)
        for i in range(len(num_anchors)):
            self.loc_layers += [
                nn.Conv2d(in_channels,
                          num_anchors[i] * 4,
                          kernel_size=3,
                          padding=1)
            ]
            self.cls_layers += [
                nn.Conv2d(in_channels,
                          num_anchors[i] * num_classes,
                          kernel_size=3,
                          padding=1)
            ]
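Example #6 only shows how the prediction layers are built; below is a minimal sketch of the matching forward pass, assuming self.extractor returns one feature map per entry in num_anchors and that torch is imported at module level (both assumptions):

    def forward(self, x):
        loc_preds = []
        cls_preds = []
        # assumption: FPN50 yields a tuple of feature maps, one per prediction head
        for i, fm in enumerate(self.extractor(x)):
            loc = self.loc_layers[i](fm).permute(0, 2, 3, 1).contiguous()
            cls = self.cls_layers[i](fm).permute(0, 2, 3, 1).contiguous()
            loc_preds.append(loc.view(loc.size(0), -1, 4))
            cls_preds.append(cls.view(cls.size(0), -1, self.num_classes))
        # concatenate across feature levels: (batch, #boxes, 4) and (batch, #boxes, num_classes)
        return torch.cat(loc_preds, 1), torch.cat(cls_preds, 1)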
Example #7
    def __init__(self, layers):
        super(poseNet, self).__init__()
        if layers == 101:
            self.fpn = FPN101()
        if layers == 50:
            self.fpn = FPN50()

        self.latlayer4 = nn.Conv2d(256,
                                   256,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)

        # D-layers
        # two 3x3 conv kernels, bring the channels down to 128
        self.convt1 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.convt2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.convt3 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.convt4 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.convs1 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convs2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convs3 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convs4 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)

        self.upsample1 = nn.Upsample(scale_factor=8,
                                     mode='bilinear',
                                     align_corners=True)
        self.upsample2 = nn.Upsample(scale_factor=4,
                                     mode='bilinear',
                                     align_corners=True)
        self.upsample3 = nn.Upsample(scale_factor=2,
                                     mode='bilinear',
                                     align_corners=True)
        # self.upsample4 = nn.Upsample(size=(120,120),mode='bilinear',align_corners=True)

        self.concat = Concat()
        self.conv2 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1)
        self.convfin = nn.Conv2d(256, 17, kernel_size=1, stride=1, padding=0)
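A plausible reading of Example #7's D-layers, assuming self.fpn returns four feature maps at strides 4/8/16/32 (finest first): each map is reduced to 128 channels by a convt/convs pair, the coarser maps are upsampled by 2/4/8 to the finest resolution, and the four 128-channel maps are concatenated (512 channels) before conv2 and convfin produce 17 heatmap channels. The sketch below is an assumption, uses torch.cat in place of the Concat module, and omits latlayer4, whose role is not visible in the excerpt:

    def forward(self, x):
        # assumption: four FPN outputs, finest first, at strides 4/8/16/32
        p2, p3, p4, p5 = self.fpn(x)
        d5 = self.upsample1(self.convs1(self.convt1(p5)))  # stride 32 -> 4
        d4 = self.upsample2(self.convs2(self.convt2(p4)))  # stride 16 -> 4
        d3 = self.upsample3(self.convs3(self.convt3(p3)))  # stride 8  -> 4
        d2 = self.convs4(self.convt4(p2))                  # already at stride 4
        out = torch.cat([d2, d3, d4, d5], 1)               # 4 x 128 = 512 channels
        return self.convfin(self.conv2(out))               # 17 keypoint heatmaps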
Example #8
    def __init__(self, num_classes, firstinit=False):
        super(RetinaNet, self).__init__()
        self.name = 'RetinaNet'
        self.num_classes = num_classes
        self.loc_head = self._make_head(self.num_anchors * 4)
        self.cls_head = self._make_head(self.num_anchors * self.num_classes)
        self.best_loss = None
        self.lr = None

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

        self.fpn = FPN50(firstinit)
Example #9
  https://download.pytorch.org/models/resnet50-19c8e357.pth
'''
import math

import torch
from torch import nn
from torch.nn import init

from fpn import FPN50
from retinanet import RetinaNet

print('Loading pretrained ResNet50 model..')
d = torch.load('./model/resnet50.pth')

print('Loading into FPN50..')
fpn = FPN50()
dd = fpn.state_dict()
for k in d.keys():
    if not k.startswith('fc'):  # skip fc layers
        dd[k] = d[k]

print('Saving RetinaNet..')
net = RetinaNet()
for m in net.modules():
    if isinstance(m, nn.Conv2d):
        init.normal_(m.weight, mean=0, std=0.01)
        if m.bias is not None:
            init.constant_(m.bias, 0)
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
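The excerpt stops after re-initializing the detector's conv and batch-norm layers; a conversion script like this typically finishes by loading the copied backbone weights into the FPN, copying them into the detector, and saving the result. A hedged sketch of that tail (the output filename is an assumption):

fpn.load_state_dict(dd)                     # apply the copied ResNet50 weights
net.fpn.load_state_dict(fpn.state_dict())   # move them into the detector's FPN
torch.save(net.state_dict(), 'net.pth')     # assumed output path
print('Done!')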
Example #10
 def __init__(self):
     super(RetinaNet, self).__init__()
     self.fpn = FPN50()
     self.loc_head = self._make_head(self.num_anchors * 4)
     self.cls_head = self._make_head(self.num_anchors * self.num_classes)
Example #11
 def __init__(self, num_classes):
     super(FPNSSD512, self).__init__()
     self.fpn = FPN50()
     self.num_classes = num_classes
     self.loc_head = self._make_head(self.num_anchors * 4)
     self.cls_head = self._make_head(self.num_anchors * self.num_classes)
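Any of these detectors can be smoke-tested on a dummy batch. A minimal sketch, assuming the module layout from Example #9 (from retinanet import RetinaNet), a constructor that accepts num_classes, and a forward pass that returns (loc_preds, cls_preds) — all assumptions about the surrounding repo:

import torch
from retinanet import RetinaNet  # assumed layout, as in Example #9

net = RetinaNet(num_classes=20)
x = torch.randn(1, 3, 512, 512)   # dummy RGB batch
loc_preds, cls_preds = net(x)     # (batch, #anchors, 4), (batch, #anchors, num_classes)
print(loc_preds.shape, cls_preds.shape)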