Example #1
    def __init__(self, phase, base, extras, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.cfg = Config
        self.vgg = nn.ModuleList(base)
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(extras)
        self.priorbox = PriorBox(self.cfg)
        with torch.no_grad():
            self.priors = self.priorbox.forward()
        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detect(self)
Example #2
    def __init__(self, phase, base, extras, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.cfg = Config
        self.vgg = nn.ModuleList(base)
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(extras)
        self.priorbox = PriorBox(self.cfg)
        with torch.no_grad():
            self.priors = Variable(self.priorbox.forward())
        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)

        self.SE1 = SEModule(512)
        self.SE2 = SEModule(1024)
        self.SE3 = SEModule(512)
        self.SE4 = SEModule(256)
        self.SE5 = SEModule(256)
        self.SE6 = SEModule(256)
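
SEModule is referenced throughout these examples but never defined in the listing. A minimal sketch, assuming the standard squeeze-and-excitation block (Hu et al., 2018); the reduction ratio of 16 is an assumed default, not taken from the original code:

import torch.nn as nn

class SEModule(nn.Module):
    def __init__(self, channels, reduction=16):  # reduction ratio is an assumption
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)  # squeeze: global average pooling
        self.fc = nn.Sequential(                 # excitation: bottleneck MLP
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        w = self.fc(self.avg_pool(x).view(b, c)).view(b, c, 1, 1)
        return x * w                             # channel-wise reweighting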
Example #3
class SSD(nn.Module):
    def __init__(self, phase, base, extras, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.cfg = Config
        self.vgg = nn.ModuleList(base)
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(extras)
        self.priorbox = PriorBox(self.cfg)
        with torch.no_grad():
            self.priors = self.priorbox.forward()
        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detect(self)

    def forward(self, x):
        sources = list()
        loc = list()
        conf = list()

        # Run through VGG up to conv4_3 (ReLU layers count, pooling has no ReLU; indices 0-22 = layers 1-23 of 36)
        for k in range(23):  # layers 0-22
            x = self.vgg[k](x)

        s = self.L2Norm(x)  # L2 normalization: conv4_3 features are relatively shallow (layer 24)
        sources.append(s)

        # Get the fc7 output
        for k in range(23, len(self.vgg)):  # indices 23-34 = layers 24-35
            x = self.vgg[k](x)
        sources.append(x)  # FC7_1

        # Get outputs of the extra layers
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)  # ReLU is applied here, so it does not show up in the module list
            if k % 2 == 1:
                sources.append(x)

        # Apply the regression (loc) and classification (conf) heads to each source map
        for (x, l, c) in zip(sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())  # channels-last: [B, C, H, W] -> [B, H, W, C]
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())

        # Flatten each map and concatenate
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        if self.phase == "test":
            output = self.detect.apply(
                loc.view(loc.size(0), -1, 4),
                self.softmax(conf.view(conf.size(0), -1, self.num_classes)),
                self.priors)
        else:
            output = (loc.view(loc.size(0), -1,
                               4), conf.view(conf.size(0), -1,
                                             self.num_classes), self.priors)
        return output
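
The permute / view / cat sequence in the forward pass flattens every head output into one tensor per image. A minimal standalone sketch (toy anchor counts and shapes, not from the original code) of that shape bookkeeping:

import torch

batch = 2
# toy head outputs: 4 and 6 anchors per cell, 4 box coordinates each
outs = [torch.randn(batch, 4 * 4, 38, 38),
        torch.randn(batch, 6 * 4, 19, 19)]

# channels-last, then flatten each map and concatenate along dim 1
flat = [o.permute(0, 2, 3, 1).contiguous().view(batch, -1) for o in outs]
loc = torch.cat(flat, 1)              # [2, 38*38*4*4 + 19*19*6*4] = [2, 31768]
print(loc.view(batch, -1, 4).shape)   # torch.Size([2, 7942, 4])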
Example #4
    def __init__(self, phase, base, extras, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.cfg = Config
        self.vgg = nn.ModuleList(base)
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(extras)
        self.priorbox = PriorBox(self.cfg)
        with torch.no_grad():
            self.priors = Variable(self.priorbox.forward())
            # self.priors = self.priorbox.forward()  # this version (without Variable) also runs fine

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        self.relu_list4cxq = nn.ModuleList([torch.nn.ReLU(True) for i in range(8)])  # modified: one registered ReLU per extra layer
        self.feature_maps4cxq = None  # cached for Grad-CAM
        self.scores4cxq = None  # cached for Grad-CAM
        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)
Example #5
    def __init__(self, phase, base, extras, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.cfg = Config
        self.vgg = nn.ModuleList(base)
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(extras)
        self.priorbox = PriorBox(self.cfg)
        with torch.no_grad():
            self.priors = Variable(self.priorbox.forward())
        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)

        self.upsample_256_256 = Upsample(10)
        self.conv_256_512 = nn.Conv2d(in_channels=256,
                                      out_channels=512,
                                      kernel_size=1,
                                      stride=1)

        # conv8_2 -> conv8_2
        self.conv_512_512_1 = nn.Conv2d(in_channels=512,
                                        out_channels=512,
                                        kernel_size=1,
                                        stride=1)

        self.upsample_512_512 = Upsample(19)
        self.conv_512_1024 = nn.Conv2d(in_channels=512,
                                       out_channels=1024,
                                       kernel_size=1,
                                       stride=1)
        self.conv_1024_1024 = nn.Conv2d(in_channels=1024,
                                        out_channels=1024,
                                        kernel_size=1,
                                        stride=1)

        self.upsample_1024_1024 = Upsample(38)
        self.conv_1024_512 = nn.Conv2d(in_channels=1024,
                                       out_channels=512,
                                       kernel_size=1,
                                       stride=1)
        self.conv_512_512_2 = nn.Conv2d(in_channels=512,
                                        out_channels=512,
                                        kernel_size=1,
                                        stride=1)

        self.smooth = nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1)
        self.smooth1 = nn.Conv2d(1024,
                                 1024,
                                 kernel_size=3,
                                 padding=1,
                                 stride=1)

        if USE_CBAM:
            self.CBAM1 = Bottleneck(512)
            self.CBAM2 = Bottleneck(1024)
            self.CBAM3 = Bottleneck(512)
            self.CBAM4 = Bottleneck(256)
            self.CBAM5 = Bottleneck(256)
            self.CBAM6 = Bottleneck(256)

        if USE_SE:
            self.SE1 = SEModule(512)
            self.SE2 = SEModule(1024)
            self.SE3 = SEModule(512)
            self.SE4 = SEModule(256)
            self.SE5 = SEModule(256)
            self.SE6 = SEModule(256)
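
The Upsample(size) helper used above (target sizes 10, 19, and 38) is not part of this listing. A hypothetical reconstruction, assuming it performs size-based bilinear interpolation so a deeper map can be fused with a shallower one of the same resolution:

import torch.nn as nn
import torch.nn.functional as F

class Upsample(nn.Module):
    def __init__(self, size):
        super(Upsample, self).__init__()
        self.size = size  # target spatial resolution, e.g. 10, 19, or 38

    def forward(self, x):
        # resize the feature map to (size, size)
        return F.interpolate(x, size=(self.size, self.size),
                             mode='bilinear', align_corners=False)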
Example #6
class SSD(nn.Module):
    def __init__(self, phase, base, extras, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.cfg = Config
        self.vgg = nn.ModuleList(base)
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(extras)
        self.priorbox = PriorBox(self.cfg)
        with torch.no_grad():
            self.priors = Variable(self.priorbox.forward())
            # self.priors = self.priorbox.forward()  # this version (without Variable) also runs fine

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        self.relu_list4cxq = nn.ModuleList([torch.nn.ReLU(True) for i in range(8)])  # modified: one registered ReLU per extra layer
        self.feature_maps4cxq = None  # cached for Grad-CAM
        self.scores4cxq = None  # cached for Grad-CAM
        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)

    def forward(self, x):
        sources = list()
        loc = list()
        conf = list()

        # Run through VGG up to conv4_3
        for k in range(23):
            x = self.vgg[k](x)

        s = self.L2Norm(x)
        sources.append(s)

        # Get the fc7 output
        for k in range(23, len(self.vgg)):
            x = self.vgg[k](x)
        sources.append(x)

        # Get outputs of the extra layers
        for k, v in enumerate(self.extras):
            # x = F.relu(v(x), inplace=True)  # original implementation
            x = self.relu_list4cxq[k](v(x))  # modified: use the registered ReLU modules
            if k % 2 == 1:
                sources.append(x)

        self.feature_maps4cxq = sources  # the six source feature maps (used by Grad-CAM)
        # Apply the regression (loc) and classification (conf) heads
        for (x, l, c) in zip(sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())

        self.scores4cxq = conf  # cache the per-class scores

        # Flatten each map and concatenate
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)  # torch.Size([4, 34928])
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)  # torch.Size([4, 26196])
        if self.phase == "test":
            # loc is reshaped to [batch_size, num_anchors, 4]
            # conf is reshaped to [batch_size, num_anchors, num_classes]
            # output = self.detect(
            output = self.detect.apply(
                loc.view(loc.size(0), -1, 4),                   # loc preds torch.Size([4, 8732, 4])
                self.softmax(conf.view(conf.size(0), -1,
                             self.num_classes)),                # conf preds # torch.Size([4, 8732, 3])
                self.priors              # torch.Size([8732, 4])
            )  # torch.Size([1, 3, 200, 5]): 1 confidence score + 4 location values
        else:
            output = (
                loc.view(loc.size(0), -1, 4),
                conf.view(conf.size(0), -1, self.num_classes),
                self.priors
            )  # torch.Size([4, 8732, 4]) torch.Size([4, 8732, 3]) torch.Size([8732, 4])
        return output
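
The feature_maps4cxq and scores4cxq caches let Grad-CAM read activations and gradients after a forward pass. A hedged usage sketch (ssd and image are assumed to exist; this is illustrative, not code from the original repo):

out = ssd(image)                      # forward pass fills both caches
fmap = ssd.feature_maps4cxq[0]        # conv4_3 source map
fmap.retain_grad()                    # keep .grad on this non-leaf tensor
ssd.scores4cxq[0].max().backward()    # back-propagate one class score
weights = fmap.grad.mean(dim=(2, 3), keepdim=True)  # GAP over gradients
cam = (weights * fmap).sum(dim=1).clamp(min=0)      # Grad-CAM heat map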
Example #7
    def __init__(self, phase, base, extras, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.cfg = Config
        self.vgg = nn.ModuleList(base)
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(extras)
        self.priorbox = PriorBox(self.cfg)
        with torch.no_grad():
            self.priors = Variable(self.priorbox.forward())
        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)

        self.DilationConv_128_128 = nn.Conv2d(in_channels=128,
                                              out_channels=128,
                                              kernel_size=3,
                                              padding=2,
                                              dilation=2,
                                              stride=2)
        self.conv_512_256 = nn.Conv2d(in_channels=512,
                                      out_channels=256,
                                      kernel_size=1,
                                      stride=1)
        self.upsample_1024_1024 = Upsample(38)
        self.conv_1024_128 = nn.Conv2d(in_channels=1024,
                                       out_channels=128,
                                       kernel_size=1,
                                       stride=1)

        self.DilationConv_512_256 = nn.Conv2d(in_channels=512,
                                              out_channels=256,
                                              kernel_size=3,
                                              padding=2,
                                              dilation=2,
                                              stride=2)

        self.conv_1024_512 = nn.Conv2d(in_channels=1024,
                                       out_channels=512,
                                       kernel_size=1,
                                       stride=1)

        self.upsample_512_512 = Upsample(19)
        self.conv_512_256_fc7 = nn.Conv2d(in_channels=512,
                                          out_channels=256,
                                          kernel_size=1,
                                          stride=1)

        self.DilationConv_512_128_2 = nn.Conv2d(in_channels=512,
                                                out_channels=128,
                                                kernel_size=3,
                                                padding=2,
                                                dilation=2,
                                                stride=2)

        self.conv_512_256_2 = nn.Conv2d(in_channels=512,
                                        out_channels=256,
                                        kernel_size=1,
                                        stride=1)

        self.upsample_256_256_2 = Upsample(10)
        self.conv_256_128_2 = nn.Conv2d(in_channels=256,
                                        out_channels=128,
                                        kernel_size=1,
                                        stride=1)

        self.smooth = nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1)
        self.smooth2 = nn.Conv2d(1024,
                                 1024,
                                 kernel_size=3,
                                 padding=1,
                                 stride=1)

        self.bn = nn.BatchNorm2d(128)
        self.bn1 = nn.BatchNorm2d(256)

        if USE_SE:
            self.SE1 = SEModule(512)
            self.SE2 = SEModule(512)
            self.SE3 = SEModule(512)
            self.SE4 = SEModule(256)
            self.SE5 = SEModule(256)
            self.SE6 = SEModule(256)

        if USE_ECA:
            self.ECA1 = ECAModule(512)
            self.ECA2 = ECAModule(1024)
            self.ECA3 = ECAModule(512)
            self.ECA4 = ECAModule(256)
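
ECAModule is likewise not defined in this listing. A sketch assuming the standard efficient channel attention block (ECA-Net, Wang et al., 2020), where the 1D kernel size is derived from the channel count; gamma and b follow the reference defaults:

import math
import torch.nn as nn

class ECAModule(nn.Module):
    def __init__(self, channels, gamma=2, b=1):
        super(ECAModule, self).__init__()
        t = int(abs((math.log(channels, 2) + b) / gamma))
        k = t if t % 2 else t + 1                 # force an odd kernel size
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k, padding=k // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        y = self.avg_pool(x)                              # [b, c, 1, 1]
        y = self.conv(y.squeeze(-1).transpose(-1, -2))    # 1D conv across channels
        y = self.sigmoid(y.transpose(-1, -2).unsqueeze(-1))
        return x * y.expand_as(x)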