def __init__(self, phase, base, extras, head, num_classes):
    """SSD detector with an SE (squeeze-and-excitation) attention block per
    detection feature map.

    Args:
        phase: 'train' or 'test'. In 'test' mode a softmax and a Detect
            post-processing stage are attached for inference-time decoding.
        base: layer list for the VGG backbone.
        extras: layer list for the extra feature-extraction layers.
        head: pair (loc_layers, conf_layers) of prediction-head layer lists.
        num_classes: number of object classes (including background).
    """
    super(SSD, self).__init__()
    self.phase = phase
    self.num_classes = num_classes
    self.cfg = Config
    self.vgg = nn.ModuleList(base)
    # Scaled L2 normalization for the 512-channel conv4_3 feature map.
    self.L2Norm = L2Norm(512, 20)
    self.extras = nn.ModuleList(extras)
    self.priorbox = PriorBox(self.cfg)
    # Prior (anchor) boxes are constants: generate them once, gradient-free.
    # FIX: dropped the torch.autograd.Variable wrapper — it has been a no-op
    # since PyTorch 0.4 and is deprecated.
    with torch.no_grad():
        self.priors = self.priorbox.forward()
    self.loc = nn.ModuleList(head[0])
    self.conf = nn.ModuleList(head[1])
    if phase == 'test':
        self.softmax = nn.Softmax(dim=-1)
        self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)
    # One SE attention module per detection feature map; channel counts
    # follow the six SSD source layers.
    self.SE1 = SEModule(512)
    self.SE2 = SEModule(1024)
    self.SE3 = SEModule(512)
    self.SE4 = SEModule(256)
    self.SE5 = SEModule(256)
    self.SE6 = SEModule(256)
def __init__(self, phase, base, extras, head, num_classes):
    """Assemble the SSD network: VGG backbone, extra layers, prediction heads.

    Args:
        phase: 'train' or 'test'; 'test' additionally wires up softmax and
            the Detect post-processing stage.
        base: layer list for the VGG backbone.
        extras: layer list for the extra feature-extraction layers.
        head: pair (loc_layers, conf_layers) of prediction-head layer lists.
        num_classes: number of object classes (including background).
    """
    super(SSD, self).__init__()
    self.phase = phase
    self.num_classes = num_classes
    self.cfg = Config
    # Backbone and auxiliary feature layers, registered as submodules.
    self.vgg = nn.ModuleList(base)
    # Scaled L2 normalization applied to the 512-channel conv4_3 feature map.
    self.L2Norm = L2Norm(512, 20)
    self.extras = nn.ModuleList(extras)
    # Prior (anchor) boxes are fixed, so build them without gradient tracking.
    self.priorbox = PriorBox(self.cfg)
    with torch.no_grad():
        self.priors = self.priorbox.forward()
    loc_layers, conf_layers = head[0], head[1]
    self.loc = nn.ModuleList(loc_layers)
    self.conf = nn.ModuleList(conf_layers)
    if phase == 'test':
        # Inference-only post-processing: class probabilities + box decoding.
        self.softmax = nn.Softmax(dim=-1)
        self.detect = Detect(self)
def __init__(self, phase, base, extras, head, num_classes):
    """SSD detector instrumented for Grad-CAM visualization.

    In addition to the standard SSD components, this variant registers a
    list of shared ReLU modules (so hooks can be attached to them) and
    keeps slots for cached feature maps / scores used by Grad-CAM.

    Args:
        phase: 'train' or 'test'; 'test' attaches softmax + Detect decoding.
        base: layer list for the VGG backbone.
        extras: layer list for the extra feature-extraction layers.
        head: pair (loc_layers, conf_layers) of prediction-head layer lists.
        num_classes: number of object classes (including background).
    """
    super(SSD, self).__init__()
    self.phase = phase
    self.num_classes = num_classes
    self.cfg = Config
    self.vgg = nn.ModuleList(base)
    # Scaled L2 normalization for the 512-channel conv4_3 feature map.
    self.L2Norm = L2Norm(512, 20)
    self.extras = nn.ModuleList(extras)
    self.priorbox = PriorBox(self.cfg)
    # Prior boxes are constants; FIX: the deprecated torch.autograd.Variable
    # wrapper (a no-op since PyTorch 0.4) has been removed — the original's
    # own comment noted the plain form runs fine.
    with torch.no_grad():
        self.priors = self.priorbox.forward()
    self.loc = nn.ModuleList(head[0])
    self.conf = nn.ModuleList(head[1])
    # Shared, registered ReLU modules (author's modification) so that
    # Grad-CAM forward/backward hooks can target them individually.
    self.relu_list4cxq = nn.ModuleList([torch.nn.ReLU(True) for i in range(8)])
    self.feature_maps4cxq = None  # cached feature maps for Grad-CAM
    self.scores4cxq = None  # cached class scores for Grad-CAM
    if phase == 'test':
        self.softmax = nn.Softmax(dim=-1)
        self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)
def __init__(self, phase, base, extras, head, num_classes):
    """SSD detector with a top-down feature-fusion path (Upsample + 1x1
    convs + 3x3 smoothing) and optional CBAM / SE attention blocks.

    Args:
        phase: 'train' or 'test'; 'test' attaches softmax + Detect decoding.
        base: layer list for the VGG backbone.
        extras: layer list for the extra feature-extraction layers.
        head: pair (loc_layers, conf_layers) of prediction-head layer lists.
        num_classes: number of object classes (including background).
    """
    super(SSD, self).__init__()
    self.phase = phase
    self.num_classes = num_classes
    self.cfg = Config
    self.vgg = nn.ModuleList(base)
    # Scaled L2 normalization for the 512-channel conv4_3 feature map.
    self.L2Norm = L2Norm(512, 20)
    self.extras = nn.ModuleList(extras)
    self.priorbox = PriorBox(self.cfg)
    # Prior boxes are constants; FIX: dropped the torch.autograd.Variable
    # wrapper — a no-op since PyTorch 0.4 and deprecated.
    with torch.no_grad():
        self.priors = self.priorbox.forward()
    self.loc = nn.ModuleList(head[0])
    self.conf = nn.ModuleList(head[1])
    if phase == 'test':
        self.softmax = nn.Softmax(dim=-1)
        self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)
    # --- top-down fusion path ---
    # 256ch map upsampled to 10x10, then widened to 512ch.
    self.upsample_256_256 = Upsample(10)
    self.conv_256_512 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1, stride=1)
    # conv8_2 -> conv8_2 (1x1 lateral connection).
    self.conv_512_512_1 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=1, stride=1)
    # 512ch map upsampled to 19x19, then widened to 1024ch.
    # NOTE(review): presumably fused with the fc7 feature map — confirm in forward().
    self.upsample_512_512 = Upsample(19)
    self.conv_512_1024 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1, stride=1)
    self.conv_1024_1024 = nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=1, stride=1)
    # 1024ch map upsampled to 38x38, then reduced to 512ch.
    # NOTE(review): presumably fused with conv4_3 — confirm in forward().
    self.upsample_1024_1024 = Upsample(38)
    self.conv_1024_512 = nn.Conv2d(in_channels=1024, out_channels=512, kernel_size=1, stride=1)
    self.conv_512_512_2 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=1, stride=1)
    # 3x3 smoothing convolutions applied after fusion.
    self.smooth = nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1)
    self.smooth1 = nn.Conv2d(1024, 1024, kernel_size=3, padding=1, stride=1)
    if USE_CBAM:
        # One CBAM bottleneck per detection feature map.
        self.CBAM1 = Bottleneck(512)
        self.CBAM2 = Bottleneck(1024)
        self.CBAM3 = Bottleneck(512)
        self.CBAM4 = Bottleneck(256)
        self.CBAM5 = Bottleneck(256)
        self.CBAM6 = Bottleneck(256)
    if USE_SE:
        # One SE module per detection feature map.
        self.SE1 = SEModule(512)
        self.SE2 = SEModule(1024)
        self.SE3 = SEModule(512)
        self.SE4 = SEModule(256)
        self.SE5 = SEModule(256)
        self.SE6 = SEModule(256)
def __init__(self, phase, base, extras, head, num_classes):
    """SSD detector with a dilated-convolution / upsampling fusion path and
    optional SE / ECA channel-attention modules.

    Args:
        phase: 'train' or 'test'; 'test' attaches softmax + Detect decoding.
        base: layer list for the VGG backbone.
        extras: layer list for the extra feature-extraction layers.
        head: pair (loc_layers, conf_layers) of prediction-head layer lists.
        num_classes: number of object classes (including background).
    """
    super(SSD, self).__init__()
    self.phase = phase
    self.num_classes = num_classes
    self.cfg = Config
    self.vgg = nn.ModuleList(base)
    # Scaled L2 normalization for the 512-channel conv4_3 feature map.
    self.L2Norm = L2Norm(512, 20)
    self.extras = nn.ModuleList(extras)
    self.priorbox = PriorBox(self.cfg)
    # Prior boxes are constants; FIX: dropped the torch.autograd.Variable
    # wrapper — a no-op since PyTorch 0.4 and deprecated.
    with torch.no_grad():
        self.priors = self.priorbox.forward()
    self.loc = nn.ModuleList(head[0])
    self.conf = nn.ModuleList(head[1])
    if phase == 'test':
        self.softmax = nn.Softmax(dim=-1)
        self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)
    # --- multi-scale fusion path ---
    # Dilated 3x3 stride-2 convs downsample shallower maps; Upsample + 1x1
    # convs bring deeper maps up. NOTE(review): exact source/target feature
    # maps per branch should be confirmed against forward().
    self.DilationConv_128_128 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=2, dilation=2, stride=2)
    self.conv_512_256 = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1, stride=1)
    self.upsample_1024_1024 = Upsample(38)
    self.conv_1024_128 = nn.Conv2d(in_channels=1024, out_channels=128, kernel_size=1, stride=1)
    self.DilationConv_512_256 = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=2, dilation=2, stride=2)
    self.conv_1024_512 = nn.Conv2d(in_channels=1024, out_channels=512, kernel_size=1, stride=1)
    self.upsample_512_512 = Upsample(19)
    self.conv_512_256_fc7 = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1, stride=1)
    self.DilationConv_512_128_2 = nn.Conv2d(in_channels=512, out_channels=128, kernel_size=3, padding=2, dilation=2, stride=2)
    self.conv_512_256_2 = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1, stride=1)
    self.upsample_256_256_2 = Upsample(10)
    self.conv_256_128_2 = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=1, stride=1)
    # 3x3 smoothing convolutions applied after fusion.
    self.smooth = nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1)
    self.smooth2 = nn.Conv2d(1024, 1024, kernel_size=3, padding=1, stride=1)
    # BatchNorm for the fused branches.
    self.bn = nn.BatchNorm2d(128)
    self.bn1 = nn.BatchNorm2d(256)
    if USE_SE:
        # One SE module per detection feature map.
        self.SE1 = SEModule(512)
        self.SE2 = SEModule(512)
        self.SE3 = SEModule(512)
        self.SE4 = SEModule(256)
        self.SE5 = SEModule(256)
        self.SE6 = SEModule(256)
    if USE_ECA:
        # ECA (efficient channel attention) modules.
        self.ECA1 = ECAModule(512)
        self.ECA2 = ECAModule(1024)
        self.ECA3 = ECAModule(512)
        self.ECA4 = ECAModule(256)