Code Example #1
    def __init__(self, in_channels_dict, out_channels=256, mode="concat"):
        super().__init__()
        assert mode in ["concat", "add"]
        self.mode = mode
        self.net = nn.ModuleDict()
        self.reduce = nn.ModuleDict()

        keys = sorted(in_channels_dict.keys(), reverse=True)  # top-down order: [P7, P6, P5, ..., P2]
        for i, key in enumerate(keys):
            in_channels = in_channels_dict[key]
            if i == 0:
                # coarsest level: lateral reduction only
                self.reduce[key] = nn.Sequential(
                    nn.Conv2d(in_channels, out_channels, 3, 1, 1),
                    nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
            else:
                # refine and 2x-upsample the output of the previous (coarser) level
                self.net[key] = nn.Sequential(
                    nn.Conv2d(out_channels, out_channels, 3, 1, 1),
                    nn.BatchNorm2d(out_channels),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(out_channels, out_channels, 3, 2, 1, 1),
                    # 2x upsample; same output size as kernel=2, stride=2, padding=0, output_padding=0
                    nn.BatchNorm2d(out_channels),
                    nn.ReLU(inplace=True))
                if self.mode == "concat":
                    self.reduce[key] = nn.Sequential(
                        nn.Conv2d(in_channels + out_channels, out_channels, 3,
                                  1, 1), nn.BatchNorm2d(out_channels),
                        nn.ReLU(inplace=True))
                else:
                    self.reduce[key] = nn.Sequential(
                        nn.Conv2d(in_channels, out_channels, 3, 1, 1),
                        nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))

        _initParmasV2(self, self.modules())
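
Only `__init__` appears in this excerpt; the matching `forward` is not shown. The following is a minimal sketch, assuming the module receives a dict of pyramid features keyed like `in_channels_dict` (e.g. "P2"..."P7") with adjacent levels differing by a factor of 2 in resolution: the coarsest level is reduced directly, and every finer level fuses its lateral feature with the 2x-upsampled previous output either by concatenation or by addition, matching the layer shapes defined above.

    def forward(self, features):
        # Sketch only (assumption): features is a dict such as {"P2": ..., ..., "P7": ...}.
        import torch  # normally imported at module level

        keys = sorted(features.keys(), reverse=True)  # process top-down: P7 -> P2
        outs = {}
        prev = None
        for i, key in enumerate(keys):
            x = features[key]
            if i == 0:
                prev = self.reduce[key](x)  # coarsest level: lateral reduction only
            else:
                up = self.net[key](prev)  # 3x3 conv + ConvTranspose2d -> 2x upsample of the previous output
                if self.mode == "concat":
                    prev = self.reduce[key](torch.cat([x, up], dim=1))
                else:  # "add"
                    prev = self.reduce[key](x) + up
            outs[key] = prev
        return outs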
Code Example #2
    def __init__(self, in_channels_dict, out_channels=256):
        super().__init__()

        self.inner_blocks = nn.ModuleDict()
        self.layer_blocks = nn.ModuleDict()
        for name, in_channels in in_channels_dict.items():
            if in_channels == 0:
                continue  # skip unused pyramid levels
            # 1x1 lateral convolution: project the input feature map to out_channels
            inner_block_module = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
            # 3x3 convolution producing the output feature map for this level
            layer_block_module = nn.Sequential(
                nn.Conv2d(out_channels, out_channels, 3, padding=1),
                nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
            self.inner_blocks[name] = inner_block_module
            self.layer_blocks[name] = layer_block_module

        # initialize parameters now to avoid modifying the initialization of top_blocks
        # _initParmas(self, self.modules())
        _initParmasV2(self, self.modules())
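
Again only `__init__` is shown. The sketch below is an assumption about how `inner_blocks` and `layer_blocks` are typically wired, following the pattern of torchvision's `FeaturePyramidNetwork`: a 1x1 lateral projection per level, nearest-neighbor top-down upsampling fused by addition, and a 3x3 output convolution per level.

    def forward(self, x):
        # Sketch only (assumption): x is an OrderedDict of feature maps ordered fine -> coarse.
        import torch.nn.functional as F  # normally imported at module level

        names = [name for name in x if name in self.inner_blocks]
        last_inner = self.inner_blocks[names[-1]](x[names[-1]])
        results = {names[-1]: self.layer_blocks[names[-1]](last_inner)}
        for name in names[-2::-1]:  # walk from the second-coarsest level down to the finest
            inner_lateral = self.inner_blocks[name](x[name])
            top_down = F.interpolate(last_inner, size=inner_lateral.shape[-2:], mode="nearest")
            last_inner = inner_lateral + top_down
            results[name] = self.layer_blocks[name](last_inner)
        return results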
Code Example #3
    def __init__(self,
                 model_name="vgg16_bn",
                 num_classes=21,
                 freeze_at=["res2", "res3", "res4", "res5"],
                 use_shortcut=False,
                 pretrained=False,
                 return_indices=False):
        super().__init__()
        self.return_indices = return_indices
        self.num_classes = num_classes
        self.use_shortcut = use_shortcut
        self.inplanes = 64
        features = nn.Sequential(
            nn.Conv2d(3, self.inplanes, 3, padding=1),
            nn.BatchNorm2d(self.inplanes), nn.ReLU(inplace=True),
            nn.Conv2d(self.inplanes, self.inplanes, 3, padding=1),
            nn.BatchNorm2d(self.inplanes), nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2, return_indices=return_indices),  # end of res1 (features[0:7], 64 ch)
            nn.Conv2d(self.inplanes, self.inplanes * 2, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 2), nn.ReLU(inplace=True),
            nn.Conv2d(self.inplanes * 2, self.inplanes * 2, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 2), nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2, return_indices=return_indices),  # end of res2 (features[7:14], 128 ch)
            nn.Conv2d(self.inplanes * 2, self.inplanes * 4, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 4), nn.ReLU(inplace=True),
            nn.Conv2d(self.inplanes * 4, self.inplanes * 4, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 4), nn.ReLU(inplace=True),
            nn.Conv2d(self.inplanes * 4, self.inplanes * 4, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 4), nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2, return_indices=return_indices),  # end of res3 (features[14:24], 256 ch)
            nn.Conv2d(self.inplanes * 4, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8), nn.ReLU(inplace=True),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8), nn.ReLU(inplace=True),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8), nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2, return_indices=return_indices),  # end of res4 (features[24:34], 512 ch)
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8), nn.ReLU(inplace=True),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8), nn.ReLU(inplace=True),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8), nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2, return_indices=return_indices))  # end of res5 (features[34:], 512 ch)

        # self._initialize_weights()
        if pretrained:
            # load the pretrained VGG16-BN weights
            state_dict = load_state_dict_from_url(model_urls["vgg16_bn"],
                                                  progress=True)
            # strip the "features." prefix (9 characters) so the keys match this plain Sequential
            features.load_state_dict({
                k[9:]: v
                for k, v in state_dict.items()
                if k[9:] in features.state_dict()
            })
        else:
            _initialize_weights(self, features.modules())

        self.backbone = nn.ModuleDict(
            OrderedDict([  # nn.Sequential
                ("res1", features[0:7]),
                ("res2", features[7:14]),
                ("res3", features[14:24]),
                ("res4", features[24:34]),
                ("res5", features[34:]),
            ]))

        # freeze the parameters of the selected stages
        for name in freeze_at:
            for parameter in self.backbone[name].parameters():
                parameter.requires_grad_(False)

        # list all parameters that will still receive gradient updates
        print("Only the following parameters will be updated:")
        for name, parameter in self.backbone.named_parameters():
            if parameter.requires_grad:
                print("name:", name)

        self.decode = nn.ModuleDict()
        self.decode["res5"] = nn.Sequential(
            # nn.MaxUnpool2d(2,2),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8),
            nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8),
            nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8),
            nn.ReLU(inplace=False))
        self.decode["res4"] = nn.Sequential(
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8), nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8), nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 4, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 4), nn.ReLU(inplace=False))

        self.decode["res3"] = nn.Sequential(
            nn.Conv2d(self.inplanes * 4, self.inplanes * 4, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 4), nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 4, self.inplanes * 4, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 4), nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 4, self.inplanes * 2, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 2), nn.ReLU(inplace=False))

        self.decode["res2"] = nn.Sequential(
            nn.Conv2d(self.inplanes * 2, self.inplanes * 2, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 2), nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 2, self.inplanes, 3, padding=1),
            nn.BatchNorm2d(self.inplanes), nn.ReLU(inplace=False))

        self.decode["res1"] = nn.Sequential(
            nn.Conv2d(self.inplanes, self.inplanes, 3, padding=1),
            nn.BatchNorm2d(self.inplanes),
            nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes, num_classes, 3, padding=1),
            # nn.BatchNorm2d(num_classes),
            # nn.ReLU(inplace=False)
            # nn.Softmax(1)
        )

        _initParmasV2(self, self.decode.modules())
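
As with the other excerpts, only `__init__` is available. The sketch below is a hedged guess at the forward pass implied by the layer shapes: run the five backbone stages, then upsample and decode from res5 back to res1; with `use_shortcut=True` the encoder feature whose resolution and channel count match is added after each decode block. It assumes `return_indices=False`; with `return_indices=True`, the commented-out `nn.MaxUnpool2d(2, 2)` would replace the interpolation.

    def forward(self, x):
        # Sketch only (assumption); assumes return_indices=False so each stage returns a single tensor.
        import torch.nn.functional as F  # normally imported at module level

        feats = {}
        for name, stage in self.backbone.items():  # res1 ... res5
            x = stage(x)
            feats[name] = x

        order = ["res5", "res4", "res3", "res2", "res1"]
        out = feats["res5"]
        for i, name in enumerate(order):
            out = F.interpolate(out, scale_factor=2, mode="nearest")  # undo one MaxPool2d
            out = self.decode[name](out)
            if self.use_shortcut and i + 1 < len(order):
                out = out + feats[order[i + 1]]  # encoder feature with matching shape
        return out  # [N, num_classes, H, W]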
Code Example #4
    def __init__(self,
                 model_name="vgg16_bn",
                 num_classes=21,
                 freeze_at=["res2", "res3", "res4", "res5"],
                 use_shortcut=False,
                 pretrained=False,
                 return_indices=False):
        super().__init__()

        self.num_classes = num_classes
        self.use_shortcut = use_shortcut

        # self.backbone = Backbone_vgg16bn(pretrained=pretrained,freeze_at=freeze_at,return_indices=return_indices)
        self.backbone = Backbone_vgg(model_name,
                                     pretrained=pretrained,
                                     freeze_at=freeze_at)

        self.inplanes = self.backbone.inplanes

        self.decode = nn.ModuleDict()
        self.decode["res5"] = nn.Sequential(
            # nn.MaxUnpool2d(2,2),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8),
            nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8),
            nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8),
            nn.ReLU(inplace=False))
        self.decode["res4"] = nn.Sequential(
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8), nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 8, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 8), nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 8, self.inplanes * 4, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 4), nn.ReLU(inplace=False))

        self.decode["res3"] = nn.Sequential(
            nn.Conv2d(self.inplanes * 4, self.inplanes * 4, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 4), nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 4, self.inplanes * 4, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 4), nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 4, self.inplanes * 2, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 2), nn.ReLU(inplace=False))

        self.decode["res2"] = nn.Sequential(
            nn.Conv2d(self.inplanes * 2, self.inplanes * 2, 3, padding=1),
            nn.BatchNorm2d(self.inplanes * 2), nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes * 2, self.inplanes, 3, padding=1),
            nn.BatchNorm2d(self.inplanes), nn.ReLU(inplace=False))

        self.decode["res1"] = nn.Sequential(
            nn.Conv2d(self.inplanes, self.inplanes, 3, padding=1),
            nn.BatchNorm2d(self.inplanes),
            nn.ReLU(inplace=False),
            nn.Conv2d(self.inplanes, num_classes, 3, padding=1),
            # nn.BatchNorm2d(num_classes),
            # nn.ReLU(inplace=False)
            # nn.Softmax(1)
        )

        _initParmasV2(self, self.decode.modules())
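
All four excerpts call `_initParmasV2`, but its definition is not included. A typical helper of this kind applies Kaiming initialization to convolution layers and constant initialization to batch-norm layers; the sketch below is only such an assumption, not the author's actual implementation.

def _initParmasV2(model, modules):
    # Hypothetical sketch of the missing helper (assumption): Kaiming init for
    # conv layers, constants for BatchNorm; `model` is accepted to match the
    # call sites above but is not otherwise used here.
    import torch.nn as nn
    for m in modules:
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.ones_(m.weight)
            nn.init.zeros_(m.bias)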