Code example #1
File: pspnet.py Project: dingmyu/psa
    def __init__(self, in_dim, reduction_dim, bins, BatchNorm=nn.BatchNorm2d):
        super(PPM, self).__init__()
        # Variant pyramid module built from dilated 3x3 branches instead of
        # pooled branches; reduction_dim and bins are accepted for interface
        # compatibility but are not used here.
        self.features = []
        self.features.append(
            nn.Sequential(
                nn.Conv2d(in_dim, 256, kernel_size=3, stride=1, padding=1,
                          bias=False),
                BatchNorm(256), nn.ReLU(inplace=True)))
        self.features.append(
            nn.Sequential(
                nn.Conv2d(in_dim, 256, kernel_size=3, stride=1, padding=3,
                          dilation=3, bias=True),
                nn.Conv2d(256, 256, kernel_size=1, stride=1, bias=False),
                BatchNorm(256), nn.ReLU(inplace=True)))
        self.features.append(
            nn.Sequential(
                nn.Conv2d(in_dim, 256, kernel_size=3, stride=1, padding=6,
                          dilation=6, bias=True),
                nn.Conv2d(256, 256, kernel_size=1, stride=1, bias=False),
                BatchNorm(256), nn.ReLU(inplace=True)))
        self.features.append(
            nn.Sequential(
                nn.Conv2d(in_dim, 256, kernel_size=3, stride=1, padding=9,
                          dilation=9, bias=True),
                nn.Conv2d(256, 256, kernel_size=1, stride=1, bias=False),
                BatchNorm(256), nn.ReLU(inplace=True)))
        self.features = nn.ModuleList(self.features)
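Every branch above uses padding equal to its dilation rate, so all four outputs keep the input's spatial size and can be fused without upsampling. The forward pass is not shown on this page; a minimal sketch under that assumption (and assuming torch is imported), concatenating the branch outputs along the channel axis:

    def forward(self, x):
        # Each dilated branch preserves H x W, so no interpolation is needed.
        out = [f(x) for f in self.features]
        return torch.cat(out, 1)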
Code example #2
    def __init__(self, in_dim, reduction_dim, bins, BatchNorm=nn.BatchNorm2d):
        super(PPM, self).__init__()
        self.features = []
        for bin in bins:
            # One branch per pyramid scale: adaptive-pool to bin x bin,
            # then a 1x1 convolution reduces the channel dimension.
            self.features.append(
                nn.Sequential(
                    nn.AdaptiveAvgPool2d(bin),
                    nn.Conv2d(in_dim, reduction_dim, kernel_size=1,
                              bias=False),
                    BatchNorm(reduction_dim),
                    nn.ReLU(inplace=True)))
        self.features = nn.ModuleList(self.features)
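This is the standard PSPNet pyramid pooling module. Its forward pass (not included in the snippet) conventionally upsamples every pooled branch back to the input resolution and concatenates it with the input itself, which is why fea_dim doubles in the PSPNet examples below. A sketch assuming the usual implementation (requires torch and torch.nn.functional as F):

    def forward(self, x):
        x_size = x.size()
        out = [x]
        for f in self.features:
            # Pool to bin x bin, project, then upsample back to H x W.
            out.append(F.interpolate(f(x), x_size[2:], mode='bilinear',
                                     align_corners=True))
        return torch.cat(out, 1)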
Code example #3
    def __init__(self,
                 channel,
                 reduction=1,
                 multiply=True,
                 BatchNorm=nn.BatchNorm2d):
        super(SpatialFCAttentionLayer, self).__init__()
        self.fc = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, kernel_size=3, stride=1,
                      padding=1, bias=False),
            BatchNorm(channel // reduction),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, 1, kernel_size=3, stride=1,
                      padding=1, bias=False),
            BatchNorm(1),
            nn.Sigmoid())
        self.multiply = multiply
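The forward pass for this attention layer is not shown. Given the multiply flag, a plausible sketch (an assumption, not the project's actual code) is that the single-channel sigmoid map either gates the input element-wise or is returned directly:

    def forward(self, x):
        y = self.fc(x)  # (N, 1, H, W) spatial attention map in (0, 1)
        # Broadcasting scales every channel of x by the same spatial map.
        return x * y if self.multiply else y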
Code example #4
    def __init__(self, hidden_channels, out_channels, num_layers=3,
                 dropout=0.5):
        super(GIN, self).__init__()

        self.dropout = dropout

        self.atom_encoder = AtomEncoder(hidden_channels)
        self.bond_encoder = BondEncoder(hidden_channels)

        self.convs = torch.nn.ModuleList()
        for _ in range(num_layers):
            nn = Sequential(
                Linear(hidden_channels, 2 * hidden_channels),
                BatchNorm(2 * hidden_channels),
                ReLU(),
                Linear(2 * hidden_channels, hidden_channels),
                BatchNorm(hidden_channels),
                ReLU(),
            )
            self.convs.append(GINEConv(nn, train_eps=True))

        self.lin = Linear(hidden_channels, out_channels)
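This snippet matches the PyTorch Geometric GIN example for OGB molecule datasets; the imports it presumably relies on (inferred from that example, not shown in the snippet) are:

    import torch
    from torch.nn import BatchNorm1d as BatchNorm, Linear, ReLU, Sequential
    from torch_geometric.nn import GINEConv
    from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder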
Code example #5
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers):
        super(Net, self).__init__()

        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for i in range(num_layers):
            mlp = Sequential(
                Linear(in_channels, 2 * hidden_channels),
                BatchNorm(2 * hidden_channels),
                ReLU(),
                Linear(2 * hidden_channels, hidden_channels),
            )
            conv = GINConv(mlp, train_eps=True).jittable()

            self.convs.append(conv)
            self.batch_norms.append(BatchNorm(hidden_channels))

            in_channels = hidden_channels

        self.lin1 = Linear(hidden_channels, hidden_channels)
        self.batch_norm1 = BatchNorm(hidden_channels)
        self.lin2 = Linear(hidden_channels, out_channels)
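A sketch of the matching forward pass, modeled on the standard PyTorch Geometric GIN example this snippet resembles (assumed, since the snippet only shows __init__):

    import torch.nn.functional as F
    from torch_geometric.nn import global_add_pool

    def forward(self, x, edge_index, batch):
        for conv, batch_norm in zip(self.convs, self.batch_norms):
            x = F.relu(batch_norm(conv(x, edge_index)))
        x = global_add_pool(x, batch)  # sum node features per graph
        x = F.relu(self.batch_norm1(self.lin1(x)))
        return self.lin2(x)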
Code example #6
File: pspnet.py Project: dingmyu/cityscapes_scripts
    def __init__(self,
                 layers=50,
                 bins=(1, 2, 3, 6),
                 dropout=0.1,
                 classes=2,
                 zoom_factor=8,
                 use_softmax=True,
                 use_aux=True,
                 pretrained=True,
                 syncbn=True,
                 group_size=1,
                 group=None,
                 sync_stats=False):
        super(PSPNet, self).__init__()
        assert layers in [50, 101, 152]
        assert 2048 % len(bins) == 0
        assert classes > 1
        assert zoom_factor in [1, 2, 4, 8]
        self.zoom_factor = zoom_factor
        self.use_softmax = use_softmax
        self.use_aux = use_aux

        if syncbn:
            # from lib.syncbn import SynchronizedBatchNorm2d as BatchNorm
            def BNFunc(*args, **kwargs):
                return SyncBatchNorm2d(*args,
                                       **kwargs,
                                       eps=1e-4,
                                       momentum=0.9,
                                       group_size=group_size,
                                       group=group,
                                       sync_stats=sync_stats)

            BatchNorm = BNFunc
        else:
            from torch.nn import BatchNorm2d as BatchNorm
        models.BatchNorm = BatchNorm

        if layers == 50:
            resnet = models.resnet50(pretrained=pretrained)
        elif layers == 101:
            resnet = models.resnet101(pretrained=pretrained)
        else:
            resnet = models.resnet152(pretrained=pretrained)

        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                    resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        # Convert layer3/layer4 from strided to dilated convolutions
        # (rates 2 and 4) so the backbone keeps an output stride of 8
        # instead of 32.
        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        fea_dim = 2048
        self.ppm = PPM(fea_dim, int(fea_dim / len(bins)), bins, BatchNorm)
        fea_dim *= 2
        self.cls = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1, bias=False),
            BatchNorm(256), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout),
            nn.Conv2d(256, classes, kernel_size=1))
        self.conv6 = nn.Sequential(
            nn.Conv2d(1280, 256, kernel_size=1, padding=0, bias=True),
            BatchNorm(256), nn.ReLU(inplace=True))
        self.conv1_1x1 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=1, padding=0, bias=True),
            BatchNorm(256), nn.ReLU(inplace=True))
        if use_aux:
            self.aux = nn.Sequential(
                nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False),
                BatchNorm(256), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout),
                nn.Conv2d(256, classes, kernel_size=1))
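The BNFunc closure above fixes the synchronization arguments once, so every later call site can write BatchNorm(channels) regardless of whether it resolves to the custom SyncBatchNorm2d or to plain BatchNorm2d. A minimal illustration of the same pattern with stock PyTorch, where nn.SyncBatchNorm stands in for SyncBatchNorm2d:

    import torch.nn as nn

    def make_batchnorm(sync=False, process_group=None):
        if sync:
            # Bind group and hyperparameters once; callers pass only channels.
            return lambda num_features: nn.SyncBatchNorm(
                num_features, eps=1e-4, momentum=0.9,
                process_group=process_group)
        return nn.BatchNorm2d

    BatchNorm = make_batchnorm(sync=False)
    bn = BatchNorm(256)  # identical call site either way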
Code example #7
File: pspnet.py Project: dingmyu/psa
    def __init__(self,
                 backbone='resnet',
                 layers=50,
                 bins=(1, 2, 3, 6),
                 dropout=0.1,
                 classes=2,
                 zoom_factor=8,
                 use_ppm=True,
                 use_softmax=True,
                 use_aux=False,
                 pretrained=True,
                 syncbn=True,
                 group_size=8,
                 group=None):
        super(PSPNet, self).__init__()
        assert layers in [50, 101, 152]
        assert 2048 % len(bins) == 0
        assert classes > 1
        assert zoom_factor in [1, 2, 4, 8]
        self.zoom_factor = zoom_factor
        self.use_ppm = use_ppm
        self.use_softmax = use_softmax
        self.use_aux = use_aux

        if backbone == 'resnet':
            import resnet as models
        elif backbone == 'ibnnet_a':
            import ibnnet_a as models
        elif backbone == 'ibnnet_b':
            import ibnnet_b as models
        else:
            raise NameError('Backbone type not defined!')

        if syncbn:
            # from lib.syncbn import SynchronizedBatchNorm2d as BatchNorm
            def BNFunc(*args, **kwargs):
                return SyncBatchNorm2d(*args,
                                       **kwargs,
                                       group_size=group_size,
                                       group=group,
                                       sync_stats=True)

            BatchNorm = BNFunc
        else:
            from torch.nn import BatchNorm2d as BatchNorm
        models.BatchNorm = BatchNorm

        if layers == 50:
            resnet = models.resnet50(pretrained=pretrained)
        elif layers == 101:
            resnet = models.resnet101(pretrained=pretrained)
        else:
            resnet = models.resnet152(pretrained=pretrained)
        if backbone == 'ibnnet_b':
            self.layer0 = nn.Sequential(resnet.conv1, resnet.INCat0,
                                        resnet.relu, resnet.maxpool)
        else:
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                        resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        for n, m in self.layer3.named_modules():
            if 'conv2' in n and 'convbnin.conv2' not in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n and 'convbnin.conv2' not in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        fea_dim = 2048
        #   if use_ppm:
        #      self.ppm = PPM(fea_dim, int(fea_dim/len(bins)), bins, BatchNorm)

        self.cls = nn.Sequential(
            nn.Conv2d(2048, 512, kernel_size=3, padding=1, bias=False),
            BatchNorm(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout),
        )
        self.pool1 = nn.AdaptiveAvgPool2d(1)
        self.result = nn.Conv2d(512, 287, 1, bias=False)
        if use_aux:
            self.aux = nn.Sequential(
                nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False),
                BatchNorm(256), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout),
                nn.Conv2d(256, classes, kernel_size=1))
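Unlike the other PSPNet variants on this page, this one ends in a global average pool (pool1) and a 287-way 1x1 convolution rather than a dense per-pixel classifier, which suggests an image-level prediction head. A sketch (assumed; the snippet does not show the forward) of how that tail would be applied:

    def head(self, feat):        # feat: (N, 2048, H, W) from layer4
        x = self.cls(feat)       # (N, 512, H, W)
        x = self.pool1(x)        # (N, 512, 1, 1) global average pool
        return self.result(x)    # (N, 287, 1, 1) image-level logits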
Code example #8
    def __init__(self, in_, out, BatchNorm=nn.BatchNorm2d):
        super(ConvRelu, self).__init__()
        self.conv = conv3x3(in_, out)
        self.bn = BatchNorm(out)
        self.activation = nn.ReLU(inplace=True)
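conv3x3 is a project helper not shown here; a common definition, together with the forward this module presumably pairs with (both assumed):

    def conv3x3(in_, out):
        return nn.Conv2d(in_, out, kernel_size=3, padding=1)

    def forward(self, x):
        return self.activation(self.bn(self.conv(x)))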
Code example #9
    def __init__(self,
                 backbone='resnet',
                 layers=50,
                 bins=(1, 2, 3, 6),
                 dropout=0.1,
                 classes=2,
                 zoom_factor=8,
                 use_ppm=True,
                 use_softmax=True,
                 use_aux=True,
                 pretrained=True,
                 syncbn=True,
                 group_size=8,
                 group=None):
        super(PSPNet, self).__init__()
        assert layers in [50, 101, 152]
        assert 2048 % len(bins) == 0
        assert classes > 1
        assert zoom_factor in [1, 2, 4, 8]
        self.zoom_factor = zoom_factor
        self.use_ppm = use_ppm
        self.use_softmax = use_softmax
        self.use_aux = use_aux

        if backbone == 'resnet':
            import resnet as models
        elif backbone == 'ibnnet_a':
            import ibnnet_a as models
        elif backbone == 'ibnnet_b':
            import ibnnet_b as models
        else:
            raise NameError('Backbone type not defined!')

        if syncbn:
            # from lib.syncbn import SynchronizedBatchNorm2d as BatchNorm
            def BNFunc(*args, **kwargs):
                return SyncBatchNorm2d(*args,
                                       **kwargs,
                                       group_size=group_size,
                                       group=group,
                                       sync_stats=True)

            BatchNorm = BNFunc
        else:
            from torch.nn import BatchNorm2d as BatchNorm
        models.BatchNorm = BatchNorm

        if layers == 50:
            resnet = models.resnet50(pretrained=pretrained)
        elif layers == 101:
            resnet = models.resnet101(pretrained=pretrained)
        else:
            resnet = models.resnet152(pretrained=pretrained)
        if backbone == 'ibnnet_b':
            self.layer0 = nn.Sequential(resnet.conv1, resnet.INCat0,
                                        resnet.relu, resnet.maxpool)
        else:
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                        resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        for n, m in self.layer3.named_modules():
            if 'conv2' in n and 'convbnin.conv2' not in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n and 'convbnin.conv2' not in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        channel_4x = 256
        # channel attention layer and spatial attention layer.
        self.cam_4x = ChannelAttentionLayer(channel_4x,
                                            reduction=1,
                                            multiply=True)
        self.sam_4x = SpatialAttentionLayer(channel_4x,
                                            reduction=1,
                                            multiply=True)

        channel_8x = 512
        # channel attention layer and spatial attention layer.
        self.cam_8x = ChannelAttentionLayer(channel_8x,
                                            reduction=1,
                                            multiply=True)
        self.sam_8x = SpatialAttentionLayer(channel_8x,
                                            reduction=1,
                                            multiply=True)

        channel_1x = classes  # final predict
        # channel attention layer and spatial attention layer.
        self.cam_1x = ChannelAttentionLayer(channel_1x,
                                            reduction=1,
                                            multiply=True)
        self.sam_1x = SpatialFCAttentionLayer(channel_1x,
                                              reduction=1,
                                              multiply=True)

        fea_dim = 2048
        if use_ppm:
            self.ppm = PPM(fea_dim + 128, int(fea_dim / len(bins)), bins,
                           BatchNorm)
            fea_dim *= 2

        self.cls = nn.Sequential(
            nn.Conv2d(768, 256, kernel_size=3, padding=1, bias=False),
            BatchNorm(256), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout),
            nn.Conv2d(256, classes, kernel_size=1))

        self.cls_2 = nn.Sequential(
            nn.Conv2d(classes * 2, classes, kernel_size=1))

        self.conv6 = nn.Sequential(
            nn.Conv2d(256 * 8, 512, kernel_size=1, padding=0, bias=True),
            BatchNorm(512), nn.ReLU(inplace=True))
        self.conv1_1x1 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=1, padding=0, bias=True),
            BatchNorm(256), nn.ReLU(inplace=True))
        self.conv2_1x1 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=1, padding=0, bias=True),
            BatchNorm(256), nn.ReLU(inplace=True))
        if use_aux:
            self.aux = nn.Sequential(
                nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False),
                BatchNorm(256), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout),
                nn.Conv2d(256, classes, kernel_size=1))
            # init_weights(self.aux)
        # comment to use default initialization
        init_weights(self.ppm)  # note: self.ppm only exists when use_ppm=True
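init_weights is a project helper not shown on this page. A typical implementation of such a helper (an assumption, not the project's actual code) applies Kaiming initialization to convolutions and constant initialization to batch-norm layers:

    import torch.nn as nn

    def init_weights(module):
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)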