def __init__(self, class_num, bn_momentum=0.01):
    """Build DeepLabV3: dilated ResNet-101 backbone, ASPP, 1x1 classifier.

    Args:
        class_num: number of output segmentation classes.
        bn_momentum: momentum for the backbone's BatchNorm layers.
    """
    super(DeepLabV3, self).__init__()
    # ResNet-101 with dilation in the last stage (dilation=[1, 1, 1, 2]).
    backbone = resnet101.get_resnet101(
        dilation=[1, 1, 1, 2], bn_momentum=bn_momentum, is_fpn=False)
    self.Resnet101 = backbone
    # Atrous spatial pyramid pooling over the 2048-channel backbone output.
    self.ASPP = ASPP(2048, 256, [6, 12, 18], norm_act=nn.BatchNorm2d)
    # Per-pixel linear classifier on the 256-channel ASPP output.
    self.classify = nn.Conv2d(256, class_num, 1, bias=True)
# Example #2
 def __init__(self, class_num, bn_momentum=0.01):
     """Build FastFCN: FPN-style ResNet-101 backbone, JPU, and seg head.

     Args:
         class_num: number of output segmentation classes.
         bn_momentum: momentum for the backbone's BatchNorm layers.
     """
     super(FastFCN, self).__init__()
     # Backbone exposes multiple stages (is_fpn=True) for the JPU to fuse.
     backbone = resnet101.get_resnet101(
         dilation=[1, 1, 1, 2], bn_momentum=bn_momentum, is_fpn=True)
     self.Resnet101 = backbone
     # Joint Pyramid Upsampling over the last three backbone stage outputs.
     self.jpu = JPU([512, 1024, 2048], width=512, norm_layer=nn.BatchNorm2d)
     self.head = Head(class_num, norm_layer=nn.BatchNorm2d)
 def __init__(self, class_num, bn_momentum=0.01):
     """Build DenseASPP: dilated ResNet-101 backbone plus DenseASPP head.

     Args:
         class_num: number of output segmentation classes.
         bn_momentum: momentum for the backbone's BatchNorm layers.
     """
     super(DenseASPP, self).__init__()
     # Single-output backbone (is_fpn=False) with last-stage dilation.
     backbone = resnet101.get_resnet101(
         dilation=[1, 1, 1, 2], bn_momentum=bn_momentum, is_fpn=False)
     self.Resnet101 = backbone
     # DenseASPP head consumes the 2048-channel final feature map.
     self.head = _DenseASPPHead(
         2048, class_num=class_num, norm_layer=nn.BatchNorm2d)
 def __init__(self, output_stride, class_num, pretrained, bn_momentum=0.1, freeze_bn=False, drop_out=True, model_path=''):
     """Build DeepLabV3+: dilated ResNet-101 backbone, encoder, decoder.

     Args:
         output_stride: output stride forwarded to the Encoder.
         class_num: number of output segmentation classes.
         pretrained: unused in this constructor — presumably consumed by a
             weight-loading step elsewhere; confirm before removing.
         bn_momentum: momentum for the BatchNorm layers.
         freeze_bn: if True, freeze all BatchNorm layers via freeze_bn().
         drop_out: dropout flag forwarded to Encoder and Decoder.
         model_path: unused in this constructor — verify against callers.
     """
     super(DeepLabV3plus, self).__init__()
     self.Resnet101 = resnet101.get_resnet101(dilation=[1, 1, 1, 2], bn_momentum=bn_momentum, is_fpn=False)
     self.encoder = Encoder(bn_momentum, output_stride, drop_out)
     self.decoder = Decoder(class_num, bn_momentum, drop_out)
     if freeze_bn:
         self.freeze_bn()
         # Fixed typo in the original message ("bacth" -> "batch").
         print("freeze batch normalization successfully!")
    def __init__(self, output_stride, class_num, pretrained, bn_momentum=0.01, freeze_bn=False, model_path=''):
        """Build DeepLabV3: ResNet-101 backbone, ASPP module, 1x1 classifier.

        Args:
            output_stride: output stride forwarded to the ASPP module.
            class_num: number of output segmentation classes.
            pretrained: unused in this constructor — presumably consumed by
                a weight-loading step elsewhere; confirm before removing.
            bn_momentum: momentum for the BatchNorm layers.
            freeze_bn: if True, freeze all BatchNorm layers via freeze_bn().
            model_path: unused in this constructor — verify against callers.
        """
        super(DeepLabV3, self).__init__()
        self.Resnet101 = resnet101.get_resnet101(num_classes=0, dilation=[1, 1, 1, 2], bn_momentum=bn_momentum, is_fpn=False)
        self.ASPP = AsppModule(out_channels=256, bn_momentum=bn_momentum, output_stride=output_stride)
        self.classify = nn.Conv2d(256, class_num, 1, bias=True)

        if freeze_bn:
            self.freeze_bn()
            # Fixed typo in the original message ("bacth" -> "batch").
            print("freeze batch normalization successfully!")
# Example #6
 def __init__(self, class_num, bn_momentum=0.01):
     """Build PSPNet: dilated ResNet-101 backbone + pyramid pooling head.

     Args:
         class_num: number of output segmentation classes.
         bn_momentum: momentum for the backbone's BatchNorm layers.
     """
     super(PSPNet, self).__init__()
     # Single-output backbone (is_fpn=False) with last-stage dilation.
     backbone = resnet101.get_resnet101(
         dilation=[1, 1, 1, 2], bn_momentum=bn_momentum, is_fpn=False)
     self.Resnet101 = backbone
     # Pyramid pooling over the 2048-channel final feature map.
     self.psp_layer = PyramidPooling(
         'psp', class_num, 2048, norm_layer=nn.BatchNorm2d)
 def __init__(self, class_num, bn_momentum=0.01):
     """Build this DeepLabV3+ variant: FPN-style ResNet-101 + seg head.

     Args:
         class_num: number of output segmentation classes.
         bn_momentum: momentum for the backbone's BatchNorm layers.
     """
     super(DeepLabV3plus, self).__init__()
     # Backbone exposes multiple stages (is_fpn=True) for the head.
     backbone = resnet101.get_resnet101(
         dilation=[1, 1, 1, 2], bn_momentum=bn_momentum, is_fpn=True)
     self.Resnet101 = backbone
     self.head = Head(class_num, norm_layer=nn.BatchNorm2d)
    def __init__(self,
                 input_size,
                 num_classes,
                 bn_momentum=0.0003,
                 features=256,
                 pretained=False,
                 model_path=''):
        """Two-stream (RGB + HHA) RefineNet-style segmentation network.

        Builds one ResNet-101 per modality, MMF fusion blocks per stage,
        3x3 channel adapters, a 4-level RefineNet cascade, and an output
        classifier.

        Args:
            input_size: input spatial size; assumed square (height == width).
            num_classes: number of output segmentation classes.
            bn_momentum: momentum for the backbones' BatchNorm layers.
            features: channel width of the refinement path.
            pretained: unused in this constructor (note original spelling,
                kept for interface compatibility) — verify against callers.
            model_path: unused in this constructor — verify against callers.
        """
        super(RDF, self).__init__()

        def stem_and_stage1(net):
            # Initial conv stack + maxpool + stage-1 blocks of a backbone.
            return nn.Sequential(
                net.conv1, net.bn1, net.relu1,
                net.conv2, net.bn2, net.relu2,
                net.conv3, net.bn3, net.relu3,
                net.maxpool, net.layer1)

        def rn_conv(in_ch, out_ch):
            # 3x3 channel adapter for one backbone stage output.
            return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1,
                             padding=1, bias=False)

        self.Resnet101rgb = get_resnet101(bn_momentum=bn_momentum)
        self.Resnet101hha = get_resnet101(bn_momentum=bn_momentum)

        # The four stages of each backbone, exposed individually.
        self.rgblayer1 = stem_and_stage1(self.Resnet101rgb)
        self.rgblayer2 = self.Resnet101rgb.layer2
        self.rgblayer3 = self.Resnet101rgb.layer3
        self.rgblayer4 = self.Resnet101rgb.layer4

        self.hhalayer1 = stem_and_stage1(self.Resnet101hha)
        self.hhalayer2 = self.Resnet101hha.layer2
        self.hhalayer3 = self.Resnet101hha.layer3
        self.hhalayer4 = self.Resnet101hha.layer4

        # One MMF fusion block per stage, sized to that stage's channels.
        self.mmf1 = MMFBlock(256)
        self.mmf2 = MMFBlock(512)
        self.mmf3 = MMFBlock(1024)
        self.mmf4 = MMFBlock(2048)

        # Channel adapters per stage; stage 4 outputs 2*features because
        # two identical stage-4 feature maps are used as input.
        self.layer1_rn = rn_conv(256, features)
        self.layer2_rn = rn_conv(512, features)
        self.layer3_rn = rn_conv(1024, features)
        self.layer4_rn = rn_conv(2048, 2 * features)

        # Cascaded refinement, coarsest to finest.
        # NOTE(review): math.ceil of an int floor-division is a no-op;
        # `input_size / 32` may have been intended — confirm.
        self.refinenet4 = RefineNetBlock(
            2 * features, (2 * features, math.ceil(input_size // 32)))
        self.refinenet3 = RefineNetBlock(
            features,
            (2 * features, input_size // 32),
            (features, input_size // 16))
        self.refinenet2 = RefineNetBlock(
            features,
            (features, input_size // 16),
            (features, input_size // 8))
        self.refinenet1 = RefineNetBlock(
            features,
            (features, input_size // 8),
            (features, input_size // 4))

        # Final refinement + 1x1 per-pixel classifier.
        self.output_conv = nn.Sequential(
            ResidualConvUnit(features),
            ResidualConvUnit(features),
            nn.Conv2d(features, num_classes, kernel_size=1, stride=1,
                      padding=0, bias=True))
def get_refinenet(input_size, num_classes, features=256, bn_momentum=0.01, pretrained=True):
    """Factory: build a 4-cascade RefineNet on a ResNet-101 backbone.

    Args:
        input_size: input spatial size (assumed square) for the cascade.
        num_classes: number of output segmentation classes.
        features: channel width of the refinement path.
        bn_momentum: momentum for the backbone's BatchNorm layers.
        pretrained: forwarded to RefineNet4Cascade.

    Returns:
        A RefineNet4Cascade model.
    """
    # Renamed local from `resnet101` to `backbone`: the original name
    # shadowed the `resnet101` backbone module referenced elsewhere in
    # this file.
    backbone = get_resnet101(num_classes=num_classes, bn_momentum=bn_momentum)  # new ResNet proposed in PSPNet
    # NOTE(review): a constructed model is passed as `resnet_factory`,
    # whose name (and its default, models.resnet101) suggests a callable
    # factory is expected — confirm the ctor accepts an instance.
    return RefineNet4Cascade(input_size, num_classes=num_classes, resnet_factory=backbone,
                             features=features, bn_momentum=bn_momentum, pretrained=pretrained)
    def __init__(self, input_channel, input_size,  refinenet_block, num_classes=1, features=256, resnet_factory=models.resnet101, bn_momentum = 0.01, pretrained=True, freeze_resnet=False):
        """Multi-path 4-Cascaded RefineNet for image segmentation.

        Args:
            input_channel (int): number of input channels (unused in this
                constructor — verify against callers before removing).
            input_size (int): input spatial size; assumes equal height and
                width.
            refinenet_block (block): RefineNet block (unused here;
                RefineNetBlock is referenced directly).
            num_classes (int, optional): number of classes
            features (int, optional): number of features in refinenet
            resnet_factory (func, optional): A Resnet model from torchvision
                (unused here; get_resnet101 is called directly).
                Default: models.resnet101
            pretrained (bool, optional): Use pretrained version of resnet
                (unused in this constructor). Default: True
            freeze_resnet (bool, optional): Freeze resnet model
                Default: False
        """
        super().__init__()

        # Removed dead no-op self-assignments of input_channel/input_size
        # that were present in the original.

        self.Resnet101 = get_resnet101(num_classes=0, bn_momentum=bn_momentum)

        # Stage 1 bundles the stem (conv/bn/relu x3 + maxpool) with layer1.
        self.layer1 = nn.Sequential(self.Resnet101.conv1, self.Resnet101.bn1, self.Resnet101.relu1,
                                    self.Resnet101.conv2, self.Resnet101.bn2, self.Resnet101.relu2,
                                    self.Resnet101.conv3, self.Resnet101.bn3, self.Resnet101.relu3,
                                    self.Resnet101.maxpool, self.Resnet101.layer1)

        self.layer2 = self.Resnet101.layer2
        self.layer3 = self.Resnet101.layer3
        self.layer4 = self.Resnet101.layer4

        # Freeze the resnet parameters; default is False.
        if freeze_resnet:
            layers = [self.layer1, self.layer2, self.layer3, self.layer4]
            for layer in layers:
                for param in layer.parameters():
                    param.requires_grad = False

        # 3x3 channel adapters for each backbone stage output.
        self.layer1_rn = nn.Conv2d(
            256, features, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer2_rn = nn.Conv2d(
            512, features, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer3_rn = nn.Conv2d(
            1024, features, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer4_rn = nn.Conv2d(
            2048, 2 * features, kernel_size=3, stride=1, padding=1, bias=False)     # here, 2*features means we use two same stage-4 features as input

        # Cascaded refinement, coarsest to finest.
        # NOTE(review): math.ceil of an int floor-division is a no-op;
        # `input_size / 32` may have been intended — confirm.
        self.refinenet4 = RefineNetBlock(2 * features,
                                         (2 * features, math.ceil(input_size // 32)))
        self.refinenet3 = RefineNetBlock(features,
                                         (2 * features, input_size // 32),
                                         (features, input_size // 16))
        self.refinenet2 = RefineNetBlock(features,
                                         (features, input_size // 16),
                                         (features, input_size // 8))
        self.refinenet1 = RefineNetBlock(features, (features, input_size // 8),
                                         (features, input_size // 4))

        # Final refinement + 1x1 per-pixel classifier.
        self.output_conv = nn.Sequential(
            ResidualConvUnit(features), ResidualConvUnit(features),
            nn.Conv2d(
                features,
                num_classes,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=True))