Example No. 1
    def __init__(self,
                 num_classes: int = 19,
                 backbone_indices: Tuple[int] = (-1, ),
                 channels: int = None,
                 align_corners: bool = False,
                 pretrained: str = None):
        super(FCN, self).__init__()

        self.backbone = HRNet_W48()
        backbone_channels = [
            self.backbone.feat_channels[i] for i in backbone_indices
        ]

        self.head = FCNHead(num_classes, backbone_indices, backbone_channels,
                            channels)

        self.align_corners = align_corners
        self.transforms = T.Compose([T.Normalize()])

        if pretrained is not None:
            model_dict = paddle.load(pretrained)
            self.set_dict(model_dict)
            print("load custom parameters success")

        else:
            checkpoint = os.path.join(self.directory, 'model.pdparams')
            model_dict = paddle.load(checkpoint)
            self.set_dict(model_dict)
            print("load pretrained parameters success")
Example No. 2
    def __init__(self,
                 num_classes: int = 21,
                 backbone_indices: Tuple[int] = (0, 3),
                 aspp_ratios: Tuple[int] = (1, 12, 24, 36),
                 aspp_out_channels: int = 256,
                 align_corners: bool = False,
                 pretrained: str = None):
        super(DeepLabV3PResnet50, self).__init__()
        self.backbone = ResNet50_vd()
        backbone_channels = [
            self.backbone.feat_channels[i] for i in backbone_indices
        ]
        self.head = DeepLabV3PHead(num_classes, backbone_indices,
                                   backbone_channels, aspp_ratios,
                                   aspp_out_channels, align_corners)
        self.align_corners = align_corners
        self.transforms = T.Compose([T.Normalize()])

        if pretrained is not None:
            model_dict = paddle.load(pretrained)
            self.set_dict(model_dict)
            print("load custom parameters success")

        else:
            checkpoint = os.path.join(self.directory,
                                      'deeplabv3p_model.pdparams')
            model_dict = paddle.load(checkpoint)
            self.set_dict(model_dict)
            print("load pretrained parameters success")
Example No. 3
    def __init__(self,
                 num_classes: int = 19,
                 align_corners: bool = False,
                 use_deconv: bool = False,
                 pretrained: str = None):
        super(UNet, self).__init__()

        self.encode = Encoder()
        self.decode = Decoder(align_corners, use_deconv=use_deconv)
        # `self.conv` is bound to the same layer as `self.cls`: a 3x3 convolution
        # mapping the 64-channel decoder output to per-class logits.
        self.cls = self.conv = nn.Conv2D(in_channels=64,
                                         out_channels=num_classes,
                                         kernel_size=3,
                                         stride=1,
                                         padding=1)

        self.transforms = T.Compose([T.Normalize()])

        if pretrained is not None:
            model_dict = paddle.load(pretrained)
            self.set_dict(model_dict)
            print("load custom parameters success")

        else:
            checkpoint = os.path.join(self.directory, 'model.pdparams')
            model_dict = paddle.load(checkpoint)
            self.set_dict(model_dict)
            print("load pretrained parameters success")
Example No. 4
    def __init__(self,
                 num_classes: int = 19,
                 backbone_indices: List[int] = [0],
                 ocr_mid_channels: int = 512,
                 ocr_key_channels: int = 256,
                 align_corners: bool = False,
                 pretrained: str = None):
        super(OCRNetHRNetW18, self).__init__()
        self.backbone = HRNet_W18()
        self.backbone_indices = backbone_indices
        in_channels = [
            self.backbone.feat_channels[i] for i in backbone_indices
        ]
        self.head = OCRHead(num_classes=num_classes,
                            in_channels=in_channels,
                            ocr_mid_channels=ocr_mid_channels,
                            ocr_key_channels=ocr_key_channels)
        self.align_corners = align_corners
        self.transforms = T.Compose([T.Normalize()])

        if pretrained is not None:
            model_dict = paddle.load(pretrained)
            self.set_dict(model_dict)
            print("load custom parameters success")

        else:
            checkpoint = os.path.join(self.directory, 'model.pdparams')
            model_dict = paddle.load(checkpoint)
            self.set_dict(model_dict)
            print("load pretrained parameters success")
Example No. 5
    def __init__(self,
                 num_classes: int = 19,
                 align_corners: bool = False,
                 pretrained: str = None):

        super(FastSCNN, self).__init__()

        self.learning_to_downsample = LearningToDownsample(32, 48, 64)
        self.global_feature_extractor = GlobalFeatureExtractor(
            in_channels=64,
            block_channels=[64, 96, 128],
            out_channels=128,
            expansion=6,
            num_blocks=[3, 3, 3],
            align_corners=True)  # fixed to True here; the fusion module below uses the constructor argument
        self.feature_fusion = FeatureFusionModule(64, 128, 128, align_corners)
        self.classifier = Classifier(128, num_classes)
        self.align_corners = align_corners
        self.transforms = T.Compose([T.Normalize()])

        if pretrained is not None:
            model_dict = paddle.load(pretrained)
            self.set_dict(model_dict)
            print("load custom parameters success")

        else:
            checkpoint = os.path.join(self.directory,
                                      'fastscnn_model.pdparams')
            model_dict = paddle.load(checkpoint)
            self.set_dict(model_dict)
            print("load pretrained parameters success")
Example No. 6
    def __init__(self,
                 num_classes: int = 19,
                 lambd: float = 0.25,
                 align_corners: bool = False,
                 pretrained: str = None):
        super(BiSeNetV2, self).__init__()

        C1, C2, C3 = 64, 64, 128
        db_channels = (C1, C2, C3)
        C1, C3, C4, C5 = int(C1 * lambd), int(C3 * lambd), 64, 128
        sb_channels = (C1, C3, C4, C5)
        mid_channels = 128

        self.db = DetailBranch(db_channels)
        self.sb = SemanticBranch(sb_channels)

        self.bga = BGA(mid_channels, align_corners)
        self.aux_head1 = SegHead(C1, C1, num_classes)
        self.aux_head2 = SegHead(C3, C3, num_classes)
        self.aux_head3 = SegHead(C4, C4, num_classes)
        self.aux_head4 = SegHead(C5, C5, num_classes)
        self.head = SegHead(mid_channels, mid_channels, num_classes)

        self.align_corners = align_corners
        self.transforms = T.Compose([T.Normalize()])

        if pretrained is not None:
            model_dict = paddle.load(pretrained)
            self.set_dict(model_dict)
            print("load custom parameters success")

        else:
            checkpoint = os.path.join(self.directory, 'bisenet_model.pdparams')
            model_dict = paddle.load(checkpoint)
            self.set_dict(model_dict)
            print("load pretrained parameters success")
Example No. 7
    def __init__(self,
                 num_classes: int = 19,
                 stem_channels: Tuple[int] = (16, 24, 32, 48),
                 ch_list: Tuple[int] = (64, 96, 160, 224, 320),
                 grmul: float = 1.7,
                 gr: Tuple[int] = (10, 16, 18, 24, 32),
                 n_layers: Tuple[int] = (4, 4, 8, 8, 8),
                 align_corners: bool = False,
                 pretrained: str = None):

        super(HarDNet, self).__init__()
        self.align_corners = align_corners
        self.pretrained = pretrained
        encoder_blks_num = len(n_layers)
        decoder_blks_num = encoder_blks_num - 1
        encoder_in_channels = stem_channels[3]

        self.stem = nn.Sequential(
            layers.ConvBNReLU(3,
                              stem_channels[0],
                              kernel_size=3,
                              bias_attr=False),
            layers.ConvBNReLU(stem_channels[0],
                              stem_channels[1],
                              kernel_size=3,
                              bias_attr=False),
            layers.ConvBNReLU(stem_channels[1],
                              stem_channels[2],
                              kernel_size=3,
                              stride=2,
                              bias_attr=False),
            layers.ConvBNReLU(stem_channels[2],
                              stem_channels[3],
                              kernel_size=3,
                              bias_attr=False))

        self.encoder = Encoder(encoder_blks_num, encoder_in_channels, ch_list,
                               gr, grmul, n_layers)

        skip_connection_channels = self.encoder.get_skip_channels()
        decoder_in_channels = self.encoder.get_out_channels()

        self.decoder = Decoder(decoder_blks_num, decoder_in_channels,
                               skip_connection_channels, gr, grmul, n_layers,
                               align_corners)

        self.cls_head = nn.Conv2D(in_channels=self.decoder.get_out_channels(),
                                  out_channels=num_classes,
                                  kernel_size=1)

        self.transforms = T.Compose([T.Normalize()])

        if pretrained is not None:
            model_dict = paddle.load(pretrained)
            self.set_dict(model_dict)
            print("load custom parameters success")

        else:
            checkpoint = os.path.join(self.directory, 'model.pdparams')
            model_dict = paddle.load(checkpoint)
            self.set_dict(model_dict)
            print("load pretrained parameters success")