def __init__(self,
             in_channels,
             num_classes,
             kernel_size,
             n_pool=4,
             base_channels=64,
             output_activation=None):
        """Build a vanilla U-Net style model with a VGG encoder.

        Reference: https://arxiv.org/abs/1505.04597
        Unlike the original paper, instance normalization is applied before
        each activation and average pooling is used instead of max pooling.

        Args:
            in_channels: number of channels in the input tensor.
            num_classes: number of output channels / classes.
            kernel_size: convolution kernel size used throughout the network.
            n_pool (int, optional): number of pooling stages, i.e. encoder
                depth. Defaults to 4.
            base_channels (int, optional): channel count of the first encoder
                stage. Defaults to 64.
            output_activation (optional): activation function of form f(x)
                applied to the output. Defaults to None (keep parent default).
        """
        # One single-repeat VGG stage per pooling level.
        vgg_encoder = Models.VGGNet(in_channels,
                                    base_channels,
                                    kernel_size,
                                    layers=[1] * n_pool)
        super(VGGLinkNet, self).__init__(vgg_encoder, num_classes, kernel_size)
        # Only override the parent's output activation when one is given.
        if output_activation is not None:
            self.output_activation = output_activation
    def __init__(self,
                 in_channels,
                 n_classes,
                 kernel_size,
                 layers=None,
                 base_channels=64,
                 output_activation=None,
                 **kwargs):
        """Build a PSPNet-style model: ResNet encoder, pyramid-pooling
        bottleneck, and a stack of upsampling decoder blocks.

        Args:
            in_channels (int): number of channels in the input tensor.
            n_classes (int): number of output channels / classes.
            kernel_size (int): convolution kernel size.
            layers (list, optional): residual-block repeats per encoder
                stage. Defaults to [3, 3, 5, 2].
            base_channels (int, optional): channel count of the first encoder
                stage. Defaults to 64.
            output_activation (optional): activation function of form f(x)
                applied to the output. Defaults to None.
            **kwargs: forwarded to Models.ResNet.
        """
        super(PSPNetBuilder, self).__init__()
        # Fix: the original used a mutable list as the default argument;
        # a None sentinel gives the same behavior without the shared-state
        # pitfall.
        layers = [3, 3, 5, 2] if layers is None else layers

        self.encoder = Models.ResNet(in_channels,
                                     base_channels,
                                     kernel_size,
                                     layers=layers,
                                     **kwargs)

        encoder_channels = self.encoder.out_channels

        self.connector = self.encoder.connector_constructor(self.encoder)

        # Pyramid pooling doubles the channel count at the bottleneck.
        self.bottleneck = nn.PyramidPool(encoder_channels,
                                         encoder_channels * 2)

        self.upsample = torch.nn.UpsamplingBilinear2d(scale_factor=2)

        self.upsample_layers = []

        n_upsamples = len(layers) - 1

        # Decoder input: bottleneck output concatenated with encoder features.
        in_channels = self.bottleneck.out_channels + self.encoder.out_channels

        for i in range(n_upsamples):
            self.upsample_layers.append(
                nn.UpsampleConv2d(in_channels, in_channels // 2, kernel_size))
            # setattr registers each layer as a named submodule so that its
            # parameters are tracked by the parent torch Module.
            setattr(self, 'upsample_%d' % i, self.upsample_layers[-1])

            in_channels = in_channels // 2

        # 1x1 pointwise convolution maps decoder features to class logits.
        self.pw_conv = torch.nn.Conv2d(self.upsample_layers[-1].out_channels,
                                       n_classes,
                                       1,
                                       bias=False)

        self.output_activation = output_activation

        self.out_channels = n_classes
# --- Code example #3 (scrape-artifact separator; was: "コード例 #3" / "0") ---
    def __init__(self,
                 in_channels,
                 num_classes,
                 kernel_size=3,
                 layers=None,
                 base_channels=64,
                 output_activation=None,
                 **kwargs):
        """Build a ResNet-encoder FPN/U-Net style model.

        https://towardsdatascience.com/u-nets-with-resnet-encoders-and-cross-connections-d8ba94125a2c
        The model however uses instance normalization before each activation
        and uses Average pooling over max pooling.

        Args:
            in_channels (int): number of channels in the input tensor.
            num_classes (int): the number of output channels.
            kernel_size (int, optional): convolution kernel size.
                Defaults to 3.
            layers (list, optional): block repeats; the first len-1 entries
                configure the encoder stages, the last entry sizes the
                residual bottleneck. Defaults to [3, 3, 5, 2].
            base_channels (int, optional): the total number of channels the
                network is initialized with. Defaults to 64.
            output_activation (nn.Layer or torch.nn.functional, optional):
                activation function of form f(x). Defaults to None.
            **kwargs: forwarded to the parent constructor.
        """
        # Fix: replaced the mutable list default argument with a None
        # sentinel; behavior is unchanged for every caller.
        layers = [3, 3, 5, 2] if layers is None else layers
        encoder_layers = layers[:-1]
        encoder = Models.ResNet(in_channels,
                                base_channels,
                                kernel_size,
                                layers=encoder_layers)
        n_bottleneck = layers[-1]

        # Bottleneck reuses the encoder's residual layer type, repeated
        # n_bottleneck times at stride 1 (no further downsampling).
        class ResBottle(nn.RepeatedLayers):
            layer_constructor = encoder.layer_constructor
            _layer_dict = dict(stride=[[1, 1]] * n_bottleneck, )

        super(ResnetFPN, self).__init__(encoder,
                                        num_classes,
                                        kernel_size,
                                        bottleneck=ResBottle,
                                        **kwargs)
        # Only override the parent's output activation when one is given.
        if output_activation is not None:
            self.output_activation = output_activation
# --- Code example #4 (scrape-artifact separator; was: "コード例 #4" / "0") ---
        https://arxiv.org/abs/1505.04597
        The model however uses instance normalization before each activation
        and uses Average pooling over max pooling.

        Args:
            in_channels (int): number of channels in the input tensor.
            num_classes (int): number of output channels / classes.
            kernel_size (int): convolution kernel size.
            n_pool (int, optional): number of pooling stages. Defaults to 4.
            base_channels (int, optional): channel count of the first encoder
                stage. Defaults to 64.
            output_activation (optional): activation of form f(x). Defaults to
                None.
        """
        # NOTE(review): the `def __init__(...)` header and the opening `"""`
        # of this docstring are missing from the scraped source; the names
        # below (layer_repeats, n_pool, num_classes, kwargs, ...) are
        # presumably its parameters — recover the signature before reuse.
        layers = [layer_repeats] * n_pool
        encoder = Models.CSESEVGGNet(in_channels,
                                     base_channels,
                                     kernel_size,
                                     layers=layers)
        super(CSESEVGGFPN, self).__init__(encoder, num_classes, kernel_size,
                                          **kwargs)
        # Only override the parent's output activation when one is given.
        if output_activation is not None:
            self.output_activation = output_activation


if __name__ == "__main__":
    # Smoke test: run one ResnetFPN forward pass on a dummy RGB image.
    image = torch.zeros(1, 3, 256, 256)
    # Fix: the original also built `encoder = Models.ResNet(image, 1, 3, ...)`
    # here, passing the image *tensor* where an int channel count belongs
    # (compare the in-file usage Models.ResNet(in_channels, base_channels,
    # kernel_size, ...)), and never used the result — removed.
    model = ResnetFPN(3, 1, 3)
    output = model(image)
    print('done')