Example 1
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        norm_type: Optional[Union[Normalisation, str]] = None,
        acti_type: Optional[Union[Activation, str]] = None,
        dropout_prob: Optional[float] = None,
    ):

        super(ConvNormActi, self).__init__()

        layers = nn.ModuleList()

        # Convolution with "same" padding so spatial dimensions are preserved.
        conv_type = Conv[Conv.CONV, spatial_dims]
        padding_size = same_padding(kernel_size)
        conv = conv_type(in_channels, out_channels, kernel_size, padding=padding_size)
        layers.append(conv)

        # Optional normalisation, activation and dropout layers.
        if norm_type is not None:
            norm_type = Normalisation(norm_type)
            layers.append(SUPPORTED_NORM[norm_type](spatial_dims)(out_channels))
        if acti_type is not None:
            acti_type = Activation(acti_type)
            layers.append(SUPPORTED_ACTI[acti_type](inplace=True))
        if dropout_prob is not None:
            dropout_type = Dropout[Dropout.DROPOUT, spatial_dims]
            layers.append(dropout_type(p=dropout_prob))
        self.layers = nn.Sequential(*layers)
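A minimal usage sketch, not part of the original listing: it assumes the MONAI-style helpers used above (Conv, Dropout, Normalisation, Activation, SUPPORTED_NORM, SUPPORTED_ACTI, same_padding) are in scope and that ConvNormActi subclasses nn.Module, so the assembled stack can be applied directly through self.layers.

import torch

# Hypothetical usage: a 2D conv block with batch norm, ReLU and dropout.
block = ConvNormActi(
    spatial_dims=2,
    in_channels=1,
    out_channels=8,
    kernel_size=3,
    norm_type="batch",
    acti_type="relu",
    dropout_prob=0.1,
)

x = torch.rand(4, 1, 64, 64)   # (batch, channels, H, W)
y = block.layers(x)            # spatial size preserved by same_padding
print(y.shape)                 # expected: torch.Size([4, 8, 64, 64])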
Example 2
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        kernels: Sequence[int] = (3, 3),
        dilation: int = 1,
        norm_type: Union[Normalisation, str] = Normalisation.INSTANCE,
        acti_type: Union[Activation, str] = Activation.RELU,
        channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions of the input image.
            in_channels: number of input channels.
            out_channels: number of output channels.
            kernels: each integer k in `kernels` corresponds to a convolution layer with kernel size k.
            dilation: spacing between kernel elements.
            norm_type: {``"batch"``, ``"instance"``}
                Feature normalisation with batchnorm or instancenorm. Defaults to ``"instance"``.
            acti_type: {``"relu"``, ``"prelu"``, ``"relu6"``}
                Non-linear activation using ReLU or PReLU. Defaults to ``"relu"``.
            channel_matching: {``"pad"``, ``"project"``}
                Specifies how to handle a channel mismatch between the residual branch and the conv branch. Defaults to ``"pad"``.

                - ``"pad"``: with zero padding.
                - ``"project"``: with a trainable conv with kernel size.

        Raises:
            ValueError: channel matching must be pad or project, got {channel_matching}.
            ValueError: in_channels > out_channels is incompatible with `channel_matching=pad`.

        """
        super(HighResBlock, self).__init__()
        conv_type = Conv[Conv.CONV, spatial_dims]
        norm_type = Normalisation(norm_type)
        acti_type = Activation(acti_type)

        self.project, self.pad = None, None
        if in_channels != out_channels:
            channel_matching = ChannelMatching(channel_matching)
            if channel_matching == ChannelMatching.PROJECT:
                self.project = conv_type(in_channels, out_channels, kernel_size=1)
            if channel_matching == ChannelMatching.PAD:
                if in_channels > out_channels:
                    raise ValueError("in_channels > out_channels is incompatible with `channel_matching=pad`.")
                pad_1 = (out_channels - in_channels) // 2
                pad_2 = out_channels - in_channels - pad_1
                pad = [0, 0] * spatial_dims + [pad_1, pad_2] + [0, 0]
                self.pad = lambda input: F.pad(input, pad)

        layers = nn.ModuleList()
        _in_chns, _out_chns = in_channels, out_channels
        for kernel_size in kernels:
            layers.append(SUPPORTED_NORM[norm_type](spatial_dims)(_in_chns))
            layers.append(SUPPORTED_ACTI[acti_type](inplace=True))
            layers.append(
                conv_type(
                    _in_chns, _out_chns, kernel_size, padding=same_padding(kernel_size, dilation), dilation=dilation
                )
            )
            _in_chns = _out_chns
        self.layers = nn.Sequential(*layers)
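The channel_matching="pad" branch can be checked in isolation with plain PyTorch. The following is a standalone sketch (the tensor sizes are made up for illustration) of how the pad list lines up with F.pad, which consumes padding pairs from the last dimension backwards.

import torch
import torch.nn.functional as F

# Zero-pad the channel dimension so a residual with 8 channels can be
# added to a conv branch output with 16 channels (3D case).
spatial_dims, in_channels, out_channels = 3, 8, 16
pad_1 = (out_channels - in_channels) // 2
pad_2 = out_channels - in_channels - pad_1

# [0, 0] per spatial dim (W, H, D unchanged), then (pad_1, pad_2) for the
# channel dim, then [0, 0] for the batch dim.
pad = [0, 0] * spatial_dims + [pad_1, pad_2] + [0, 0]

x = torch.rand(2, in_channels, 16, 16, 16)
print(F.pad(x, pad).shape)  # torch.Size([2, 16, 16, 16, 16])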
Example 3
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        norm_type: Optional[Union[Normalisation, str]] = None,
        acti_type: Optional[Union[Activation, str]] = None,
        dropout_prob: Optional[float] = None,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions of the input image.
            in_channels: number of input channels.
            out_channels: number of output channels.
            kernel_size: size of the convolving kernel.
            norm_type: {``"batch"``, ``"instance"``}
                Feature normalisation with batchnorm or instancenorm. Defaults to ``"batch"``.
            acti_type: {``"relu"``, ``"prelu"``, ``"relu6"``}
                Non-linear activation using ReLU or PReLU. Defaults to ``"relu"``.
            dropout_prob: probability of the feature map to be zeroed
                (only applies to the penultimate conv layer).
        """

        super(ConvNormActi, self).__init__()

        layers = nn.ModuleList()

        conv_type = Conv[Conv.CONV, spatial_dims]
        padding_size = same_padding(kernel_size)
        conv = conv_type(in_channels, out_channels, kernel_size, padding=padding_size)
        layers.append(conv)

        if norm_type is not None:
            norm_type = Normalisation(norm_type)
            layers.append(SUPPORTED_NORM[norm_type](spatial_dims)(out_channels))
        if acti_type is not None:
            acti_type = Activation(acti_type)
            layers.append(SUPPORTED_ACTI[acti_type](inplace=True))
        if dropout_prob is not None:
            dropout_type = Dropout[Dropout.DROPOUT, spatial_dims]
            layers.append(dropout_type(p=dropout_prob))
        self.layers = nn.Sequential(*layers)
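For orientation only, a plain-PyTorch sketch of what the factory lookups in this block presumably resolve to in the 2D case with norm_type="batch", acti_type="relu" and dropout enabled; the concrete classes behind Conv, SUPPORTED_NORM, SUPPORTED_ACTI and Dropout, and the behaviour of same_padding, are assumptions here rather than facts taken from the listing.

import torch
import torch.nn as nn

in_channels, out_channels, kernel_size, dropout_prob = 1, 8, 3, 0.1
padding_size = (kernel_size - 1) // 2  # assumed same_padding behaviour for dilation=1

equivalent = nn.Sequential(
    nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding_size),  # Conv[Conv.CONV, 2]
    nn.BatchNorm2d(out_channels),   # SUPPORTED_NORM[Normalisation.BATCH](2)(out_channels)
    nn.ReLU(inplace=True),          # SUPPORTED_ACTI[Activation.RELU](inplace=True)
    nn.Dropout2d(p=dropout_prob),   # Dropout[Dropout.DROPOUT, 2](p=dropout_prob)
)

print(equivalent(torch.rand(4, in_channels, 64, 64)).shape)  # torch.Size([4, 8, 64, 64])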