def get_conv_layer(
    spatial_dims: int, in_channels: int, out_channels: int, kernel_size: Union[Sequence[int], int] = 3
) -> nn.Module:
    padding = same_padding(kernel_size)
    mod: nn.Module = Convolution(
        spatial_dims, in_channels, out_channels, kernel_size=kernel_size, bias=False, conv_only=True, padding=padding
    )
    return mod
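
These excerpts follow MONAI conventions but omit their imports. A minimal set that should make the standalone helpers runnable (based on MONAI's public API; the EvoNormLayer used in later examples is a non-standard addition and is not covered here), plus a small usage sketch:

from typing import Optional, Sequence, Tuple, Type, Union

import torch
from torch import nn

from monai.networks.blocks import Convolution
from monai.networks.layers import Act, Conv, Norm, same_padding

# Usage sketch: with "same" padding and the default stride of 1, the spatial
# size is preserved while the channel count goes from 3 to 8.
conv = get_conv_layer(spatial_dims=2, in_channels=3, out_channels=8)
out = conv(torch.randn(1, 3, 64, 64))
print(out.shape)  # torch.Size([1, 8, 64, 64])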
Example 2
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        conv_out_channels: int,
        kernel_sizes=(1, 3, 3, 3),
        dilations=(1, 2, 4, 6),
        norm_type=Norm.BATCH,
        acti_type=Act.LEAKYRELU,
    ):
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
            in_channels: number of input channels.
            conv_out_channels: number of output channels of each atrous conv.
                The final number of output channels is conv_out_channels * len(kernel_sizes).
            kernel_sizes: a sequence of four convolutional kernel sizes.
                Defaults to (1, 3, 3, 3) for four (dilated) convolutions.
            dilations: a sequence of four convolutional dilation parameters.
                Defaults to (1, 2, 4, 6) for four (dilated) convolutions.
            norm_type: final kernel-size-one convolution normalization type.
                Defaults to batch norm.
            acti_type: final kernel-size-one convolution activation type.
                Defaults to leaky ReLU.
        """
        super().__init__()
        if len(kernel_sizes) != len(dilations):
            raise ValueError("len(kernel_sizes) and len(dilations) must be the same.")
        pads = tuple(same_padding(k, d) for k, d in zip(kernel_sizes, dilations))

        self.convs = nn.ModuleList()
        for k, d, p in zip(kernel_sizes, dilations, pads):
            _conv = Conv[Conv.CONV, spatial_dims](
                in_channels=in_channels,
                out_channels=conv_out_channels,
                kernel_size=k,
                dilation=d,
                padding=p,
            )
            self.convs.append(_conv)

        out_channels = conv_out_channels * len(pads)  # final conv output channels
        self.conv_k1 = Convolution(
            dimensions=spatial_dims,
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=1,
            act=acti_type,
            norm=norm_type,
        )
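
The forward pass is not part of this excerpt. A plausible sketch, assuming the parallel conv outputs are concatenated along the channel axis before the kernel-size-one fusion (which is what the conv_out_channels * len(kernel_sizes) figure in the docstring implies):

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Run the (dilated) convolutions in parallel and concatenate on channels.
        x_out = torch.cat([conv(x) for conv in self.convs], dim=1)
        # Fuse with the final kernel-size-one convolution (norm + activation).
        return self.conv_k1(x_out)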
Example 3
def get_conv_layer(
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    kernel_size: Union[Sequence[int], int] = 3,
    evonorm: Optional[EvoNormLayer] = None,
) -> nn.Module:
    padding = same_padding(kernel_size)
    return Convolution(
        spatial_dims,
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        bias=False,
        conv_only=True,
        padding=padding,
        evonorm=evonorm,
    )
Example 4
def get_conv_block(
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    kernel_size: Union[Sequence[int], int] = 3,
    act: Optional[Union[Tuple, str]] = "RELU",
    norm: Optional[Union[Tuple, str]] = "BATCH",
) -> nn.Module:
    padding = same_padding(kernel_size)
    return Convolution(
        spatial_dims,
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        act=act,
        norm=norm,
        bias=False,
        conv_only=False,
        padding=padding,
    )
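
A quick usage sketch: with conv_only=False the returned Convolution is a conv + norm + activation block (batch norm and ReLU by default here):

block = get_conv_block(spatial_dims=3, in_channels=1, out_channels=16)
out = block(torch.randn(2, 1, 32, 32, 32))
print(out.shape)  # torch.Size([2, 16, 32, 32, 32])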
Example 5
def get_conv_block(
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    kernel_size: Union[Sequence[int], int] = 3,
    strides: int = 1,
    padding: Optional[Union[Tuple[int, ...], int]] = None,
    evonorm: Optional[EvoNormLayer] = None,
    act: Optional[Union[Tuple, str]] = "RELU",
    norm: Optional[Union[Tuple, str]] = "BATCH",
    initializer: Optional[str] = "kaiming_uniform",
) -> nn.Module:
    if padding is None:
        padding = same_padding(kernel_size)
    conv_block = Convolution(
        spatial_dims,
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        strides=strides,
        evonorm=evonorm,
        act=act,
        norm=norm,
        bias=False,
        conv_only=False,
        padding=padding,
    )
    conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims]
    for m in conv_block.modules():
        if isinstance(m, conv_type):
            if initializer == "kaiming_uniform":
                nn.init.kaiming_uniform_(m.weight)  # uniform variant, matching the option's name
            elif initializer == "zeros":
                nn.init.zeros_(m.weight)
            else:
                raise ValueError(
                    f"initializer {initializer} is not supported; "
                    "currently supported: 'kaiming_uniform' and 'zeros'."
                )
    return conv_block
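
A usage sketch for the initializer option (note this variant forwards an evonorm argument to Convolution, so it assumes a fork of MONAI that accepts it). Zero initialization is the kind of choice often used on the last convolution of a residual branch so the branch starts near zero:

head = get_conv_block(spatial_dims=2, in_channels=8, out_channels=8, initializer="zeros")
# With zero-initialized conv weights and bias=False, the conv output is all
# zeros before training; the norm/activation then set the initial block output.
out = head(torch.randn(1, 8, 24, 24))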
Example 6
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        conv_out_channels: int,
        kernel_sizes: Sequence[int] = (1, 3, 3, 3),
        dilations: Sequence[int] = (1, 2, 4, 6),
        norm_type=Norm.BATCH,
        acti_type=Act.LEAKYRELU,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
            in_channels: number of input channels.
            conv_out_channels: number of output channels of each atrous conv.
                The final number of output channels is conv_out_channels * len(kernel_sizes).
            kernel_sizes: a sequence of four convolutional kernel sizes.
                Defaults to (1, 3, 3, 3) for four (dilated) convolutions.
            dilations: a sequence of four convolutional dilation parameters.
                Defaults to (1, 2, 4, 6) for four (dilated) convolutions.
            norm_type: final kernel-size-one convolution normalization type.
                Defaults to batch norm.
            acti_type: final kernel-size-one convolution activation type.
                Defaults to leaky ReLU.

        Raises:
            ValueError: When ``kernel_sizes`` length differs from ``dilations``.

        See also:

            :py:class:`monai.networks.layers.Act`
            :py:class:`monai.networks.layers.Conv`
            :py:class:`monai.networks.layers.Norm`

        """
        super().__init__()
        if len(kernel_sizes) != len(dilations):
            raise ValueError(
                "kernel_sizes and dilations length must match, "
                f"got kernel_sizes={len(kernel_sizes)} dilations={len(dilations)}."
            )
        pads = tuple(same_padding(k, d) for k, d in zip(kernel_sizes, dilations))

        self.convs = nn.ModuleList()
        for k, d, p in zip(kernel_sizes, dilations, pads):
            _conv = Conv[Conv.CONV, spatial_dims](
                in_channels=in_channels, out_channels=conv_out_channels, kernel_size=k, dilation=d, padding=p
            )
            self.convs.append(_conv)

        out_channels = conv_out_channels * len(pads)  # final conv output channels
        self.conv_k1 = Convolution(
            dimensions=spatial_dims,
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=1,
            act=acti_type,
            norm=norm_type,
        )
Example 7
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        conv_out_channels: int,
        kernel_sizes: Sequence[int] = (1, 3, 3, 3),
        dilations: Sequence[int] = (1, 2, 4, 6),
        norm_type: Optional[Union[Tuple, str]] = "BATCH",
        acti_type: Optional[Union[Tuple, str]] = "LEAKYRELU",
        bias: bool = False,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
            in_channels: number of input channels.
            conv_out_channels: number of output channels of each atrous conv.
                The final number of output channels is conv_out_channels * len(kernel_sizes).
            kernel_sizes: a sequence of four convolutional kernel sizes.
                Defaults to (1, 3, 3, 3) for four (dilated) convolutions.
            dilations: a sequence of four convolutional dilation parameters.
                Defaults to (1, 2, 4, 6) for four (dilated) convolutions.
            norm_type: final kernel-size-one convolution normalization type.
                Defaults to batch norm.
            acti_type: final kernel-size-one convolution activation type.
                Defaults to leaky ReLU.
            bias: whether to have a bias term in convolution blocks. Defaults to False.
                According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
                if a conv layer is directly followed by a batch norm layer, bias should be False.

        Raises:
            ValueError: When ``kernel_sizes`` length differs from ``dilations``.

        See also:

            :py:class:`monai.networks.layers.Act`
            :py:class:`monai.networks.layers.Conv`
            :py:class:`monai.networks.layers.Norm`

        """
        super().__init__()
        if len(kernel_sizes) != len(dilations):
            raise ValueError(
                "kernel_sizes and dilations length must match, "
                f"got kernel_sizes={len(kernel_sizes)} dilations={len(dilations)}."
            )
        pads = tuple(same_padding(k, d) for k, d in zip(kernel_sizes, dilations))

        self.convs = nn.ModuleList()
        for k, d, p in zip(kernel_sizes, dilations, pads):
            _conv = Conv[Conv.CONV, spatial_dims](
                in_channels=in_channels, out_channels=conv_out_channels, kernel_size=k, dilation=d, padding=p
            )
            self.convs.append(_conv)

        out_channels = conv_out_channels * len(pads)  # final conv output channels
        self.conv_k1 = Convolution(
            spatial_dims=spatial_dims,
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=1,
            act=acti_type,
            norm=norm_type,
            bias=bias,
        )
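
An instantiation sketch, assuming this __init__ belongs to an ASPP-style block such as MONAI's SimpleASPP (the class name is not shown in the excerpt):

aspp = SimpleASPP(spatial_dims=3, in_channels=32, conv_out_channels=16)
# Four parallel dilated convs with 16 channels each -> 64 fused channels,
# spatial size preserved by the "same" padding.
y = aspp(torch.randn(1, 32, 16, 16, 16))
print(y.shape)  # torch.Size([1, 64, 16, 16, 16])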