Example #1
    def _get_encode_layer(self, in_channels: int, out_channels: int,
                          strides: int, is_last: bool) -> nn.Module:

        if self.num_res_units > 0:
            return ResidualUnit(
                dimensions=self.dimensions,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                kernel_size=self.kernel_size,
                subunits=self.num_res_units,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                last_conv_only=is_last,
            )
        return Convolution(
            dimensions=self.dimensions,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            kernel_size=self.kernel_size,
            act=self.act,
            norm=self.norm,
            dropout=self.dropout,
            conv_only=is_last,
        )
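A minimal usage sketch for the residual branch of this layer (assumes the older MONAI signature with `dimensions`, as used in these snippets; sizes are hypothetical):

import torch
from monai.networks.blocks import ResidualUnit

layer = ResidualUnit(dimensions=2, in_channels=1, out_channels=8,
                     strides=2, kernel_size=3, subunits=2)
x = torch.randn(1, 1, 32, 32)
print(layer(x).shape)  # torch.Size([1, 8, 16, 16]); stride 2 halves the spatial size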
Example #2
def get_conv_layer(
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    kernel_size: Union[Sequence[int], int] = 3,
) -> nn.Module:
    padding = same_padding(kernel_size)
    return Convolution(
        spatial_dims,
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        bias=False,
        conv_only=True,
        padding=padding,
    )
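Here `same_padding` is MONAI's helper that picks the padding preserving spatial size at stride 1. A simplified stand-in for the integer-kernel case (an approximation, not the library code):

def same_padding_simple(kernel_size: int, dilation: int = 1) -> int:
    # with this padding, output size == input size whenever stride == 1
    return (dilation * (kernel_size - 1)) // 2

assert same_padding_simple(3) == 1  # the default kernel size above
assert same_padding_simple(5) == 2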
Example #3
def get_deconv_block(
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
) -> nn.Module:
    return Convolution(
        dimensions=spatial_dims,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        act="RELU",
        norm="BATCH",
        bias=False,
        is_transposed=True,
        padding=1,
        output_padding=1,
    )
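With the default kernel size 3, `strides=2`, `padding=1` and `output_padding=1`, the transposed convolution exactly doubles each spatial dimension: out = (in - 1) * 2 - 2 * 1 + 3 + 1 = 2 * in. A quick check (hypothetical sizes; assumes the older `dimensions` keyword as in this snippet):

import torch
from monai.networks.blocks import Convolution

up = Convolution(dimensions=2, in_channels=8, out_channels=4, strides=2,
                 act="RELU", norm="BATCH", bias=False, is_transposed=True,
                 padding=1, output_padding=1)
x = torch.randn(1, 8, 16, 16)
print(up(x).shape)  # torch.Size([1, 4, 32, 32])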
Example #4
    def _get_intermediate_module(
            self, in_channels: int,
            num_inter_units: int) -> Tuple[nn.Module, int]:
        # Define some types
        intermediate: nn.Module
        unit: nn.Module

        intermediate = nn.Identity()
        layer_channels = in_channels

        if self.inter_channels:
            intermediate = nn.Sequential()

            for i, (dc, di) in enumerate(
                    zip(self.inter_channels, self.inter_dilations)):
                if self.num_inter_units > 0:
                    unit = ResidualUnit(
                        dimensions=self.dimensions,
                        in_channels=layer_channels,
                        out_channels=dc,
                        strides=1,
                        kernel_size=self.kernel_size,
                        subunits=self.num_inter_units,
                        act=self.act,
                        norm=self.norm,
                        dropout=self.dropout,
                        dilation=di,
                    )
                else:
                    unit = Convolution(
                        dimensions=self.dimensions,
                        in_channels=layer_channels,
                        out_channels=dc,
                        strides=1,
                        kernel_size=self.kernel_size,
                        act=self.act,
                        norm=self.norm,
                        dropout=self.dropout,
                        dilation=di,
                    )

                intermediate.add_module("inter_%i" % i, unit)
                layer_channels = dc

        return intermediate, layer_channels
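The loop above chains units with increasing dilations, so the receptive field grows without further downsampling. A self-contained sketch of the same pattern in plain PyTorch (channel and dilation values are hypothetical):

import torch.nn as nn

inter_channels, inter_dilations = [32, 32], [1, 2]
intermediate = nn.Sequential()
layer_channels = 16
for i, (dc, di) in enumerate(zip(inter_channels, inter_dilations)):
    # padding == dilation keeps spatial size for a 3x3 kernel
    intermediate.add_module("inter_%i" % i,
                            nn.Conv2d(layer_channels, dc, 3, padding=di, dilation=di))
    layer_channels = dc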
Example #5
def get_conv_block(
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    kernel_size: Union[Sequence[int], int] = 3,
    act: Optional[Union[Tuple, str]] = "RELU",
    norm: Optional[Union[Tuple, str]] = "BATCH",
) -> nn.Module:
    padding = same_padding(kernel_size)
    return Convolution(
        spatial_dims,
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        act=act,
        norm=norm,
        bias=False,
        conv_only=False,
        padding=padding,
    )
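Unlike `get_conv_layer` in Example #2, `conv_only=False` keeps the norm and activation in the returned module. A hedged check (hypothetical sizes):

import torch
from monai.networks.blocks import Convolution

block = Convolution(2, 4, 8, kernel_size=3, act="RELU", norm="BATCH",
                    bias=False, conv_only=False, padding=1)
x = torch.randn(1, 4, 24, 24)
print(block(x).shape)  # torch.Size([1, 8, 24, 24]); "same" padding keeps 24x24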
Example #6
    def _get_layer(self, in_channels: int, out_channels: int, strides: int,
                   is_last: bool) -> Union[ResidualUnit, Convolution]:
        """
        Returns a layer accepting inputs with `in_channels` number of channels and producing outputs with `out_channels`
        number of channels. `strides` is the downsampling factor, i.e. the convolutional stride. If `is_last`
        is True, this is the final layer and is not expected to include activation and normalization layers.
        """

        layer: Union[ResidualUnit, Convolution]

        if self.num_res_units > 0:
            layer = ResidualUnit(
                subunits=self.num_res_units,
                last_conv_only=is_last,
                dimensions=self.dimensions,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                kernel_size=self.kernel_size,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                bias=self.bias,
            )
        else:
            layer = Convolution(
                conv_only=is_last,
                dimensions=self.dimensions,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                kernel_size=self.kernel_size,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                bias=self.bias,
            )

        return layer
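When `is_last` is True the block is created with `conv_only=True` / `last_conv_only=True`, so no activation or normalization follows the final convolution. A quick way to see this (hypothetical sizes):

from monai.networks.blocks import Convolution

head = Convolution(conv_only=True, dimensions=2, in_channels=8, out_channels=2,
                   strides=1, kernel_size=3)
print(head)  # a bare Conv2d; no ADN (activation/dropout/norm) is appended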
Example #7
    def _get_decode_layer(self, in_channels: int, out_channels: int,
                          strides: int, is_last: bool) -> nn.Sequential:

        decode = nn.Sequential()

        conv = Convolution(
            dimensions=self.dimensions,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            kernel_size=self.up_kernel_size,
            act=self.act,
            norm=self.norm,
            dropout=self.dropout,
            conv_only=is_last and self.num_res_units == 0,
            is_transposed=True,
        )

        decode.add_module("conv", conv)

        if self.num_res_units > 0:
            ru = ResidualUnit(
                dimensions=self.dimensions,
                in_channels=out_channels,
                out_channels=out_channels,
                strides=1,
                kernel_size=self.kernel_size,
                subunits=1,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                last_conv_only=is_last,
            )

            decode.add_module("resunit", ru)

        return decode
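A standalone sketch of one decode stage built this way: a stride-2 transposed conv to upsample, followed by a one-subunit residual refinement (hypothetical sizes; assumes the older `dimensions` keyword):

import torch
import torch.nn as nn
from monai.networks.blocks import Convolution, ResidualUnit

decode = nn.Sequential()
decode.add_module("conv", Convolution(dimensions=2, in_channels=16, out_channels=8,
                                      strides=2, kernel_size=3, is_transposed=True))
decode.add_module("resunit", ResidualUnit(dimensions=2, in_channels=8, out_channels=8,
                                          strides=1, kernel_size=3, subunits=1))
x = torch.randn(1, 16, 8, 8)
print(decode(x).shape)  # torch.Size([1, 8, 16, 16])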
Example #8
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        kernels: Sequence[int] = (3, 3),
        dilation: Union[Sequence[int], int] = 1,
        norm_type: Union[Tuple, str] = ("batch", {
            "affine": True
        }),
        acti_type: Union[Tuple, str] = ("relu", {
            "inplace": True
        }),
        channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions of the input image.
            in_channels: number of input channels.
            out_channels: number of output channels.
            kernels: each integer k in `kernels` corresponds to a convolution layer with kernel size k.
            dilation: spacing between kernel elements.
            norm_type: feature normalization type and arguments.
                Defaults to ``("batch", {"affine": True})``.
            acti_type: {``"relu"``, ``"prelu"``, ``"relu6"``}
                Non-linear activation using ReLU or PReLU. Defaults to ``"relu"``.
            channel_matching: {``"pad"``, ``"project"``}
                Specifies how a channel mismatch between the residual branch and the conv branch is handled. Defaults to ``"pad"``.

                - ``"pad"``: with zero padding.
                - ``"project"``: with a trainable conv with kernel size one.

        Raises:
            ValueError: When ``channel_matching=pad`` and ``in_channels > out_channels``. Incompatible values.

        """
        super(HighResBlock, self).__init__()
        self.chn_pad = ChannelPad(spatial_dims=spatial_dims,
                                  in_channels=in_channels,
                                  out_channels=out_channels,
                                  mode=channel_matching)

        layers = nn.ModuleList()
        _in_chns, _out_chns = in_channels, out_channels

        for kernel_size in kernels:
            layers.append(
                ADN(ordering="NA",
                    in_channels=_in_chns,
                    act=acti_type,
                    norm=norm_type,
                    norm_dim=spatial_dims))
            layers.append(
                Convolution(
                    dimensions=spatial_dims,
                    in_channels=_in_chns,
                    out_channels=_out_chns,
                    kernel_size=kernel_size,
                    dilation=dilation,
                ))
            _in_chns = _out_chns

        self.layers = nn.Sequential(*layers)
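With `channel_matching="pad"` the residual branch is zero-padded on the channel axis so it can be added to the conv branch. A minimal sketch of the idea (this variant appends the zeros after the existing channels; `ChannelPad` handles the general case):

import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 16, 16)  # residual branch with 8 channels
out_channels = 12
skip = F.pad(x, (0, 0, 0, 0, 0, out_channels - x.shape[1]))  # pad order: W, H, then C
print(skip.shape)  # torch.Size([1, 12, 16, 16])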
Example #9
    def __init__(
        self,
        spatial_dims: int = 3,
        in_channels: int = 1,
        out_channels: int = 1,
        norm_type: Union[str, tuple] = ("batch", {
            "affine": True
        }),
        acti_type: Union[str, tuple] = ("relu", {
            "inplace": True
        }),
        dropout_prob: Optional[Union[Tuple, str, float]] = 0.0,
        layer_params: Sequence[Dict] = DEFAULT_LAYER_PARAMS_3D,
        channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,
    ) -> None:

        super(HighResNet, self).__init__()
        blocks = nn.ModuleList()

        # initial conv layer
        params = layer_params[0]
        _in_chns, _out_chns = in_channels, params["n_features"]
        blocks.append(
            Convolution(
                dimensions=spatial_dims,
                in_channels=_in_chns,
                out_channels=_out_chns,
                kernel_size=params["kernel_size"],
                adn_ordering="NA",
                act=acti_type,
                norm=norm_type,
            ))

        # residual blocks
        for idx, params in enumerate(layer_params[1:-2]):  # res blocks except the first and last two conv layers
            _in_chns, _out_chns = _out_chns, params["n_features"]
            _dilation = 2**idx
            for _ in range(params["repeat"]):
                blocks.append(
                    HighResBlock(
                        spatial_dims=spatial_dims,
                        in_channels=_in_chns,
                        out_channels=_out_chns,
                        kernels=params["kernels"],
                        dilation=_dilation,
                        norm_type=norm_type,
                        acti_type=acti_type,
                        channel_matching=channel_matching,
                    ))
                _in_chns = _out_chns

        # final conv layers
        params = layer_params[-2]
        _in_chns, _out_chns = _out_chns, params["n_features"]
        blocks.append(
            Convolution(
                dimensions=spatial_dims,
                in_channels=_in_chns,
                out_channels=_out_chns,
                kernel_size=params["kernel_size"],
                adn_ordering="NAD",
                act=acti_type,
                norm=norm_type,
                dropout=dropout_prob,
            ))

        params = layer_params[-1]
        _in_chns = _out_chns
        blocks.append(
            Convolution(
                dimensions=spatial_dims,
                in_channels=_in_chns,
                out_channels=out_channels,
                kernel_size=params["kernel_size"],
                adn_ordering="NAD",
                act=acti_type,
                norm=norm_type,
                dropout=dropout_prob,
            ))

        self.blocks = nn.Sequential(*blocks)
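Note that `_dilation = 2**idx` doubles the dilation for each residual-block group (1, 2, 4, ...), growing the receptive field while same padding keeps the spatial size. A hedged usage sketch of the assembled network (relies on the default `DEFAULT_LAYER_PARAMS_3D`; sizes are hypothetical):

import torch
from monai.networks.nets import HighResNet

net = HighResNet(spatial_dims=2, in_channels=1, out_channels=3)
x = torch.randn(1, 1, 64, 64)
print(net(x).shape)  # torch.Size([1, 3, 64, 64]); spatial size is preserved throughout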
Example #10
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        n_chns_1: int,
        n_chns_2: int,
        n_chns_3: int,
        conv_param_1: Optional[Dict] = None,
        conv_param_2: Optional[Dict] = None,
        conv_param_3: Optional[Dict] = None,
        project: Optional[Convolution] = None,
        r: int = 2,
        acti_type_1: Union[Tuple[str, Dict], str] = ("relu", {
            "inplace": True
        }),
        acti_type_2: Union[Tuple[str, Dict], str] = "sigmoid",
        acti_type_final: Optional[Union[Tuple[str, Dict], str]] = ("relu", {
            "inplace":
            True
        }),
    ):
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
            in_channels: number of input channels.
            n_chns_1: number of output channels in the 1st convolution.
            n_chns_2: number of output channels in the 2nd convolution.
            n_chns_3: number of output channels in the 3rd convolution.
            conv_param_1: additional parameters to the 1st convolution.
                Defaults to ``{"kernel_size": 1, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})}``
            conv_param_2: additional parameters to the 2nd convolution.
                Defaults to ``{"kernel_size": 3, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})}``
            conv_param_3: additional parameters to the 3rd convolution.
                Defaults to ``{"kernel_size": 1, "norm": Norm.BATCH, "act": None}``
            project: when the number of residual channels does not match the number of output
                channels, a projection (Conv) layer/block is used to adjust the channels. In
                SENet, it consists of a Conv layer as well as a Norm layer.
                Defaults to None: an identity is used when the channels already match,
                otherwise a Conv layer with kernel size one is created.
            r: the reduction ratio r in the paper. Defaults to 2.
            acti_type_1: activation type of the hidden squeeze layer. Defaults to "relu".
            acti_type_2: activation type of the output squeeze layer. Defaults to "sigmoid".
            acti_type_final: activation type of the end of the block. Defaults to "relu".

        See also:

            :py:class:`monai.networks.blocks.ChannelSELayer`

        """
        super(SEBlock, self).__init__()

        if not conv_param_1:
            conv_param_1 = {"kernel_size": 1, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})}
        self.conv1 = Convolution(dimensions=spatial_dims,
                                 in_channels=in_channels,
                                 out_channels=n_chns_1,
                                 **conv_param_1)

        if not conv_param_2:
            conv_param_2 = {"kernel_size": 3, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})}
        self.conv2 = Convolution(dimensions=spatial_dims,
                                 in_channels=n_chns_1,
                                 out_channels=n_chns_2,
                                 **conv_param_2)

        if not conv_param_3:
            conv_param_3 = {"kernel_size": 1, "norm": Norm.BATCH, "act": None}
        self.conv3 = Convolution(dimensions=spatial_dims,
                                 in_channels=n_chns_2,
                                 out_channels=n_chns_3,
                                 **conv_param_3)

        self.se_layer = ChannelSELayer(spatial_dims=spatial_dims,
                                       in_channels=n_chns_3,
                                       r=r,
                                       acti_type_1=acti_type_1,
                                       acti_type_2=acti_type_2)

        if project is None and in_channels != n_chns_3:
            self.project = Conv[Conv.CONV, spatial_dims](in_channels,
                                                         n_chns_3,
                                                         kernel_size=1)
        elif project is None:
            self.project = nn.Identity()
        else:
            self.project = project

        if acti_type_final is not None:
            act_final, act_final_args = split_args(acti_type_final)
            self.act = Act[act_final](**act_final_args)
        else:
            self.act = nn.Identity()
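A hedged usage sketch for this block (here 4 != 16, so the kernel-size-1 projection branch is created automatically; sizes are hypothetical):

import torch
from monai.networks.blocks import SEBlock

blk = SEBlock(spatial_dims=2, in_channels=4, n_chns_1=8, n_chns_2=8, n_chns_3=16, r=2)
x = torch.randn(1, 4, 12, 12)
print(blk(x).shape)  # torch.Size([1, 16, 12, 12]); same padding keeps 12x12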