def __init__(
        self,
        spatial_dims: int,
        in_chns: int,
        out_chns: int,
        act: Union[str, tuple],
        norm: Union[str, tuple],
        bias: bool,
        dropout: Union[float, tuple] = 0.0,
        dim: Optional[int] = None,
    ):
        """
        Args:
            spatial_dims: number of spatial dimensions.
            in_chns: number of input channels.
            out_chns: number of output channels.
            act: activation type and arguments.
            norm: feature normalization type and arguments.
            bias: whether to have a bias term in convolution blocks.
            dropout: dropout ratio. Defaults to no dropout.

        .. deprecated:: 0.6.0
            ``dim`` is deprecated, use ``spatial_dims`` instead.
        """
        super().__init__()

        if dim is not None:
            spatial_dims = dim
        conv_0 = Convolution(spatial_dims, in_chns, out_chns, act=act, norm=norm, dropout=dropout, bias=bias, padding=1)
        conv_1 = Convolution(
            spatial_dims, out_chns, out_chns, act=act, norm=norm, dropout=dropout, bias=bias, padding=1
        )
        self.add_module("conv_0", conv_0)
        self.add_module("conv_1", conv_1)
Example #2
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        n_chns_1: int,
        n_chns_2: int,
        n_chns_3: int,
        conv_param_1: Optional[Dict[str, Any]] = None,
        conv_param_2: Optional[Dict[str, Any]] = None,
        conv_param_3: Optional[Dict[str, Any]] = None,
        r: int = 2,
        acti_type_1="relu",
        acti_type_2="sigmoid",
    ):
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
            in_channels: number of input channels.
            n_chns_1: number of output channels in the 1st convolution.
            n_chns_2: number of output channels in the 2nd convolution.
            n_chns_3: number of output channels in the 3rd convolution.
            conv_param_1: additional parameters to the 1st convolution.
                Defaults to ``{"kernel_size": 1, "norm": Norm.BATCH, "act": Act.RELU}``
            conv_param_2: additional parameters to the 2nd convolution.
                Defaults to ``{"kernel_size": 3, "norm": Norm.BATCH, "act": Act.RELU}``
            conv_param_3: additional parameters to the 3rd convolution.
                Defaults to ``{"kernel_size": 1, "norm": Norm.BATCH, "act": None}``
            r: the reduction ratio r in the paper. Defaults to 2.
            acti_type_1: activation type of the hidden squeeze layer. Defaults to "relu".
            acti_type_2: activation type of the output squeeze layer. Defaults to "sigmoid".

        See also:

            :py:class:`monai.networks.blocks.ChannelSELayer`

        """
        super(SEBlock, self).__init__()

        if not conv_param_1:
            conv_param_1 = {"kernel_size": 1, "norm": Norm.BATCH, "act": Act.RELU}
        self.conv1 = Convolution(
            dimensions=spatial_dims, in_channels=in_channels, out_channels=n_chns_1, **conv_param_1
        )

        if not conv_param_2:
            conv_param_2 = {"kernel_size": 3, "norm": Norm.BATCH, "act": Act.RELU}
        self.conv2 = Convolution(dimensions=spatial_dims, in_channels=n_chns_1, out_channels=n_chns_2, **conv_param_2)

        if not conv_param_3:
            conv_param_3 = {"kernel_size": 1, "norm": Norm.BATCH, "act": None}
        self.conv3 = Convolution(dimensions=spatial_dims, in_channels=n_chns_2, out_channels=n_chns_3, **conv_param_3)

        self.se_layer = ChannelSELayer(
            spatial_dims=spatial_dims, in_channels=n_chns_3, r=r, acti_type_1=acti_type_1, acti_type_2=acti_type_2
        )

        if in_channels != n_chns_3:  # project when the residual and output channel counts don't match
            self.project = Conv[Conv.CONV, spatial_dims](in_channels, n_chns_3, kernel_size=1)
        else:
            self.project = None
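A minimal usage sketch, assuming this is MONAI's `SEBlock` from `monai.networks.blocks` (the residual add and final activation happen in a `forward` not shown here):

import torch
from monai.networks.blocks import SEBlock  # assumed import path

se = SEBlock(spatial_dims=2, in_channels=8, n_chns_1=4, n_chns_2=4, n_chns_3=8)
x = torch.randn(2, 8, 32, 32)
print(se(x).shape)  # torch.Size([2, 8, 32, 32]); kernel sizes 1/3/1 with "same" padding preserve spatial size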
Example #3
    def __init__(
        self,
        spatial_dims,
        in_channels,
        out_channels,
        stride=2,
        act_1=Act.LEAKYRELU,
        norm_1=Norm.BATCH,
        act_2=Act.LEAKYRELU,
        norm_2=Norm.BATCH,
        conv_only=True,
    ):
        """
        A sequence of Conv_1 + Norm_1 + Act_1 + Conv_2 (+ Norm_2 + Act_2).

        `norm_2` and `act_2` are ignored when `conv_only` is True.
        `stride` is for `Conv_1`, typically stride=2 for 2x spatial downsampling.

        Args:
            spatial_dims: number of input spatial dimensions.
            in_channels: number of input channels.
            out_channels: number of output channels.
            stride: stride of the first convolution; stride=2 gives 2x spatial downsampling.
            act_1: activation type of the first convolution.
            norm_1: normalization type of the first convolution.
            act_2: activation type of the second convolution.
            norm_2: normalization type of the second convolution.
            conv_only: whether the second conv is a convolution layer only. Defaults to True,
                indicating that `act_2` and `norm_2` are not in use.
        """
        super(DoubleConv, self).__init__()
        self.conv = nn.Sequential(
            Convolution(
                spatial_dims,
                in_channels,
                out_channels,
                strides=stride,
                act=act_1,
                norm=norm_1,
                bias=False,
            ),
            Convolution(
                spatial_dims,
                out_channels,
                out_channels,
                act=act_2,
                norm=norm_2,
                conv_only=conv_only,
            ),
        )
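Since `forward` is not shown, a sketch that exercises the stored `nn.Sequential` directly (assumes the `DoubleConv` defined above is in scope):

import torch

dc = DoubleConv(spatial_dims=3, in_channels=1, out_channels=16)  # stride defaults to 2
x = torch.randn(1, 1, 32, 32, 32)
print(dc.conv(x).shape)  # torch.Size([1, 16, 16, 16, 16]); the first conv halves each spatial dim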
Example #4
    def _get_encode_layer(self, in_channels: int, out_channels: int,
                          strides: int, is_last: bool) -> nn.Module:

        if self.num_res_units > 0:
            return ResidualUnit(
                dimensions=self.dimensions,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                kernel_size=self.kernel_size,
                subunits=self.num_res_units,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                last_conv_only=is_last,
            )
        else:
            return Convolution(
                dimensions=self.dimensions,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                kernel_size=self.kernel_size,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                conv_only=is_last,
            )
Example #5
 def _get_encode_layer(self, in_channels: int, out_channels: int,
                       strides: int, is_last: bool) -> nn.Module:
     """
     Returns a single layer of the encoder part of the network.
     """
     mod: nn.Module
     if self.num_res_units > 0:
         mod = ResidualUnit(
             spatial_dims=self.dimensions,
             in_channels=in_channels,
             out_channels=out_channels,
             strides=strides,
             kernel_size=self.kernel_size,
             subunits=self.num_res_units,
             act=self.act,
             norm=self.norm,
             dropout=self.dropout,
             bias=self.bias,
             last_conv_only=is_last,
         )
         return mod  # the residual unit is the whole encode layer in this case
     mod = Convolution(
         spatial_dims=self.dimensions,
         in_channels=in_channels,
         out_channels=out_channels,
         strides=strides,
         kernel_size=self.kernel_size,
         act=self.act,
         norm=self.norm,
         dropout=self.dropout,
         bias=self.bias,
         conv_only=is_last,
     )
     return mod
Example #6
    def _get_layer(self, in_channels, out_channels, strides, is_last):
        """
        Returns a layer accepting inputs with `in_channels` number of channels and producing outputs of `out_channels`
        number of channels. The `strides` value gives the upsampling factor, i.e. the transposed convolutional stride. If `is_last`
        is True this is the final layer and is not expected to include activation and normalization layers.
        """
        common_kwargs = dict(
            dimensions=self.dimensions,
            out_channels=out_channels,
            kernel_size=self.kernel_size,
            act=self.act,
            norm=self.norm,
            dropout=self.dropout,
            bias=self.bias,
        )

        layer = Convolution(
            in_channels=in_channels,
            strides=strides,
            is_transposed=True,
            conv_only=is_last or self.num_res_units > 0,
            **common_kwargs,
        )

        if self.num_res_units > 0:
            ru = ResidualUnit(
                in_channels=out_channels,
                subunits=self.num_res_units,
                last_conv_only=is_last,
                **common_kwargs,
            )

            layer = nn.Sequential(layer, ru)

        return layer
Example #7
def get_conv_layer(
    spatial_dims: int, in_channels: int, out_channels: int, kernel_size: Union[Sequence[int], int] = 3
) -> nn.Module:
    padding = same_padding(kernel_size)
    mod: nn.Module = Convolution(
        spatial_dims, in_channels, out_channels, kernel_size=kernel_size, bias=False, conv_only=True, padding=padding
    )
    return mod
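With `conv_only=True` the returned module applies only the convolution, with no norm/act/dropout; `same_padding(3)` is 1, so spatial size is preserved:

layer = get_conv_layer(spatial_dims=2, in_channels=3, out_channels=8)  # bare 3x3 conv, stride 1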
Example #8
 def test_dropout1(self):
     conv = Convolution(2,
                        self.input_channels,
                        self.output_channels,
                        dropout=0.15)
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[0],
                       self.im_shape[1])
     self.assertEqual(out.shape, expected_shape)
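The fixtures (`self.imt`, `self.im_shape`, channel counts) are not part of this excerpt; a plausible minimal `setUp` for the 2D cases might look like the sketch below (values are hypothetical; the 3D tests build `self.imt` with permuted spatial axes, which is why they index `im_shape` out of order):

import unittest
import numpy as np
import torch
from monai.networks.blocks import Convolution

class TestConvCase(unittest.TestCase):  # hypothetical fixture
    def setUp(self):
        self.input_channels = 4
        self.output_channels = 8
        self.im_shape = (32, 48)  # hypothetical spatial size
        im = np.random.rand(1, self.input_channels, *self.im_shape).astype(np.float32)
        self.imt = torch.as_tensor(im)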
Example #9
 def test_dilation1(self):
     conv = Convolution(3,
                        self.input_channels,
                        self.output_channels,
                        dilation=3)
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[1],
                       self.im_shape[0], self.im_shape[2])
     self.assertEqual(out.shape, expected_shape)
Example #10
 def test_conv_only1(self):
     conv = Convolution(3,
                        self.input_channels,
                        self.output_channels,
                        conv_only=True)
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[1],
                       self.im_shape[0], self.im_shape[2])
     self.assertEqual(out.shape, expected_shape)
Example #11
 def test_stride1(self):
     conv = Convolution(2,
                        self.input_channels,
                        self.output_channels,
                        strides=2)
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[0] // 2,
                       self.im_shape[1] // 2)
     self.assertEqual(out.shape, expected_shape)
Example #12
 def test_transpose1(self):
     conv = Convolution(2,
                        self.input_channels,
                        self.output_channels,
                        is_transposed=True)
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[0],
                       self.im_shape[1])
     self.assertEqual(out.shape, expected_shape)
Example #13
 def test_conv1_no_acti(self):
     conv = Convolution(2,
                        self.input_channels,
                        self.output_channels,
                        act=None)
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[0],
                       self.im_shape[1])
     self.assertEqual(out.shape, expected_shape)
Example #14
 def test_transpose2(self):
     conv = Convolution(3,
                        self.input_channels,
                        self.output_channels,
                        strides=2,
                        is_transposed=True)
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[1] * 2,
                       self.im_shape[0] * 2, self.im_shape[2] * 2)
     self.assertEqual(out.shape, expected_shape)
Example #15
 def test_conv1(self):
     conv = Convolution(3,
                        self.input_channels,
                        self.output_channels,
                        dropout=0.1,
                        adn_ordering="DAN")
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[1],
                       self.im_shape[0], self.im_shape[2])
     self.assertEqual(out.shape, expected_shape)
Example #16
 def test_conv1_no_acti(self):
     conv = Convolution(3,
                        self.input_channels,
                        self.output_channels,
                        act=None,
                        adn_ordering="AND")
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[1],
                       self.im_shape[0], self.im_shape[2])
     self.assertEqual(out.shape, expected_shape)
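For reference, the `adn_ordering` string permutes the ops applied after the conv: "A" activation, "D" dropout, "N" normalization (MONAI's default is "NDA"), e.g.:

conv = Convolution(3, 4, 8, dropout=0.1, adn_ordering="DAN")  # dropout -> activation -> norm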
Example #17
def get_conv_block(
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    # act_norm: nn.Module,
    kernel_size: Union[Sequence[int], int] = 3,
    strides: int = 1,
    padding: Optional[Union[Tuple[int, ...], int]] = None,
    evonorm: Optional[EvoNormLayer] = None,
    act: Optional[Union[Tuple, str]] = "RELU",
    norm: Optional[Union[Tuple, str]] = "BATCH",
    initializer: Optional[str] = "kaiming_uniform",
) -> nn.Module:
    if padding is None:
        padding = same_padding(kernel_size)
    conv_block = Convolution(
        spatial_dims,
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        strides=strides,
        evonorm=evonorm,
        act=act,
        norm=norm,
        bias=False,
        conv_only=False,
        padding=padding,
    )
    conv_type: Type[Union[nn.Conv1d, nn.Conv2d,
                          nn.Conv3d]] = Conv[Conv.CONV, spatial_dims]
    for m in conv_block.modules():
        if isinstance(m, conv_type):
            if initializer == "kaiming_uniform":
                nn.init.kaiming_normal_(torch.as_tensor(m.weight))
            elif initializer == "zeros":
                nn.init.zeros_(torch.as_tensor(m.weight))
            else:
                raise ValueError(
                    f"initializer {initializer} is not supported, "
                    "currently supporting kaiming_uniform and zeros")
    return conv_block
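Note that `evonorm` is not a keyword of stock MONAI `Convolution`; this helper evidently targets a fork that adds EvoNorm support. A hypothetical call, assuming that extended `Convolution` is in scope:

block = get_conv_block(spatial_dims=2, in_channels=1, out_channels=16, initializer="zeros")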
Example #18
    def _get_intermediate_module(
            self, in_channels: int,
            num_inter_units: int) -> Tuple[nn.Module, int]:
        """
        Returns the intermediate block of the network which accepts input from the encoder and whose output goes
        to the decoder.
        """
        # Define some types
        intermediate: nn.Module
        unit: nn.Module

        intermediate = nn.Identity()
        layer_channels = in_channels

        if self.inter_channels:
            intermediate = nn.Sequential()

            for i, (dc, di) in enumerate(
                    zip(self.inter_channels, self.inter_dilations)):
                if self.num_inter_units > 0:
                    unit = ResidualUnit(
                        spatial_dims=self.dimensions,
                        in_channels=layer_channels,
                        out_channels=dc,
                        strides=1,
                        kernel_size=self.kernel_size,
                        subunits=self.num_inter_units,
                        act=self.act,
                        norm=self.norm,
                        dropout=self.dropout,
                        dilation=di,
                        bias=self.bias,
                    )
                else:
                    unit = Convolution(
                        spatial_dims=self.dimensions,
                        in_channels=layer_channels,
                        out_channels=dc,
                        strides=1,
                        kernel_size=self.kernel_size,
                        act=self.act,
                        norm=self.norm,
                        dropout=self.dropout,
                        dilation=di,
                        bias=self.bias,
                    )

                intermediate.add_module("inter_%i" % i, unit)
                layer_channels = dc

        return intermediate, layer_channels
Example #19
def get_deconv_block(spatial_dims: int, in_channels: int, out_channels: int) -> nn.Module:
    mod: nn.Module = Convolution(
        spatial_dims=spatial_dims,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        act="RELU",
        norm="BATCH",
        bias=False,
        is_transposed=True,
        padding=1,
        output_padding=1,
    )
    return mod
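With `kernel_size` left at MONAI's default of 3, the transposed conv output size is (in - 1) * 2 - 2 * padding + kernel + output_padding = 2 * in, i.e. exact 2x upsampling:

import torch

up = get_deconv_block(spatial_dims=2, in_channels=8, out_channels=4)
x = torch.randn(1, 8, 16, 16)
print(up(x).shape)  # torch.Size([1, 4, 32, 32])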
Example #20
    def __init__(
        self,
        dim: int,
        in_chns: int,
        out_chns: int,
        act: Union[str, tuple],
        norm: Union[str, tuple],
        dropout: Union[float, tuple] = 0.0,
    ):
        """
        Args:
            dim: number of spatial dimensions.
            in_chns: number of input channels.
            out_chns: number of output channels.
            act: activation type and arguments.
            norm: feature normalization type and arguments.
            dropout: dropout ratio. Defaults to no dropout.
        """
        super().__init__()

        conv_0 = Convolution(dim,
                             in_chns,
                             out_chns,
                             act=act,
                             norm=norm,
                             dropout=dropout,
                             padding=1)
        conv_1 = Convolution(dim,
                             out_chns,
                             out_chns,
                             act=act,
                             norm=norm,
                             dropout=dropout,
                             padding=1)
        self.add_module("conv_0", conv_0)
        self.add_module("conv_1", conv_1)
Example #21
 def __init__(self,
              in_channels,
              out_channels,
              dropout_p,
              spatial_dims: int = 2):
     super().__init__()
     self.conv_conv_se = nn.Sequential(
         Convolution(spatial_dims,
                     in_channels,
                     out_channels,
                     kernel_size=3,
                     norm=Norm.BATCH,
                     act=Act.LEAKYRELU),
         nn.Dropout(dropout_p),
         Convolution(spatial_dims,
                     out_channels,
                     out_channels,
                     kernel_size=3,
                     norm=Norm.BATCH,
                     act=Act.LEAKYRELU),
         ResidualSELayer(spatial_dims=spatial_dims,
                         in_channels=out_channels,
                         r=2),
     )
Example #22
 def test_stride1(self):
     for strides in [2, (2, 2, 2), [2, 2, 2]]:
         conv = Convolution(3,
                            self.input_channels,
                            self.output_channels,
                            strides=strides)
         out = conv(self.imt)
         expected_shape = (
             1,
             self.output_channels,
             self.im_shape[1] // 2,
             self.im_shape[0] // 2,
             self.im_shape[2] // 2,
         )
         self.assertEqual(out.shape, expected_shape)
Example #23
    def _get_intermediate_module(
            self, in_channels: int,
            num_inter_units: int) -> Tuple[nn.Module, int]:
        # Define some types
        intermediate: nn.Module
        unit: nn.Module

        intermediate = nn.Identity()
        layer_channels = in_channels

        if self.inter_channels:
            intermediate = nn.Sequential()

            for i, (dc, di) in enumerate(
                    zip(self.inter_channels, self.inter_dilations)):
                if self.num_inter_units > 0:
                    unit = ResidualUnit(
                        dimensions=self.dimensions,
                        in_channels=layer_channels,
                        out_channels=dc,
                        strides=1,
                        kernel_size=self.kernel_size,
                        subunits=self.num_inter_units,
                        act=self.act,
                        norm=self.norm,
                        dropout=self.dropout,
                        dilation=di,
                    )
                else:
                    unit = Convolution(
                        dimensions=self.dimensions,
                        in_channels=layer_channels,
                        out_channels=dc,
                        strides=1,
                        kernel_size=self.kernel_size,
                        act=self.act,
                        norm=self.norm,
                        dropout=self.dropout,
                        dilation=di,
                    )

                intermediate.add_module("inter_%i" % i, unit)
                layer_channels = dc

        return intermediate, layer_channels
Example #24
def get_conv_layer(
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    kernel_size: Union[Sequence[int], int] = 3,
    evonorm: Optional[EvoNormLayer] = None,
) -> nn.Module:
    padding = same_padding(kernel_size)
    return Convolution(
        spatial_dims,
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        bias=False,
        conv_only=True,
        padding=padding,
        evonorm=evonorm,
    )
Example #25
def get_conv_block(
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    kernel_size: Union[Sequence[int], int] = 3,
    act: Optional[Union[Tuple, str]] = "RELU",
    norm: Optional[Union[Tuple, str]] = "BATCH",
) -> nn.Module:
    padding = same_padding(kernel_size)
    return Convolution(
        spatial_dims,
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        act=act,
        norm=norm,
        bias=False,
        conv_only=False,
        padding=padding,
    )
Example #26
    def _get_decode_layer(self, in_channels: int, out_channels: int,
                          strides: int, is_last: bool) -> nn.Sequential:
        """
        Returns a single layer of the decoder part of the network.
        """
        decode = nn.Sequential()

        conv = Convolution(
            spatial_dims=self.dimensions,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            kernel_size=self.up_kernel_size,
            act=self.act,
            norm=self.norm,
            dropout=self.dropout,
            bias=self.bias,
            conv_only=is_last and self.num_res_units == 0,
            is_transposed=True,
        )

        decode.add_module("conv", conv)

        if self.num_res_units > 0:
            ru = ResidualUnit(
                spatial_dims=self.dimensions,
                in_channels=out_channels,
                out_channels=out_channels,
                strides=1,
                kernel_size=self.kernel_size,
                subunits=1,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                bias=self.bias,
                last_conv_only=is_last,
            )

            decode.add_module("resunit", ru)

        return decode
Example #27
    def _get_layer(self, in_channels: int, out_channels: int, strides: int,
                   is_last: bool):
        """
        Returns a layer accepting inputs with `in_channels` number of channels and producing outputs of `out_channels`
        number of channels. The `strides` value gives the downsampling factor, i.e. the convolutional stride. If `is_last`
        is True this is the final layer and is not expected to include activation and normalization layers.
        """

        layer: Union[ResidualUnit, Convolution]

        if self.num_res_units > 0:
            layer = ResidualUnit(
                subunits=self.num_res_units,
                last_conv_only=is_last,
                dimensions=self.dimensions,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                kernel_size=self.kernel_size,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                bias=self.bias,
            )
        else:
            layer = Convolution(
                conv_only=is_last,
                dimensions=self.dimensions,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                kernel_size=self.kernel_size,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                bias=self.bias,
            )

        return layer
Example #28
    def __init__(
        self,
        spatial_dims: int = 3,
        in_channels: int = 1,
        out_channels: int = 1,
        norm_type: Union[str, tuple] = ("batch", {"affine": True}),
        acti_type: Union[str, tuple] = ("relu", {"inplace": True}),
        dropout_prob: Optional[Union[Tuple, str, float]] = 0.0,
        layer_params: Sequence[Dict] = DEFAULT_LAYER_PARAMS_3D,
        channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,
    ) -> None:

        super(HighResNet, self).__init__()
        blocks = nn.ModuleList()

        # initial conv layer
        params = layer_params[0]
        _in_chns, _out_chns = in_channels, params["n_features"]
        blocks.append(
            Convolution(
                dimensions=spatial_dims,
                in_channels=_in_chns,
                out_channels=_out_chns,
                kernel_size=params["kernel_size"],
                adn_ordering="NA",
                act=acti_type,
                norm=norm_type,
            ))

        # residual blocks
        for (idx, params) in enumerate(
                layer_params[1:-2]
        ):  # res blocks except the 1st and last two conv layers.
            _in_chns, _out_chns = _out_chns, params["n_features"]
            _dilation = 2**idx
            for _ in range(params["repeat"]):
                blocks.append(
                    HighResBlock(
                        spatial_dims=spatial_dims,
                        in_channels=_in_chns,
                        out_channels=_out_chns,
                        kernels=params["kernels"],
                        dilation=_dilation,
                        norm_type=norm_type,
                        acti_type=acti_type,
                        channel_matching=channel_matching,
                    ))
                _in_chns = _out_chns

        # final conv layers
        params = layer_params[-2]
        _in_chns, _out_chns = _out_chns, params["n_features"]
        blocks.append(
            Convolution(
                dimensions=spatial_dims,
                in_channels=_in_chns,
                out_channels=_out_chns,
                kernel_size=params["kernel_size"],
                adn_ordering="NAD",
                act=acti_type,
                norm=norm_type,
                dropout=dropout_prob,
            ))

        params = layer_params[-1]
        _in_chns = _out_chns
        blocks.append(
            Convolution(
                dimensions=spatial_dims,
                in_channels=_in_chns,
                out_channels=out_channels,
                kernel_size=params["kernel_size"],
                adn_ordering="NAD",
                act=acti_type,
                norm=norm_type,
                dropout=dropout_prob,
            ))

        self.blocks = nn.Sequential(*blocks)
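A minimal usage sketch, assuming this is MONAI's `HighResNet` (all convolutions use "same" padding, so spatial size is preserved end to end):

import torch
from monai.networks.nets import HighResNet  # assumed import path

net = HighResNet(spatial_dims=3, in_channels=1, out_channels=2)
x = torch.randn(1, 1, 32, 32, 32)
print(net(x).shape)  # torch.Size([1, 2, 32, 32, 32])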
Example #29
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        n_chns_1: int,
        n_chns_2: int,
        n_chns_3: int,
        conv_param_1: Optional[Dict] = None,
        conv_param_2: Optional[Dict] = None,
        conv_param_3: Optional[Dict] = None,
        project: Optional[Convolution] = None,
        r: int = 2,
        acti_type_1: Union[Tuple[str, Dict], str] = ("relu", {"inplace": True}),
        acti_type_2: Union[Tuple[str, Dict], str] = "sigmoid",
        acti_type_final: Optional[Union[Tuple[str, Dict], str]] = ("relu", {"inplace": True}),
    ):
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
            in_channels: number of input channels.
            n_chns_1: number of output channels in the 1st convolution.
            n_chns_2: number of output channels in the 2nd convolution.
            n_chns_3: number of output channels in the 3rd convolution.
            conv_param_1: additional parameters to the 1st convolution.
                Defaults to ``{"kernel_size": 1, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})}``
            conv_param_2: additional parameters to the 2nd convolution.
                Defaults to ``{"kernel_size": 3, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})}``
            conv_param_3: additional parameters to the 3rd convolution.
                Defaults to ``{"kernel_size": 1, "norm": Norm.BATCH, "act": None}``
            project: when the residual and output channel counts don't match, a projection
                (Conv) layer/block is used to adjust the number of channels. In SENet, it
                consists of a Conv layer as well as a Norm layer.
                Defaults to None (when the channel counts match); otherwise a Conv layer
                with kernel size 1 is created automatically.
            r: the reduction ratio r in the paper. Defaults to 2.
            acti_type_1: activation type of the hidden squeeze layer. Defaults to "relu".
            acti_type_2: activation type of the output squeeze layer. Defaults to "sigmoid".
            acti_type_final: activation type of the end of the block. Defaults to "relu".

        See also:

            :py:class:`monai.networks.blocks.ChannelSELayer`

        """
        super(SEBlock, self).__init__()

        if not conv_param_1:
            conv_param_1 = {"kernel_size": 1, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})}
        self.conv1 = Convolution(dimensions=spatial_dims,
                                 in_channels=in_channels,
                                 out_channels=n_chns_1,
                                 **conv_param_1)

        if not conv_param_2:
            conv_param_2 = {"kernel_size": 3, "norm": Norm.BATCH, "act": ("relu", {"inplace": True})}
        self.conv2 = Convolution(dimensions=spatial_dims,
                                 in_channels=n_chns_1,
                                 out_channels=n_chns_2,
                                 **conv_param_2)

        if not conv_param_3:
            conv_param_3 = {"kernel_size": 1, "norm": Norm.BATCH, "act": None}
        self.conv3 = Convolution(dimensions=spatial_dims,
                                 in_channels=n_chns_2,
                                 out_channels=n_chns_3,
                                 **conv_param_3)

        self.se_layer = ChannelSELayer(spatial_dims=spatial_dims,
                                       in_channels=n_chns_3,
                                       r=r,
                                       acti_type_1=acti_type_1,
                                       acti_type_2=acti_type_2)

        self.project = project
        if self.project is None and in_channels != n_chns_3:
            self.project = Conv[Conv.CONV, spatial_dims](in_channels,
                                                         n_chns_3,
                                                         kernel_size=1)

        self.act = None
        if acti_type_final is not None:
            act_final, act_final_args = split_args(acti_type_final)
            self.act = Act[act_final](**act_final_args)
Example #30
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        kernels: Sequence[int] = (3, 3),
        dilation: Union[Sequence[int], int] = 1,
        norm_type: Union[Tuple, str] = ("batch", {"affine": True}),
        acti_type: Union[Tuple, str] = ("relu", {"inplace": True}),
        channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions of the input image.
            in_channels: number of input channels.
            out_channels: number of output channels.
            kernels: each integer k in `kernels` corresponds to a convolution layer with kernel size k.
            dilation: spacing between kernel elements.
            norm_type: feature normalization type and arguments.
                Defaults to ``("batch", {"affine": True})``.
            acti_type: {``"relu"``, ``"prelu"``, ``"relu6"``}
                Non-linear activation using ReLU or PReLU. Defaults to ``"relu"``.
            channel_matching: {``"pad"``, ``"project"``}
                Specifies how to handle channel mismatches between the residual and conv branches. Defaults to ``"pad"``.

                - ``"pad"``: with zero padding.
                - ``"project"``: with a trainable conv with kernel size one.

        Raises:
            ValueError: When ``channel_matching=pad`` and ``in_channels > out_channels``. Incompatible values.

        """
        super(HighResBlock, self).__init__()
        self.chn_pad = ChannelPad(spatial_dims=spatial_dims,
                                  in_channels=in_channels,
                                  out_channels=out_channels,
                                  mode=channel_matching)

        layers = nn.ModuleList()
        _in_chns, _out_chns = in_channels, out_channels

        for kernel_size in kernels:
            layers.append(
                ADN(ordering="NA",
                    in_channels=_in_chns,
                    act=acti_type,
                    norm=norm_type,
                    norm_dim=spatial_dims))
            layers.append(
                Convolution(
                    dimensions=spatial_dims,
                    in_channels=_in_chns,
                    out_channels=_out_chns,
                    kernel_size=kernel_size,
                    dilation=dilation,
                ))
            _in_chns = _out_chns

        self.layers = nn.Sequential(*layers)
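A usage sketch, assuming the import path `monai.networks.nets.highresnet`; with the default `channel_matching="pad"`, the residual branch is zero-padded from 8 to 16 channels before the add in `forward` (not shown):

import torch
from monai.networks.nets.highresnet import HighResBlock  # assumed import path

blk = HighResBlock(spatial_dims=3, in_channels=8, out_channels=16, kernels=(3, 3), dilation=2)
x = torch.randn(1, 8, 24, 24, 24)
print(blk(x).shape)  # torch.Size([1, 16, 24, 24, 24]); dilated convs use "same" padding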