Example #1
from typing import Optional, Tuple, Union

from monai.networks.blocks import ADN


def _get_adn_layer(act: Optional[Union[Tuple, str]],
                   dropout: Optional[Union[Tuple, str, float]],
                   ordering: Optional[str]) -> ADN:
    # build an activation/dropout block; only pass `ordering` when given,
    # so ADN otherwise keeps its default "NDA" order
    if ordering:
        return ADN(act=act, dropout=dropout, dropout_dim=1, ordering=ordering)
    return ADN(act=act, dropout=dropout, dropout_dim=1)
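A quick sanity check of the helper above (the tensor shape and argument values are illustrative assumptions, not part of the original snippet):

import torch

adn = _get_adn_layer(act="relu", dropout=0.1, ordering=None)  # falls back to ADN's default "NDA" order
x = torch.randn(2, 8, 32, 32)    # (batch, channels, H, W)
assert adn(x).shape == x.shape   # activation and dropout never change the shape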
Example #2
def test_adn_3d(self, args):
    # build ADN from the parameterized args and check that it preserves
    # the shape of the 3D test image provided by the test base class
    adn = ADN(**args)
    print(adn)
    out = adn(self.imt)
    expected_shape = (1, self.input_channels, self.im_shape[1], self.im_shape[0], self.im_shape[2])
    self.assertEqual(out.shape, expected_shape)
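The test above depends on a 3D test-case base class for self.imt, self.input_channels, and self.im_shape. A self-contained equivalent, with an assumed input shape and assumed ADN arguments, might look like:

import torch
from monai.networks.blocks import ADN

adn = ADN(ordering="NDA", in_channels=4, act="relu", norm="instance", norm_dim=3, dropout=0.1, dropout_dim=1)
x = torch.randn(1, 4, 16, 16, 16)  # (batch, channels, D, H, W)
assert adn(x).shape == x.shape     # norm, dropout, and activation all preserve the shape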
Example #3
File: highresnet.py Project: tuan-cs/MONAI
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        kernels: Sequence[int] = (3, 3),
        dilation: Union[Sequence[int], int] = 1,
        norm_type: Union[Tuple, str] = ("batch", {"affine": True}),
        acti_type: Union[Tuple, str] = ("relu", {"inplace": True}),
        channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions of the input image.
            in_channels: number of input channels.
            out_channels: number of output channels.
            kernels: each integer k in `kernels` corresponds to a convolution layer with kernel size k.
            dilation: spacing between kernel elements.
            norm_type: feature normalization type and arguments.
                Defaults to ``("batch", {"affine": True})``.
            acti_type: {``"relu"``, ``"prelu"``, ``"relu6"``}
                Non-linear activation using ReLU or PReLU. Defaults to ``"relu"``.
            channel_matching: {``"pad"``, ``"project"``}
                Specifies how to handle a channel mismatch between the residual and conv branches. Defaults to ``"pad"``.

                - ``"pad"``: with zero padding.
                - ``"project"``: with a trainable conv with kernel size one.

        Raises:
            ValueError: When ``channel_matching=pad`` and ``in_channels > out_channels``. Incompatible values.

        """
        super(HighResBlock, self).__init__()
        self.chn_pad = ChannelPad(spatial_dims=spatial_dims,
                                  in_channels=in_channels,
                                  out_channels=out_channels,
                                  mode=channel_matching)

        layers = nn.ModuleList()
        _in_chns, _out_chns = in_channels, out_channels

        for kernel_size in kernels:
            layers.append(
                ADN(ordering="NA",
                    in_channels=_in_chns,
                    act=acti_type,
                    norm=norm_type,
                    norm_dim=spatial_dims))
            layers.append(
                Convolution(
                    dimensions=spatial_dims,
                    in_channels=_in_chns,
                    out_channels=_out_chns,
                    kernel_size=kernel_size,
                    dilation=dilation,
                ))
            _in_chns = _out_chns

        self.layers = nn.Sequential(*layers)
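Assuming the rest of the HighResBlock class from monai.networks.nets.highresnet (only its __init__ is shown above), the block acts as a shape-preserving residual unit; the values below are illustrative:

import torch
from monai.networks.nets.highresnet import HighResBlock

block = HighResBlock(spatial_dims=3, in_channels=8, out_channels=16)
x = torch.randn(1, 8, 24, 24, 24)
print(block(x).shape)  # torch.Size([1, 16, 24, 24, 24]): conv branch + zero-padded skip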
Example #4
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        strides: Union[Sequence[int], int] = 1,
        kernel_size: Union[Sequence[int], int] = 3,
        adn_ordering: str = "NDA",
        act: Optional[Union[Tuple, str]] = "PRELU",
        norm: Optional[Union[Tuple, str]] = "INSTANCE",
        dropout: Optional[Union[Tuple, str, float]] = None,
        dropout_dim: Optional[int] = 1,
        dilation: Union[Sequence[int], int] = 1,
        groups: int = 1,
        bias: bool = True,
        conv_only: bool = False,
        is_transposed: bool = False,
        padding: Optional[Union[Sequence[int], int]] = None,
        output_padding: Optional[Union[Sequence[int], int]] = None,
        dimensions: Optional[int] = None,
    ) -> None:
        super().__init__()
        # the legacy `dimensions` argument takes precedence over `spatial_dims` when supplied
        self.dimensions = spatial_dims if dimensions is None else dimensions
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.is_transposed = is_transposed
        if padding is None:
            # "same" padding: output spatial size equals input size when stride is 1
            padding = same_padding(kernel_size, dilation)
        conv_type = Conv[Conv.CONVTRANS if is_transposed else Conv.CONV, self.dimensions]

        conv: nn.Module
        if is_transposed:
            if output_padding is None:
                # output_padding = stride - 1, so the transposed conv scales the spatial size by exactly `strides`
                output_padding = stride_minus_kernel_padding(1, strides)
            conv = conv_type(
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=strides,
                padding=padding,
                output_padding=output_padding,
                groups=groups,
                bias=bias,
                dilation=dilation,
            )
        else:
            conv = conv_type(
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                bias=bias,
            )

        self.add_module("conv", conv)

        if not conv_only:
            # append the post-conv ADN block (norm -> dropout -> act under the default "NDA" ordering)
            self.add_module(
                "adn",
                ADN(
                    ordering=adn_ordering,
                    in_channels=out_channels,
                    act=act,
                    norm=norm,
                    norm_dim=self.dimensions,
                    dropout=dropout,
                    dropout_dim=dropout_dim,
                ),
            )
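A minimal usage sketch for this block (argument values are assumptions for illustration): with the defaults above, the convolution is followed by instance norm, dropout, and PReLU in "NDA" order.

import torch
from monai.networks.blocks import Convolution

conv = Convolution(spatial_dims=2, in_channels=1, out_channels=4, strides=2, dropout=0.1)
x = torch.randn(1, 1, 64, 64)
print(conv(x).shape)  # torch.Size([1, 4, 32, 32]): "same" padding, so stride 2 halves H and W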
Example #5
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        kernels: Sequence[int] = (3, 3),
        dilation: Union[Sequence[int], int] = 1,
        norm_type: Union[Tuple, str] = ("batch", {"affine": True}),
        acti_type: Union[Tuple, str] = ("relu", {"inplace": True}),
        bias: bool = False,
        channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions of the input image.
            in_channels: number of input channels.
            out_channels: number of output channels.
            kernels: each integer k in `kernels` corresponds to a convolution layer with kernel size k.
            dilation: spacing between kernel elements.
            norm_type: feature normalization type and arguments.
                Defaults to ``("batch", {"affine": True})``.
            acti_type: {``"relu"``, ``"prelu"``, ``"relu6"``}
                Non-linear activation using ReLU or PReLU. Defaults to ``"relu"``.
            bias: whether to have a bias term in convolution blocks. Defaults to False.
                According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
                if a conv layer is directly followed by a batch norm layer, bias should be False.
            channel_matching: {``"pad"``, ``"project"``}
                Specifies how to handle a channel mismatch between the residual and conv branches. Defaults to ``"pad"``.

                - ``"pad"``: with zero padding.
                - ``"project"``: with a trainable conv with kernel size one.

        Raises:
            ValueError: When ``channel_matching=pad`` and ``in_channels > out_channels``. Incompatible values.

        """
        super().__init__()
        self.chn_pad = ChannelPad(
            spatial_dims=spatial_dims, in_channels=in_channels, out_channels=out_channels, mode=channel_matching
        )

        layers = nn.ModuleList()
        _in_chns, _out_chns = in_channels, out_channels

        for kernel_size in kernels:
            layers.append(
                ADN(ordering="NA", in_channels=_in_chns, act=acti_type, norm=norm_type, norm_dim=spatial_dims)
            )
            layers.append(
                Convolution(
                    spatial_dims=spatial_dims,
                    in_channels=_in_chns,
                    out_channels=_out_chns,
                    kernel_size=kernel_size,
                    dilation=dilation,
                    bias=bias,
                    conv_only=True,
                )
            )
            _in_chns = _out_chns

        self.layers = nn.Sequential(*layers)
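Compared with Example #3, this version exposes bias and passes conv_only=True, so normalization and activation come only from the preceding ADN blocks. A sketch of the "project" channel-matching mode, which also permits in_channels > out_channels (where "pad" raises ValueError); values are illustrative:

import torch
from monai.networks.nets.highresnet import HighResBlock

block = HighResBlock(spatial_dims=3, in_channels=16, out_channels=8, channel_matching="project")
x = torch.randn(1, 16, 12, 12, 12)
print(block(x).shape)  # torch.Size([1, 8, 12, 12, 12]): skip projected by a kernel-size-1 conv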
Example #6
def test_no_input(self):
    # norm="instance" needs a spatial dimension; with neither norm_dim
    # nor dropout_dim given, ADN raises ValueError
    with self.assertRaises(ValueError):
        ADN(norm="instance")
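For contrast, supplying a spatial dimension satisfies the check this test exercises (the channel count below is an arbitrary illustration):

from monai.networks.blocks import ADN

adn = ADN(norm="instance", norm_dim=2, in_channels=8)  # no ValueError: norm_dim selects InstanceNorm2d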