Example #1
    def __init__(self,
                 spatial_dims: int,
                 f_int: int,
                 f_g: int,
                 f_l: int,
                 dropout=0.0):
        super().__init__()
        self.W_g = nn.Sequential(
            Convolution(
                spatial_dims=spatial_dims,
                in_channels=f_g,
                out_channels=f_int,
                kernel_size=1,
                strides=1,
                padding=0,
                dropout=dropout,
                conv_only=True,
            ),
            Norm[Norm.BATCH, spatial_dims](f_int),
        )

        self.W_x = nn.Sequential(
            Convolution(
                spatial_dims=spatial_dims,
                in_channels=f_l,
                out_channels=f_int,
                kernel_size=1,
                strides=1,
                padding=0,
                dropout=dropout,
                conv_only=True,
            ),
            Norm[Norm.BATCH, spatial_dims](f_int),
        )

        self.psi = nn.Sequential(
            Convolution(
                spatial_dims=spatial_dims,
                in_channels=f_int,
                out_channels=1,
                kernel_size=1,
                strides=1,
                padding=0,
                dropout=dropout,
                conv_only=True,
            ),
            Norm[Norm.BATCH, spatial_dims](1),
            nn.Sigmoid(),
        )

        self.relu = nn.ReLU()
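
The three blocks above implement the additive attention gate of Attention U-Net: the gating signal and the skip-connection features are each projected to f_int channels, summed, passed through ReLU, and reduced to a one-channel sigmoid mask that rescales the skip features. A minimal sketch of the matching forward pass (assumed from the published formulation and the layers defined above, not quoted from this project):

    def forward(self, g: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
        g1 = self.W_g(g)          # gating signal projected to f_int channels
        x1 = self.W_x(x)          # skip features projected to f_int channels
        psi = self.relu(g1 + x1)  # additive attention
        psi = self.psi(psi)       # one-channel sigmoid attention mask
        return x * psi            # gate the skip features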
Example #2
File: unet.py Project: lsho76/MONAI
 def _get_down_layer(self, in_channels: int, out_channels: int, strides: int, is_top: bool) -> nn.Module:
     """
     Args:
         in_channels: number of input channels.
         out_channels: number of output channels.
         strides: convolution stride.
         is_top: True if this is the top block.
     """
     if self.num_res_units > 0:
         return ResidualUnit(
             self.dimensions,
             in_channels,
             out_channels,
             strides=strides,
             kernel_size=self.kernel_size,
             subunits=self.num_res_units,
             act=self.act,
             norm=self.norm,
             dropout=self.dropout,
         )
     else:
         return Convolution(
             self.dimensions,
             in_channels,
             out_channels,
             strides=strides,
             kernel_size=self.kernel_size,
             act=self.act,
             norm=self.norm,
             dropout=self.dropout,
         )
Example #3
    def __init__(self, spatial_dims: int, nchan: int, act: Union[Tuple[str, Dict], str]):
        super(LUConv, self).__init__()

        self.act_function = get_acti_layer(act, nchan)
        self.conv_block = Convolution(
            dimensions=spatial_dims, in_channels=nchan, out_channels=nchan, kernel_size=5, act=None, norm=Norm.BATCH,
        )
Example #4
    def _get_up_layer(self, in_channels, out_channels, strides, is_top):
        conv = Convolution(self.dimensions,
                           in_channels,
                           out_channels,
                           strides,
                           self.up_kernel_size,
                           self.act,
                           self.norm,
                           self.dropout,
                           conv_only=is_top and self.num_res_units == 0,
                           is_transposed=True)

        if self.num_res_units > 0:
            ru = ResidualUnit(self.dimensions,
                              out_channels,
                              out_channels,
                              1,
                              self.kernel_size,
                              1,
                              self.act,
                              self.norm,
                              self.dropout,
                              last_conv_only=is_top)
            return nn.Sequential(conv, ru)
        else:
            return conv
Example #5
 def _get_down_layer(self, in_channels: int, out_channels: int,
                     strides: int, kernel_size: int, is_top: bool):
     if self.num_res_units > 0:
         return ResidualUnit(
             self.dimensions,
             in_channels,
             out_channels,
             strides=strides,
             kernel_size=kernel_size,
             subunits=self.num_res_units,
             act=self.act,
             norm=self.norm,
             dropout=self.dropout,
         )
     else:
         return Convolution(
             self.dimensions,
             in_channels,
             out_channels,
             strides=strides,
             kernel_size=kernel_size,
             act=self.act,
             norm=self.norm,
             dropout=self.dropout,
         )
Example #6
def get_conv_layer(
    dimensions: int,
    in_channels: int,
    out_channels: int,
    kernel_size: Union[Sequence[int], int] = 3,
    stride: Union[Sequence[int], int] = 1,
    act: Optional[Union[Tuple, str]] = Act.PRELU,
    norm: Union[Tuple, str] = Norm.INSTANCE,
    bias: bool = False,
    conv_only: bool = True,
    is_transposed: bool = False,
):
    padding = get_padding(kernel_size, stride)
    output_padding = None
    if is_transposed:
        output_padding = get_output_padding(kernel_size, stride, padding)

    return Convolution(
        dimensions,
        in_channels,
        out_channels,
        strides=stride,
        kernel_size=kernel_size,
        act=act,
        norm=norm,
        bias=bias,
        conv_only=conv_only,
        is_transposed=is_transposed,
        padding=padding,
        output_padding=output_padding,
    )
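
get_conv_layer delegates the padding arithmetic to two helpers. A simplified scalar sketch of what they compute (hedged: the real MONAI helpers also accept per-dimension sequences), giving "same"-style padding and the output padding that lets a transposed convolution restore the pre-stride size:

def get_padding(kernel_size: int, stride: int) -> int:
    # "same"-style padding: output spatial size = ceil(input / stride)
    padding = (kernel_size - stride + 1) / 2
    if padding < 0:
        raise AssertionError("padding must be non-negative; change kernel size and/or stride")
    return int(padding)

def get_output_padding(kernel_size: int, stride: int, padding: int) -> int:
    # extra padding so a transposed conv exactly inverts the strided conv
    out_padding = 2 * padding + stride - kernel_size
    if out_padding < 0:
        raise AssertionError("output padding must be non-negative; change kernel size and/or stride")
    return int(out_padding)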
Example #7
    def _get_up_layer(self, in_channels: int, out_channels: int, strides: int,
                      kernel_size: int, is_top: bool):
        conv = Convolution(
            self.dimensions,
            in_channels,
            out_channels,
            strides=strides,
            kernel_size=kernel_size,
            act=self.act,
            norm=self.norm,
            dropout=self.dropout,
            conv_only=is_top and self.num_res_units == 0,
            is_transposed=True,
        )

        if self.num_res_units > 0:
            ru = ResidualUnit(
                self.dimensions,
                out_channels,
                out_channels,
                strides=1,
                kernel_size=kernel_size,
                subunits=1,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                last_conv_only=is_top,
            )
            return nn.Sequential(conv, ru)
        else:
            return conv
Example #8
def get_conv_layer(
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    kernel_size: Union[Sequence[int], int] = 3,
    stride: Union[Sequence[int], int] = 1,
    act: Optional[Union[tuple, str]] = Act.PRELU,
    norm: Union[tuple, str] = Norm.INSTANCE,
    output_padding: Optional[Union[Sequence[int], int]] = None,
    dropout: Optional[Union[tuple, str, float]] = None,
    bias: bool = False,
    conv_only: bool = True,
    is_transposed: bool = False,
):
    padding = get_padding(kernel_size, stride)
    if is_transposed and output_padding is None:
        output_padding = get_output_padding(kernel_size, stride, padding)
    return Convolution(
        spatial_dims,
        in_channels,
        out_channels,
        strides=stride,
        kernel_size=kernel_size,
        act=act,
        norm=norm,
        dropout=dropout,
        bias=bias,
        conv_only=conv_only,
        is_transposed=is_transposed,
        padding=padding,
        output_padding=output_padding,
    )
Example #9
def get_conv_layer(
    spatial_dims: int, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, bias: bool = False
):

    return Convolution(
        spatial_dims, in_channels, out_channels, strides=stride, kernel_size=kernel_size, bias=bias, conv_only=True,
    )
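
For reference, a hypothetical call to this helper (the shapes are illustrative): with the default same-style padding, a stride of 2 halves each spatial dimension:

import torch

layer = get_conv_layer(spatial_dims=3, in_channels=1, out_channels=8, kernel_size=3, stride=2)
x = torch.randn(2, 1, 32, 32, 32)  # (batch, channels, D, H, W)
y = layer(x)                       # expected shape: (2, 8, 16, 16, 16)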
Example #10
File: vnet.py Project: Nic-Ma/MONAI
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        act: Union[Tuple[str, Dict], str],
        bias: bool = False,
    ):
        super().__init__()

        if 16 % in_channels != 0:
            raise ValueError(
                f"16 should be divisible by in_channels, got in_channels={in_channels}."
            )

        self.spatial_dims = spatial_dims
        self.in_channels = in_channels
        self.act_function = get_acti_layer(act, 16)
        self.conv_block = Convolution(
            spatial_dims=spatial_dims,
            in_channels=in_channels,
            out_channels=16,
            kernel_size=5,
            act=None,
            norm=Norm.BATCH,
            bias=bias,
        )
Example #11
 def _get_down_layer(self, in_channels, out_channels, strides, is_top):
     if self.num_res_units > 0:
         return ResidualUnit(self.dimensions, in_channels, out_channels,
                             strides, self.kernel_size, self.num_res_units,
                             self.act, self.norm, self.dropout)
     else:
         return Convolution(self.dimensions, in_channels, out_channels,
                            strides, self.kernel_size, self.act, self.norm,
                            self.dropout)
Example #12
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        conv_out_channels: int,
        kernel_sizes=(1, 3, 3, 3),
        dilations=(1, 2, 4, 6),
        norm_type=Norm.BATCH,
        acti_type=Act.LEAKYRELU,
    ):
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
            in_channels: number of input channels.
            conv_out_channels: number of output channels of each atrous conv.
                The final number of output channels is conv_out_channels * len(kernel_sizes).
            kernel_sizes: a sequence of four convolutional kernel sizes.
                Defaults to (1, 3, 3, 3) for four (dilated) convolutions.
            dilations: a sequence of four convolutional dilation parameters.
                Defaults to (1, 2, 4, 6) for four (dilated) convolutions.
            norm_type: final kernel-size-one convolution normalization type.
                Defaults to batch norm.
            acti_type: final kernel-size-one convolution activation type.
                Defaults to leaky ReLU.
        """
        super().__init__()
        if len(kernel_sizes) != len(dilations):
            raise ValueError(
                "len(kernel_sizes) and len(dilations) must be the same.")
        pads = tuple(same_padding(k, d) for k, d in zip(kernel_sizes, dilations))

        self.convs = nn.ModuleList()
        for k, d, p in zip(kernel_sizes, dilations, pads):
            _conv = Conv[Conv.CONV, spatial_dims](
                in_channels=in_channels,
                out_channels=conv_out_channels,
                kernel_size=k,
                dilation=d,
                padding=p,
            )
            self.convs.append(_conv)

        out_channels = conv_out_channels * len(pads)  # final conv. output channels
        self.conv_k1 = Convolution(
            dimensions=spatial_dims,
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=1,
            act=acti_type,
            norm=norm_type,
        )
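
The parallel dilated branches are concatenated along the channel axis and fused by the kernel-size-one convolution; a sketch of the corresponding forward pass (assumed from the layer definitions above; requires import torch at module level):

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # run the dilated convolutions in parallel and concatenate on channels
        x_out = torch.cat([conv(x) for conv in self.convs], dim=1)
        # mix channels with the 1x1 convolution (plus norm and activation)
        return self.conv_k1(x_out)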
Example #13
    def __init__(self, in_channels: int = 3, out_channels: int = 1, upsample_mode: str = "bilinear"):
        super(MCFCN, self).__init__(out_channels=out_channels, upsample_mode=upsample_mode)

        self.init_proj = Convolution(
            dimensions=2,
            in_channels=in_channels,
            out_channels=3,
            kernel_size=1,
            act=("relu", {"inplace": True}),
            norm=Norm.BATCH,
            bias=False,
        )
Example #14
 def _get_upsample_layer(self, in_channels, out_channels, strides,
                         up_kernel_size):
     conv = Convolution(
         self.dimensions,
         in_channels,
         out_channels,
         strides,
         up_kernel_size,
         self.act,
         self.norm,
         self.dropout,
         is_transposed=True,
     )
     return conv
Example #15
 def __init__(
     self,
     spatial_dims: int,
     in_channels: int,
     out_channels: int,
     kernel_size: int = 3,
     strides: int = 1,
     dropout=0.0,
 ):
     super().__init__()
     layers = [
         Convolution(
             spatial_dims=spatial_dims,
             in_channels=in_channels,
             out_channels=out_channels,
             kernel_size=kernel_size,
             strides=strides,
             padding=None,
             adn_ordering="NDA",
             act="relu",
             norm=Norm.BATCH,
             dropout=dropout,
         ),
         Convolution(
             spatial_dims=spatial_dims,
             in_channels=out_channels,
             out_channels=out_channels,
             kernel_size=kernel_size,
             strides=1,
             padding=None,
             adn_ordering="NDA",
             act="relu",
             norm=Norm.BATCH,
             dropout=dropout,
         ),
     ]
     self.conv = nn.Sequential(*layers)
Example #16
    def _make_layer(
        self,
        block: Type[Union[SEBottleneck, SEResNetBottleneck, SEResNeXtBottleneck]],
        planes: int,
        blocks: int,
        groups: int,
        reduction: int,
        stride: int = 1,
        downsample_kernel_size: int = 1,
    ) -> nn.Sequential:

        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = Convolution(
                dimensions=self.spatial_dims,
                in_channels=self.inplanes,
                out_channels=planes * block.expansion,
                strides=stride,
                kernel_size=downsample_kernel_size,
                act=None,
                norm=Norm.BATCH,
                bias=False,
            )

        layers = []
        layers.append(
            block(
                spatial_dims=self.spatial_dims,
                inplanes=self.inplanes,
                planes=planes,
                groups=groups,
                reduction=reduction,
                stride=stride,
                downsample=downsample,
            )
        )
        self.inplanes = planes * block.expansion
        for _num in range(1, blocks):
            layers.append(
                block(
                    spatial_dims=self.spatial_dims,
                    inplanes=self.inplanes,
                    planes=planes,
                    groups=groups,
                    reduction=reduction,
                )
            )

        return nn.Sequential(*layers)
Example #17
    def __init__(self, spatial_dims: int, in_channels: int, out_channels: int, act: Union[Tuple[str, Dict], str]):
        super(OutputTransition, self).__init__()

        conv_type: Type[Union[nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims]

        self.act_function1 = get_acti_layer(act, out_channels)
        self.conv_block = Convolution(
            dimensions=spatial_dims,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=5,
            act=None,
            norm=Norm.BATCH,
        )
        self.conv2 = conv_type(out_channels, out_channels, kernel_size=1)
Example #18
File: vnet.py Project: Nic-Ma/MONAI
    def __init__(self,
                 spatial_dims: int,
                 nchan: int,
                 act: Union[Tuple[str, Dict], str],
                 bias: bool = False):
        super().__init__()

        self.act_function = get_acti_layer(act, nchan)
        self.conv_block = Convolution(
            spatial_dims=spatial_dims,
            in_channels=nchan,
            out_channels=nchan,
            kernel_size=5,
            act=None,
            norm=Norm.BATCH,
            bias=bias,
        )
Example #19
File: unet.py Project: Nic-Ma/MONAI
    def _get_up_layer(self, in_channels: int, out_channels: int, strides: int, is_top: bool) -> nn.Module:
        """
        Returns the decoding (up) part of a layer of the network. This typically will upsample data at some point
        in its structure. Its output is used as input to the next layer up.

        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            strides: convolution stride.
            is_top: True if this is the top block.
        """
        conv: Union[Convolution, nn.Sequential]

        conv = Convolution(
            self.dimensions,
            in_channels,
            out_channels,
            strides=strides,
            kernel_size=self.up_kernel_size,
            act=self.act,
            norm=self.norm,
            dropout=self.dropout,
            bias=self.bias,
            conv_only=is_top and self.num_res_units == 0,
            is_transposed=True,
            adn_ordering=self.adn_ordering,
        )

        if self.num_res_units > 0:
            ru = ResidualUnit(
                self.dimensions,
                out_channels,
                out_channels,
                strides=1,
                kernel_size=self.kernel_size,
                subunits=1,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                bias=self.bias,
                last_conv_only=is_top,
                adn_ordering=self.adn_ordering,
            )
            conv = nn.Sequential(conv, ru)

        return conv
Example #20
    def __init__(self,
                 nin: int = 3,
                 nout: int = 1,
                 upsample_mode: str = "transpose"):
        super(MCFCN, self).__init__(nout=nout, upsample_mode=upsample_mode)

        self.init_proj = Convolution(
            dimensions=2,
            in_channels=nin,
            out_channels=3,
            kernel_size=1,
            act=("relu", {
                "inplace": True
            }),
            norm=Norm.BATCH,
            bias=False,
        )
Example #21
    def __init__(self, in_channels: int, out_channels: int,
                 act: Union[Tuple[str, Dict], str]):
        super(InputTransition, self).__init__()

        if 16 % in_channels != 0:
            raise ValueError(
                f"16 should be divided by in_channels, got in_channels={in_channels}."
            )

        self.in_channels = in_channels
        self.act_function = get_acti_layer(act, 16)
        self.conv_block = Convolution(
            dimensions=3,
            in_channels=in_channels,
            out_channels=16,
            kernel_size=5,
            act=None,
            norm=Norm.BATCH,
        )
Example #22
    def _get_up_layer(self, in_channels: int, out_channels: int, strides: int,
                      is_top: bool) -> nn.Module:
        """
        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            strides: convolution stride.
            is_top: True if this is the top block.
        """
        conv: Union[Convolution, nn.Sequential]

        conv = Convolution(
            self.dimensions,
            in_channels,
            out_channels,
            strides=strides,
            kernel_size=self.up_kernel_size,
            act=self.act,
            norm=self.norm,
            evonorm=self.evonorm,
            dropout=self.dropout,
            conv_only=is_top and self.num_res_units == 0,
            is_transposed=True,
        )

        if self.num_res_units > 0:
            ru = ResidualUnit(
                self.dimensions,
                out_channels,
                out_channels,
                strides=1,
                kernel_size=self.kernel_size,
                subunits=1,
                act=self.act,
                norm=self.norm,
                evonorm=self.evonorm,
                dropout=self.dropout,
                last_conv_only=is_top,
            )
            conv = nn.Sequential(conv, ru)

        return conv
Example #23
File: unet.py Project: Nic-Ma/MONAI
    def _get_down_layer(self, in_channels: int, out_channels: int, strides: int, is_top: bool) -> nn.Module:
        """
        Returns the encoding (down) part of a layer of the network. This typically will downsample data at some point
        in its structure. Its output is used as input to the next layer down and is concatenated with output from the
        next layer to form the input for the decode (up) part of the layer.

        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            strides: convolution stride.
            is_top: True if this is the top block.
        """
        mod: nn.Module
        if self.num_res_units > 0:

            mod = ResidualUnit(
                self.dimensions,
                in_channels,
                out_channels,
                strides=strides,
                kernel_size=self.kernel_size,
                subunits=self.num_res_units,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                bias=self.bias,
                adn_ordering=self.adn_ordering,
            )
            return mod
        mod = Convolution(
            self.dimensions,
            in_channels,
            out_channels,
            strides=strides,
            kernel_size=self.kernel_size,
            act=self.act,
            norm=self.norm,
            dropout=self.dropout,
            bias=self.bias,
            adn_ordering=self.adn_ordering,
        )
        return mod
Example #24
 def __init__(self,
              spatial_dims: int,
              in_channels: int,
              out_channels: int,
              submodule: nn.Module,
              dropout=0.0):
     super().__init__()
     self.attention = AttentionBlock(spatial_dims=spatial_dims,
                                     f_g=in_channels,
                                     f_l=in_channels,
                                     f_int=in_channels // 2)
     self.upconv = UpConv(spatial_dims=spatial_dims,
                          in_channels=out_channels,
                          out_channels=in_channels,
                          strides=2)
     self.merge = Convolution(spatial_dims=spatial_dims,
                              in_channels=2 * in_channels,
                              out_channels=in_channels,
                              dropout=dropout)
     self.submodule = submodule
Example #25
 def __init__(self,
              spatial_dims: int,
              in_channels: int,
              out_channels: int,
              kernel_size=3,
              strides=2,
              dropout=0.0):
     super().__init__()
     self.up = Convolution(
         spatial_dims,
         in_channels,
         out_channels,
         strides=strides,
         kernel_size=kernel_size,
         act="relu",
         adn_ordering="NDA",
         norm=Norm.BATCH,
         dropout=dropout,
         is_transposed=True,
     )
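
Since is_transposed=True and recent MONAI versions default output_padding to strides - 1, this block doubles each spatial dimension when strides=2. An illustrative usage, assuming the class forwards its input through self.up:

import torch

up = UpConv(spatial_dims=2, in_channels=64, out_channels=32, kernel_size=3, strides=2)
x = torch.randn(1, 64, 16, 16)
y = up(x)  # expected shape: (1, 32, 32, 32)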
Example #26
File: fcn.py Project: Nic-Ma/MONAI
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 1,
        upsample_mode: str = "bilinear",
        pretrained: bool = True,
        progress: bool = True,
    ):
        super().__init__(
            out_channels=out_channels, upsample_mode=upsample_mode, pretrained=pretrained, progress=progress
        )

        self.init_proj = Convolution(
            spatial_dims=2,
            in_channels=in_channels,
            out_channels=3,
            kernel_size=1,
            act=("relu", {"inplace": True}),
            norm=Norm.BATCH,
            bias=False,
        )
Example #27
File: vnet.py Project: Nic-Ma/MONAI
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        act: Union[Tuple[str, Dict], str],
        bias: bool = False,
    ):
        super().__init__()

        conv_type: Type[Union[nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims]

        self.act_function1 = get_acti_layer(act, out_channels)
        self.conv_block = Convolution(
            spatial_dims=spatial_dims,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=5,
            act=None,
            norm=Norm.BATCH,
            bias=bias,
        )
        self.conv2 = conv_type(out_channels, out_channels, kernel_size=1)
Example #28
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        conv_out_channels: int,
        kernel_sizes: Sequence[int] = (1, 3, 3, 3),
        dilations: Sequence[int] = (1, 2, 4, 6),
        norm_type: Optional[Union[Tuple, str]] = "BATCH",
        acti_type: Optional[Union[Tuple, str]] = "LEAKYRELU",
        bias: bool = False,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
            in_channels: number of input channels.
            conv_out_channels: number of output channels of each atrous conv.
                The final number of output channels is conv_out_channels * len(kernel_sizes).
            kernel_sizes: a sequence of four convolutional kernel sizes.
                Defaults to (1, 3, 3, 3) for four (dilated) convolutions.
            dilations: a sequence of four convolutional dilation parameters.
                Defaults to (1, 2, 4, 6) for four (dilated) convolutions.
            norm_type: final kernel-size-one convolution normalization type.
                Defaults to batch norm.
            acti_type: final kernel-size-one convolution activation type.
                Defaults to leaky ReLU.
            bias: whether to have a bias term in convolution blocks. Defaults to False.
                According to `Performance Tuning Guide <https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html>`_,
                if a conv layer is directly followed by a batch norm layer, bias should be False.

        Raises:
            ValueError: When ``kernel_sizes`` length differs from ``dilations``.

        See also:

            :py:class:`monai.networks.layers.Act`
            :py:class:`monai.networks.layers.Conv`
            :py:class:`monai.networks.layers.Norm`

        """
        super().__init__()
        if len(kernel_sizes) != len(dilations):
            raise ValueError(
                "kernel_sizes and dilations length must match, "
                f"got kernel_sizes={len(kernel_sizes)} dilations={len(dilations)}."
            )
        pads = tuple(same_padding(k, d) for k, d in zip(kernel_sizes, dilations))

        self.convs = nn.ModuleList()
        for k, d, p in zip(kernel_sizes, dilations, pads):
            _conv = Conv[Conv.CONV, spatial_dims](
                in_channels=in_channels,
                out_channels=conv_out_channels,
                kernel_size=k,
                dilation=d,
                padding=p,
            )
            self.convs.append(_conv)

        out_channels = conv_out_channels * len(pads)  # final conv. output channels
        self.conv_k1 = Convolution(
            spatial_dims=spatial_dims,
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=1,
            act=acti_type,
            norm=norm_type,
            bias=bias,
        )
Example #29
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        conv_out_channels: int,
        kernel_sizes: Sequence[int] = (1, 3, 3, 3),
        dilations: Sequence[int] = (1, 2, 4, 6),
        norm_type=Norm.BATCH,
        acti_type=Act.LEAKYRELU,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2, or 3.
            in_channels: number of input channels.
            conv_out_channels: number of output channels of each atrous conv.
                The final number of output channels is conv_out_channels * len(kernel_sizes).
            kernel_sizes: a sequence of four convolutional kernel sizes.
                Defaults to (1, 3, 3, 3) for four (dilated) convolutions.
            dilations: a sequence of four convolutional dilation parameters.
                Defaults to (1, 2, 4, 6) for four (dilated) convolutions.
            norm_type: final kernel-size-one convolution normalization type.
                Defaults to batch norm.
            acti_type: final kernel-size-one convolution activation type.
                Defaults to leaky ReLU.

        Raises:
            ValueError: When ``kernel_sizes`` length differs from ``dilations``.

        See also:

            :py:class:`monai.networks.layers.Act`
            :py:class:`monai.networks.layers.Conv`
            :py:class:`monai.networks.layers.Norm`

        """
        super().__init__()
        if len(kernel_sizes) != len(dilations):
            raise ValueError(
                "kernel_sizes and dilations length must match, "
                f"got kernel_sizes={len(kernel_sizes)} dilations={len(dilations)}."
            )
        pads = tuple(same_padding(k, d) for k, d in zip(kernel_sizes, dilations))

        self.convs = nn.ModuleList()
        for k, d, p in zip(kernel_sizes, dilations, pads):
            _conv = Conv[Conv.CONV, spatial_dims](
                in_channels=in_channels,
                out_channels=conv_out_channels,
                kernel_size=k,
                dilation=d,
                padding=p,
            )
            self.convs.append(_conv)

        out_channels = conv_out_channels * len(pads)  # final conv. output channels
        self.conv_k1 = Convolution(
            dimensions=spatial_dims,
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=1,
            act=acti_type,
            norm=norm_type,
        )
Example #30
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        channels: Sequence[int],
        strides: Sequence[int],
        kernel_size: Union[Sequence[int], int] = 3,
        up_kernel_size: Union[Sequence[int], int] = 3,
        dropout: float = 0.0,
    ):
        super().__init__()
        self.dimensions = spatial_dims
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.channels = channels
        self.strides = strides
        self.kernel_size = kernel_size
        self.dropout = dropout

        head = ConvBlock(spatial_dims=spatial_dims,
                         in_channels=in_channels,
                         out_channels=channels[0],
                         dropout=dropout)
        reduce_channels = Convolution(
            spatial_dims=spatial_dims,
            in_channels=channels[0],
            out_channels=out_channels,
            kernel_size=1,
            strides=1,
            padding=0,
            conv_only=True,
        )
        self.up_kernel_size = up_kernel_size

        def _create_block(channels: Sequence[int],
                          strides: Sequence[int],
                          level: int = 0) -> nn.Module:
            if len(channels) > 2:
                subblock = _create_block(channels[1:],
                                         strides[1:],
                                         level=level + 1)
                return AttentionLayer(
                    spatial_dims=spatial_dims,
                    in_channels=channels[0],
                    out_channels=channels[1],
                    submodule=nn.Sequential(
                        ConvBlock(
                            spatial_dims=spatial_dims,
                            in_channels=channels[0],
                            out_channels=channels[1],
                            strides=strides[0],
                            dropout=self.dropout,
                        ),
                        subblock,
                    ),
                    dropout=dropout,
                )
            else:
                # the next layer is the bottom so stop recursion,
                # create the bottom layer as the subblock for this layer
                return self._get_bottom_layer(channels[0],
                                              channels[1],
                                              strides[0],
                                              level=level + 1)

        encdec = _create_block(self.channels, self.strides)
        self.model = nn.Sequential(head, encdec, reduce_channels)
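
Tying Examples 24 and 30 together: each decoder level runs its submodule, upsamples the result, gates the skip connection with the attention block, and merges the two paths. A sketch of AttentionLayer's forward pass (assumed from the constructor in Example 24; requires import torch at module level):

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        fromlower = self.upconv(self.submodule(x))  # decode deeper levels, then upsample
        att = self.attention(g=fromlower, x=x)      # attention-gate the skip features
        return self.merge(torch.cat((att, fromlower), dim=1))  # fuse skip and decoder paths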