Example No. 1
    def __init__(
        self,
        num_features: int,
        in_channels: int,
        out_channels: int,
        dropout_prob: float = 0.0,
        act: Union[str, tuple] = ("relu", {
            "inplace": True
        }),
        norm: Union[str, tuple] = "batch",
        kernel_size: int = 3,
    ) -> None:
        """
        Args:
            num_features: number of internal channels used for the layer.
            in_channels: number of input channels.
            out_channels: number of output channels.
            dropout_prob: dropout rate after each dense layer.
            act: activation type and arguments. Defaults to relu.
            norm: feature normalization type and arguments. Defaults to batch norm.
            kernel_size: size of the kernel for >1 convolutions (dependent on mode).
        """
        super().__init__()

        conv_type: Callable = Conv[Conv.CONV, 2]
        dropout_type: Callable = Dropout[Dropout.DROPOUT, 2]

        self.layers = nn.Sequential()

        self.layers.add_module(
            "preact_bna/bn",
            get_norm_layer(name=norm, spatial_dims=2, channels=in_channels))
        self.layers.add_module("preact_bna/relu", get_act_layer(name=act))
        self.layers.add_module(
            "conv1",
            conv_type(in_channels, num_features, kernel_size=1, bias=False))
        self.layers.add_module(
            "conv1/norm",
            get_norm_layer(name=norm, spatial_dims=2, channels=num_features))
        self.layers.add_module("conv1/relu2", get_act_layer(name=act))
        self.layers.add_module(
            "conv2",
            conv_type(num_features,
                      out_channels,
                      kernel_size=kernel_size,
                      padding=0,
                      groups=4,
                      bias=False))

        if dropout_prob > 0:
            self.layers.add_module("dropout", dropout_type(dropout_prob))
Example No. 2
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        act: Union[str, tuple] = ("relu", {"inplace": True}),
        norm: Union[str, tuple] = "batch",
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions of the input image.
            in_channels: number of input channels.
            out_channels: number of output channels.
            act: activation type and arguments. Defaults to relu.
            norm: feature normalization type and arguments. Defaults to batch norm.
        """
        super().__init__()

        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
        pool_type: Callable = Pool[Pool.AVG, spatial_dims]

        self.add_module("norm", get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels))
        self.add_module("relu", get_act_layer(name=act))
        self.add_module("conv", conv_type(in_channels, out_channels, kernel_size=1, bias=False))
        self.add_module("pool", pool_type(kernel_size=2, stride=2))
Example No. 3
    def __init__(
        self,
        ordering: str = "NDA",
        in_channels: Optional[int] = None,
        act: Optional[Union[Tuple, str]] = "RELU",
        norm: Optional[Union[Tuple, str]] = None,
        norm_dim: Optional[int] = None,
        dropout: Optional[Union[Tuple, str, float]] = None,
        dropout_dim: Optional[int] = None,
    ) -> None:
        super().__init__()

        op_dict = {"A": None, "D": None, "N": None}
        # define the normalization type and the arguments to the constructor
        if norm is not None:
            if norm_dim is None and dropout_dim is None:
                raise ValueError("norm_dim or dropout_dim needs to be specified.")
            op_dict["N"] = get_norm_layer(name=norm, spatial_dims=norm_dim or dropout_dim, channels=in_channels)

        # define the activation type and the arguments to the constructor
        if act is not None:
            op_dict["A"] = get_act_layer(act)

        if dropout is not None:
            if norm_dim is None and dropout_dim is None:
                raise ValueError("norm_dim or dropout_dim needs to be specified.")
            op_dict["D"] = get_dropout_layer(name=dropout, dropout_dim=dropout_dim or norm_dim)

        for item in ordering.upper():
            if item not in op_dict:
                raise ValueError(f"ordering must be a string of {op_dict}, got {item} in it.")
            if op_dict[item] is not None:
                self.add_module(item, op_dict[item])
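
This matches the constructor of MONAI's ADN block: ordering selects which of normalization (N), dropout (D), and activation (A) are applied, and in what order. A hedged usage sketch, assuming the excerpt is monai.networks.blocks.ADN:

    import torch
    from monai.networks.blocks import ADN

    # "NDA": norm, then dropout, then activation; ops left as None are skipped.
    adn = ADN(ordering="NDA", in_channels=8, act="relu", norm="batch",
              norm_dim=2, dropout=0.1, dropout_dim=2)
    y = adn(torch.randn(1, 8, 16, 16))  # shape preserved: (1, 8, 16, 16)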
Example No. 4
 def __init__(
     self,
     spatial_dims: int,
     in_channels: int,
     out_channels: int,
     kernel_size: Union[Sequence[int], int],
     stride: Union[Sequence[int], int],
     norm_name: Union[tuple, str],
     act_name: Union[tuple, str] = ("leakyrelu", {
         "inplace": True,
         "negative_slope": 0.01
     }),
     dropout: Optional[Union[tuple, str, float]] = None,
 ):
     super().__init__()
     self.conv1 = get_conv_layer(
         spatial_dims,
         in_channels,
         out_channels,
         kernel_size=kernel_size,
         stride=stride,
         dropout=dropout,
         conv_only=True,
     )
     self.conv2 = get_conv_layer(spatial_dims,
                                 out_channels,
                                 out_channels,
                                 kernel_size=kernel_size,
                                 stride=1,
                                 dropout=dropout,
                                 conv_only=True)
     self.conv3 = get_conv_layer(spatial_dims,
                                 in_channels,
                                 out_channels,
                                 kernel_size=1,
                                 stride=stride,
                                 dropout=dropout,
                                 conv_only=True)
     self.lrelu = get_act_layer(name=act_name)
     self.norm1 = get_norm_layer(name=norm_name,
                                 spatial_dims=spatial_dims,
                                 channels=out_channels)
     self.norm2 = get_norm_layer(name=norm_name,
                                 spatial_dims=spatial_dims,
                                 channels=out_channels)
     self.norm3 = get_norm_layer(name=norm_name,
                                 spatial_dims=spatial_dims,
                                 channels=out_channels)
     self.downsample = in_channels != out_channels
     stride_np = np.atleast_1d(stride)
     if not np.all(stride_np == 1):
         self.downsample = True
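
The downsample flag computed at the end is typically consumed in forward to decide whether the skip connection needs the 1x1 conv3/norm3 projection (whenever the channel count changes or the stride is not 1). A hedged sketch of the usual residual forward pass, not necessarily the exact source:

    def forward(self, inp):
        residual = inp
        out = self.lrelu(self.norm1(self.conv1(inp)))
        out = self.norm2(self.conv2(out))
        if self.downsample:
            # project the skip path so shapes match before the addition
            residual = self.norm3(self.conv3(residual))
        return self.lrelu(out + residual)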
Example No. 5
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        growth_rate: int,
        bn_size: int,
        dropout_prob: float,
        act: Union[str, tuple] = ("relu", {"inplace": True}),
        norm: Union[str, tuple] = "batch",
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions of the input image.
            in_channels: number of input channels.
            growth_rate: how many filters to add each layer (k in the paper).
            bn_size: multiplicative factor for the number of bottleneck layers
                (i.e., bn_size * k features in the bottleneck layer).
            dropout_prob: dropout rate after each dense layer.
            act: activation type and arguments. Defaults to relu.
            norm: feature normalization type and arguments. Defaults to batch norm.
        """
        super().__init__()

        out_channels = bn_size * growth_rate
        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
        dropout_type: Callable = Dropout[Dropout.DROPOUT, spatial_dims]

        self.layers = nn.Sequential()

        self.layers.add_module("norm1", get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels))
        self.layers.add_module("relu1", get_act_layer(name=act))
        self.layers.add_module("conv1", conv_type(in_channels, out_channels, kernel_size=1, bias=False))

        self.layers.add_module("norm2", get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=out_channels))
        self.layers.add_module("relu2", get_act_layer(name=act))
        self.layers.add_module("conv2", conv_type(out_channels, growth_rate, kernel_size=3, padding=1, bias=False))

        if dropout_prob > 0:
            self.layers.add_module("dropout", dropout_type(dropout_prob))
Example No. 6
 def __init__(
     self,
     spatial_dims: int,
     in_channels: int,
     out_channels: int,
     kernel_size: Union[Sequence[int], int],
     stride: Union[Sequence[int], int],
     norm_name: Union[Tuple, str],
 ):
      super().__init__()
     self.conv1 = get_conv_layer(
         spatial_dims,
         in_channels,
         out_channels,
         kernel_size=kernel_size,
         stride=stride,
         conv_only=True,
     )
     self.conv2 = get_conv_layer(
         spatial_dims,
         out_channels,
         out_channels,
         kernel_size=kernel_size,
         stride=1,
         conv_only=True,
     )
     self.conv3 = get_conv_layer(
         spatial_dims,
         in_channels,
         out_channels,
         kernel_size=1,
         stride=stride,
         conv_only=True,
     )
     self.lrelu = get_act_layer(("leakyrelu", {
         "inplace": True,
         "negative_slope": 0.01
     }))
     self.norm1 = get_norm_layer(name=norm_name,
                                 spatial_dims=spatial_dims,
                                 channels=out_channels)
     self.norm2 = get_norm_layer(name=norm_name,
                                 spatial_dims=spatial_dims,
                                 channels=out_channels)
     self.norm3 = get_norm_layer(name=norm_name,
                                 spatial_dims=spatial_dims,
                                 channels=out_channels)
     self.downsample = in_channels != out_channels
     stride_np = np.atleast_1d(stride)
     if not np.all(stride_np == 1):
         self.downsample = True
Example No. 7
    def __init__(
            self,
            in_channel: int,
            out_channel: int,
            spatial_dims: int = 3,
            act_name: Union[Tuple, str] = "RELU",
            norm_name: Union[Tuple, str] = ("INSTANCE", {
                "affine": True
            }),
    ):
        """
        Args:
            in_channel: number of input channels.
            out_channel: number of output channels.
            spatial_dims: number of spatial dimensions.
            act_name: activation layer type and arguments.
            norm_name: feature normalization type and arguments.
        """
        super().__init__()
        self._in_channel = in_channel
        self._out_channel = out_channel
        self._spatial_dims = spatial_dims
        if self._spatial_dims not in (2, 3):
            raise ValueError("spatial_dims must be 2 or 3.")

        conv_type = Conv[Conv.CONV, self._spatial_dims]

        self.act = get_act_layer(name=act_name)
        self.conv_1 = conv_type(
            in_channels=self._in_channel,
            out_channels=self._out_channel // 2,
            kernel_size=1,
            stride=2,
            padding=0,
            groups=1,
            bias=False,
            dilation=1,
        )
        self.conv_2 = conv_type(
            in_channels=self._in_channel,
            out_channels=self._out_channel - self._out_channel // 2,
            kernel_size=1,
            stride=2,
            padding=0,
            groups=1,
            bias=False,
            dilation=1,
        )
        self.norm = get_norm_layer(name=norm_name,
                                   spatial_dims=self._spatial_dims,
                                   channels=self._out_channel)
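
conv_1 and conv_2 each halve the spatial size with stride 2 and together produce out_channel channels. The usual factorized-reduce trick (as in DARTS-style cells) is to shift the second branch's input by one voxel so the two stride-2 convolutions sample complementary positions. A hedged sketch of the 3D forward pass:

    def forward(self, x):
        x = self.act(x)
        # offset the second branch so the two stride-2 convs interleave
        out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:, 1:])], dim=1)
        return self.norm(out)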
Example No. 8
    def __init__(
        self,
        spatial_dims: int = 3,
        init_filters: int = 8,
        in_channels: int = 1,
        out_channels: int = 2,
        dropout_prob: Optional[float] = None,
        act: Union[Tuple, str] = ("RELU", {
            "inplace": True
        }),
        norm: Union[Tuple, str] = ("GROUP", {
            "num_groups": 8
        }),
        norm_name: str = "",
        num_groups: int = 8,
        use_conv_final: bool = True,
        blocks_down: tuple = (1, 2, 2, 4),
        blocks_up: tuple = (1, 1, 1),
        upsample_mode: Union[UpsampleMode, str] = UpsampleMode.NONTRAINABLE,
    ):
        super().__init__()

        if spatial_dims not in (2, 3):
            raise ValueError("`spatial_dims` can only be 2 or 3.")

        self.spatial_dims = spatial_dims
        self.init_filters = init_filters
        self.in_channels = in_channels
        self.blocks_down = blocks_down
        self.blocks_up = blocks_up
        self.dropout_prob = dropout_prob
        self.act = act  # input options
        self.act_mod = get_act_layer(act)
        if norm_name:
            if norm_name.lower() != "group":
                raise ValueError(
                    f"Deprecating option 'norm_name={norm_name}', please use 'norm' instead."
                )
            norm = ("group", {"num_groups": num_groups})
        self.norm = norm
        self.upsample_mode = UpsampleMode(upsample_mode)
        self.use_conv_final = use_conv_final
        self.convInit = get_conv_layer(spatial_dims, in_channels, init_filters)
        self.down_layers = self._make_down_layers()
        self.up_layers, self.up_samples = self._make_up_layers()
        self.conv_final = self._make_final_conv(out_channels)

        if dropout_prob is not None:
            self.dropout = Dropout[Dropout.DROPOUT, spatial_dims](dropout_prob)
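
This is the constructor of MONAI's SegResNet (including a deprecation shim that maps the old norm_name/num_groups options onto norm). A hedged usage sketch:

    import torch
    from monai.networks.nets import SegResNet

    net = SegResNet(spatial_dims=3, init_filters=8, in_channels=1, out_channels=2)
    logits = net(torch.randn(1, 1, 32, 32, 32))  # -> (1, 2, 32, 32, 32)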
Example No. 9
    def __init__(
            self,
            in_channel: int,
            out_channel: int,
            spatial_dims: int = 3,
            act_name: Union[Tuple, str] = "RELU",
            norm_name: Union[Tuple, str] = ("INSTANCE", {
                "affine": True
            }),
    ):
        """
        Args:
            in_channel: number of input channels.
            out_channel: number of output channels.
            spatial_dims: number of spatial dimensions.
            act_name: activation layer type and arguments.
            norm_name: feature normalization type and arguments.
        """
        super().__init__()
        self._in_channel = in_channel
        self._out_channel = out_channel
        self._spatial_dims = spatial_dims
        if self._spatial_dims not in (2, 3):
            raise ValueError("spatial_dims must be 2 or 3.")

        conv_type = Conv[Conv.CONV, self._spatial_dims]
        mode = "trilinear" if self._spatial_dims == 3 else "bilinear"
        self.add_module(
            "up",
            torch.nn.Upsample(scale_factor=2, mode=mode, align_corners=True))
        self.add_module("acti", get_act_layer(name=act_name))
        self.add_module(
            "conv",
            conv_type(
                in_channels=self._in_channel,
                out_channels=self._out_channel,
                kernel_size=1,
                stride=1,
                padding=0,
                groups=1,
                bias=False,
                dilation=1,
            ),
        )
        self.add_module(
            "norm",
            get_norm_layer(name=norm_name,
                           spatial_dims=self._spatial_dims,
                           channels=self._out_channel))
Example No. 10
    def __init__(
            self,
            in_channel: int,
            out_channel: int,
            kernel_size: int = 3,
            padding: int = 1,
            spatial_dims: int = 3,
            act_name: Union[Tuple, str] = "RELU",
            norm_name: Union[Tuple, str] = ("INSTANCE", {
                "affine": True
            }),
    ):
        """
        Args:
            in_channel: number of input channels.
            out_channel: number of output channels.
            kernel_size: kernel size of the convolution.
            padding: padding size of the convolution.
            spatial_dims: number of spatial dimensions.
            act_name: activation layer type and arguments.
            norm_name: feature normalization type and arguments.
        """
        super().__init__()
        self._in_channel = in_channel
        self._out_channel = out_channel
        self._spatial_dims = spatial_dims

        conv_type = Conv[Conv.CONV, self._spatial_dims]
        self.add_module("acti", get_act_layer(name=act_name))
        self.add_module(
            "conv",
            conv_type(
                in_channels=self._in_channel,
                out_channels=self._out_channel,
                kernel_size=kernel_size,
                stride=1,
                padding=padding,
                groups=1,
                bias=False,
                dilation=1,
            ),
        )
        self.add_module(
            "norm",
            get_norm_layer(name=norm_name,
                           spatial_dims=self._spatial_dims,
                           channels=self._out_channel))
Example No. 11
    def __init__(self,
                 in_channels: int,
                 act: Union[str, tuple] = ("relu", {
                     "inplace": True
                 }),
                 norm: Union[str, tuple] = "batch") -> None:
        """
        Args:
            in_channels: number of input channels.
            act: activation type and arguments. Defaults to relu.
            norm: feature normalization type and arguments. Defaults to batch norm.
        """
        super().__init__()

        self.add_module(
            "norm",
            get_norm_layer(name=norm, spatial_dims=2, channels=in_channels))
        self.add_module("relu", get_act_layer(name=act))
Example No. 12
 def __init__(
     self,
     spatial_dims: int,
     in_channels: int,
     out_channels: int,
     kernel_size: Union[Sequence[int], int],
     stride: Union[Sequence[int], int],
     norm_name: Union[Tuple, str],
     act_name: Union[Tuple, str] = ("leakyrelu", {
         "inplace": True,
         "negative_slope": 0.01
     }),
     dropout: Optional[Union[Tuple, str, float]] = None,
 ):
     super().__init__()
     self.conv1 = get_conv_layer(
         spatial_dims,
         in_channels,
         out_channels,
         kernel_size=kernel_size,
         stride=stride,
         dropout=dropout,
         act=None,
         norm=None,
         conv_only=False,
     )
     self.conv2 = get_conv_layer(
         spatial_dims,
         out_channels,
         out_channels,
         kernel_size=kernel_size,
         stride=1,
         dropout=dropout,
         act=None,
         norm=None,
         conv_only=False,
     )
     self.lrelu = get_act_layer(name=act_name)
     self.norm1 = get_norm_layer(name=norm_name,
                                 spatial_dims=spatial_dims,
                                 channels=out_channels)
     self.norm2 = get_norm_layer(name=norm_name,
                                 spatial_dims=spatial_dims,
                                 channels=out_channels)
Example No. 13
    def __init__(
        self,
        dim: int,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        act: Optional[Union[Tuple, str]] = None,
        scale_factor: float = 1.0,
    ):
        """
        Segmentation head.
        This class refers to `segmentation_models.pytorch
        <https://github.com/qubvel/segmentation_models.pytorch>`_.

        Args:
            dim: number of spatial dimensions.
            in_channels: number of input channels for the block.
            out_channels: number of output channels for the block.
            kernel_size: kernel size for the conv layer.
            act: activation type and arguments.
            scale_factor: multiplier for spatial size. Has to match input size if it is a tuple.

        """
        conv_layer = Conv[Conv.CONV, dim](
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=kernel_size // 2,
        )
        up_layer: nn.Module = nn.Identity()
        if scale_factor > 1.0:
            up_layer = UpSample(
                spatial_dims=dim,
                scale_factor=scale_factor,
                mode="nontrainable",
                pre_conv=None,
                interp_mode=InterpolateMode.LINEAR,
            )
        if act is not None:
            act_layer = get_act_layer(act)
        else:
            act_layer = nn.Identity()
        super().__init__(conv_layer, up_layer, act_layer)
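
Since the layers are handed straight to nn.Sequential.__init__, the head is conv -> optional upsample -> optional activation. A hedged usage sketch (the class name SegmentationHead is an assumption based on the docstring):

    import torch

    head = SegmentationHead(dim=2, in_channels=16, out_channels=3, scale_factor=2.0)
    out = head(torch.randn(1, 16, 64, 64))  # -> (1, 3, 128, 128)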
Example No. 14
    def __init__(
        self,
        spatial_dims: int = 3,
        init_filters: int = 8,
        in_channels: int = 1,
        out_channels: int = 2,
        dropout_prob: Optional[float] = None,
        act: Union[Tuple, str] = ("RELU", {
            "inplace": True
        }),
        norm: Union[Tuple, str] = ("GROUP", {
            "num_groups": 8
        }),
        use_conv_final: bool = True,
        blocks_down: tuple = (1, 2, 2, 4),
        blocks_up: tuple = (1, 1, 1),
        upsample_mode: Union[UpsampleMode, str] = UpsampleMode.NONTRAINABLE,
    ):
        super().__init__()

        if spatial_dims not in (2, 3):
            raise ValueError("`spatial_dims` can only be 2 or 3.")

        self.spatial_dims = spatial_dims
        self.init_filters = init_filters
        self.in_channels = in_channels
        self.blocks_down = blocks_down
        self.blocks_up = blocks_up
        self.dropout_prob = dropout_prob
        self.act = get_act_layer(act)
        self.norm = norm
        self.upsample_mode = UpsampleMode(upsample_mode)
        self.use_conv_final = use_conv_final
        self.convInit = get_conv_layer(spatial_dims, in_channels, init_filters)
        self.down_layers = self._make_down_layers()
        self.up_layers, self.up_samples = self._make_up_layers()
        self.conv_final = self._make_final_conv(out_channels)

        if dropout_prob is not None:
            self.dropout = Dropout[Dropout.DROPOUT, spatial_dims](dropout_prob)
Example No. 15
    def __init__(
            self,
            spatial_dims: int,
            in_channels: int,
            norm: Union[Tuple, str],
            kernel_size: int = 3,
            act: Union[Tuple, str] = ("RELU", {
                "inplace": True
            }),
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions, could be 1, 2 or 3.
            in_channels: number of input channels.
            norm: feature normalization type and arguments.
            kernel_size: convolution kernel size, the value should be an odd number. Defaults to 3.
            act: activation type and arguments. Defaults to ``RELU``.
        """

        super().__init__()

        if kernel_size % 2 != 1:
            raise ValueError("kernel_size should be an odd number.")

        self.norm1 = get_norm_layer(name=norm,
                                    spatial_dims=spatial_dims,
                                    channels=in_channels)
        self.norm2 = get_norm_layer(name=norm,
                                    spatial_dims=spatial_dims,
                                    channels=in_channels)
        self.act = get_act_layer(act)
        self.conv1 = get_conv_layer(spatial_dims,
                                    in_channels=in_channels,
                                    out_channels=in_channels)
        self.conv2 = get_conv_layer(spatial_dims,
                                    in_channels=in_channels,
                                    out_channels=in_channels)
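
This is the pre-activation residual unit used by SegResNet: each half applies norm -> act -> conv, and the identity skip is added at the end, so the block preserves the channel count. A hedged sketch of the forward pass:

    def forward(self, x):
        identity = x
        x = self.conv1(self.act(self.norm1(x)))
        x = self.conv2(self.act(self.norm2(x)))
        return x + identity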
Example No. 16
 def __init__(
     self,
     spatial_dims: int,
     in_channels: int,
     out_channels: int,
     kernel_size: Union[Sequence[int], int],
     stride: Union[Sequence[int], int],
     norm_name: Union[Tuple, str],
 ):
      super().__init__()
     self.conv1 = get_conv_layer(
         spatial_dims,
         in_channels,
         out_channels,
         kernel_size=kernel_size,
         stride=stride,
         conv_only=True,
     )
     self.conv2 = get_conv_layer(
         spatial_dims,
         out_channels,
         out_channels,
         kernel_size=kernel_size,
         stride=1,
         conv_only=True,
     )
     self.lrelu = get_act_layer(("leakyrelu", {
         "inplace": True,
         "negative_slope": 0.01
     }))
     self.norm1 = get_norm_layer(name=norm_name,
                                 spatial_dims=spatial_dims,
                                 channels=out_channels)
     self.norm2 = get_norm_layer(name=norm_name,
                                 spatial_dims=spatial_dims,
                                 channels=out_channels)
Example No. 17
    def __init__(
            self,
            in_channel: int,
            out_channel: int,
            kernel_size: int,
            padding: int,
            mode: int = 0,
            act_name: Union[Tuple, str] = "RELU",
            norm_name: Union[Tuple, str] = ("INSTANCE", {
                "affine": True
            }),
    ):
        """
        Args:
            in_channel: number of input channels.
            out_channel: number of output channels.
            kernel_size: kernel size to be expanded to 3D.
            padding: padding size to be expanded to 3D.
            mode: mode for the anisotropic kernels:

                - 0: ``(k, k, 1)``, ``(1, 1, k)``,
                - 1: ``(k, 1, k)``, ``(1, k, 1)``,
                - 2: ``(1, k, k)``, ``(k, 1, 1)``.

            act_name: activation layer type and arguments.
            norm_name: feature normalization type and arguments.
        """
        super().__init__()
        self._in_channel = in_channel
        self._out_channel = out_channel
        self._p3dmode = int(mode)

        conv_type = Conv[Conv.CONV, 3]

        if self._p3dmode == 0:  # (k, k, 1), (1, 1, k)
            kernel_size0 = (kernel_size, kernel_size, 1)
            kernel_size1 = (1, 1, kernel_size)
            padding0 = (padding, padding, 0)
            padding1 = (0, 0, padding)
        elif self._p3dmode == 1:  # (k, 1, k), (1, k, 1)
            kernel_size0 = (kernel_size, 1, kernel_size)
            kernel_size1 = (1, kernel_size, 1)
            padding0 = (padding, 0, padding)
            padding1 = (0, padding, 0)
        elif self._p3dmode == 2:  # (1, k, k), (k, 1, 1)
            kernel_size0 = (1, kernel_size, kernel_size)
            kernel_size1 = (kernel_size, 1, 1)
            padding0 = (0, padding, padding)
            padding1 = (padding, 0, 0)
        else:
            raise ValueError("`mode` must be 0, 1, or 2.")

        self.add_module("acti", get_act_layer(name=act_name))
        self.add_module(
            "conv",
            conv_type(
                in_channels=self._in_channel,
                out_channels=self._in_channel,
                kernel_size=kernel_size0,
                stride=1,
                padding=padding0,
                groups=1,
                bias=False,
                dilation=1,
            ),
        )
        self.add_module(
            "conv_1",
            conv_type(
                in_channels=self._in_channel,
                out_channels=self._out_channel,
                kernel_size=kernel_size1,
                stride=1,
                padding=padding1,
                groups=1,
                bias=False,
                dilation=1,
            ),
        )
        self.add_module(
            "norm",
            get_norm_layer(name=norm_name,
                           spatial_dims=3,
                           channels=self._out_channel))
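
Factorizing a full k x k x k kernel into (k, k, 1) followed by (1, 1, k) (mode 0) keeps the 3D receptive field while cutting parameters per channel pair roughly from k**3 to k**2 + k. A quick hedged check of the parameter counts:

    import torch.nn as nn

    k, c = 3, 16
    full = nn.Conv3d(c, c, kernel_size=(k, k, k), padding=1, bias=False)
    p3d = nn.Sequential(
        nn.Conv3d(c, c, kernel_size=(k, k, 1), padding=(1, 1, 0), bias=False),
        nn.Conv3d(c, c, kernel_size=(1, 1, k), padding=(0, 0, 1), bias=False),
    )
    sum(p.numel() for p in full.parameters())  # c*c*k**3 = 6912
    sum(p.numel() for p in p3d.parameters())   # c*c*(k**2 + k) = 3072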
Example No. 18
    def __init__(
        self,
        dints_space,
        in_channels: int,
        num_classes: int,
        act_name: Union[Tuple, str] = "RELU",
        norm_name: Union[Tuple, str] = ("INSTANCE", {
            "affine": True
        }),
        spatial_dims: int = 3,
        use_downsample: bool = True,
        node_a=None,
    ):
        super().__init__()

        self.dints_space = dints_space
        self.filter_nums = dints_space.filter_nums
        self.num_blocks = dints_space.num_blocks
        self.num_depths = dints_space.num_depths
        if spatial_dims not in (2, 3):
            raise NotImplementedError(
                f"spatial_dims {spatial_dims} is not supported.")
        self._spatial_dims = spatial_dims
        if node_a is None:
            self.node_a = torch.ones((self.num_blocks + 1, self.num_depths))
        else:
            self.node_a = node_a

        # define stem operations for every block
        conv_type = Conv[Conv.CONV, spatial_dims]
        self.stem_down = nn.ModuleDict()
        self.stem_up = nn.ModuleDict()
        self.stem_finals = nn.Sequential(
            ActiConvNormBlock(
                self.filter_nums[0],
                self.filter_nums[0],
                act_name=act_name,
                norm_name=norm_name,
                spatial_dims=spatial_dims,
            ),
            conv_type(
                in_channels=self.filter_nums[0],
                out_channels=num_classes,
                kernel_size=1,
                stride=1,
                padding=0,
                groups=1,
                bias=True,
                dilation=1,
            ),
        )
        mode = "trilinear" if self._spatial_dims == 3 else "bilinear"
        for res_idx in range(self.num_depths):
            # define downsample stems before DiNTS search
            if use_downsample:
                self.stem_down[str(res_idx)] = StemTS(
                    nn.Upsample(scale_factor=1 / (2**res_idx),
                                mode=mode,
                                align_corners=True),
                    conv_type(
                        in_channels=in_channels,
                        out_channels=self.filter_nums[res_idx],
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        groups=1,
                        bias=False,
                        dilation=1,
                    ),
                    get_norm_layer(name=norm_name,
                                   spatial_dims=spatial_dims,
                                   channels=self.filter_nums[res_idx]),
                    get_act_layer(name=act_name),
                    conv_type(
                        in_channels=self.filter_nums[res_idx],
                        out_channels=self.filter_nums[res_idx + 1],
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        groups=1,
                        bias=False,
                        dilation=1,
                    ),
                    get_norm_layer(name=norm_name,
                                   spatial_dims=spatial_dims,
                                   channels=self.filter_nums[res_idx + 1]),
                )
                self.stem_up[str(res_idx)] = StemTS(
                    get_act_layer(name=act_name),
                    conv_type(
                        in_channels=self.filter_nums[res_idx + 1],
                        out_channels=self.filter_nums[res_idx],
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        groups=1,
                        bias=False,
                        dilation=1,
                    ),
                    get_norm_layer(name=norm_name,
                                   spatial_dims=spatial_dims,
                                   channels=self.filter_nums[res_idx]),
                    nn.Upsample(scale_factor=2, mode=mode, align_corners=True),
                )

            else:
                self.stem_down[str(res_idx)] = StemTS(
                    nn.Upsample(scale_factor=1 / (2**res_idx),
                                mode=mode,
                                align_corners=True),
                    conv_type(
                        in_channels=in_channels,
                        out_channels=self.filter_nums[res_idx],
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        groups=1,
                        bias=False,
                        dilation=1,
                    ),
                    get_norm_layer(name=norm_name,
                                   spatial_dims=spatial_dims,
                                   channels=self.filter_nums[res_idx]),
                )
                self.stem_up[str(res_idx)] = StemTS(
                    get_act_layer(name=act_name),
                    conv_type(
                        in_channels=self.filter_nums[res_idx],
                        out_channels=self.filter_nums[max(res_idx - 1, 0)],
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        groups=1,
                        bias=False,
                        dilation=1,
                    ),
                    get_norm_layer(name=norm_name,
                                   spatial_dims=spatial_dims,
                                   channels=self.filter_nums[max(
                                       res_idx - 1, 0)]),
                    nn.Upsample(scale_factor=2**(res_idx != 0),
                                mode=mode,
                                align_corners=True),
                )
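
Each resolution depth gets its own stem. The down stems first rescale the input by 1 / 2**res_idx so every depth of the DiNTS search space receives a correctly sized feature map, and in the no-downsample branch the expression 2**(res_idx != 0) evaluates to 1 at the top depth and 2 elsewhere, so only non-top depths upsample. A small hedged illustration:

    num_depths = 4
    down_scales = [1 / (2 ** i) for i in range(num_depths)]  # [1.0, 0.5, 0.25, 0.125]
    up_scales = [2 ** (i != 0) for i in range(num_depths)]   # [1, 2, 2, 2]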
Example No. 19
    def __init__(
        self,
        num_features: int,
        in_channels: int,
        out_channels: int,
        dropout_prob: float = 0.0,
        act: Union[str, tuple] = ("relu", {
            "inplace": True
        }),
        norm: Union[str, tuple] = "batch",
        drop_first_norm_relu: int = 0,
        kernel_size: int = 3,
    ) -> None:
        """Dense Convolutional Block.

        References:
            Huang, Gao, et al. "Densely connected convolutional networks."
            Proceedings of the IEEE conference on computer vision and
            pattern recognition. 2017.

        Args:
            num_features: number of internal channels used for the layer.
            in_channels: number of input channels.
            out_channels: number of output channels.
            dropout_prob: dropout rate after each dense layer.
            act: activation type and arguments. Defaults to relu.
            norm: feature normalization type and arguments. Defaults to batch norm.
            drop_first_norm_relu: if nonzero, omit the first norm/relu for the first layer.
            kernel_size: size of the kernel for >1 convolutions (dependent on mode).
        """
        super().__init__()

        self.layers = nn.Sequential()
        conv_type: Callable = Conv[Conv.CONV, 2]
        dropout_type: Callable = Dropout[Dropout.DROPOUT, 2]

        if not drop_first_norm_relu:
            self.layers.add_module(
                "preact_norm",
                get_norm_layer(name=norm, spatial_dims=2,
                               channels=in_channels))
            self.layers.add_module("preact_relu", get_act_layer(name=act))

        self.layers.add_module(
            "conv1",
            conv_type(in_channels,
                      num_features,
                      kernel_size=1,
                      padding=0,
                      bias=False))
        self.layers.add_module(
            "norm2",
            get_norm_layer(name=norm, spatial_dims=2, channels=num_features))
        self.layers.add_module("relu2", get_act_layer(name=act))

        if in_channels != 64 and drop_first_norm_relu:
            self.layers.add_module(
                "conv2",
                conv_type(num_features,
                          num_features,
                          kernel_size=kernel_size,
                          stride=2,
                          padding=2,
                          bias=False))
        else:
            self.layers.add_module(
                "conv2",
                conv_type(num_features,
                          num_features,
                          kernel_size=1,
                          padding=0,
                          bias=False))

        self.layers.add_module(
            "norm3",
            get_norm_layer(name=norm, spatial_dims=2, channels=num_features))
        self.layers.add_module("relu3", get_act_layer(name=act))
        self.layers.add_module(
            "conv3",
            conv_type(num_features,
                      out_channels,
                      kernel_size=1,
                      padding=0,
                      bias=False))

        if dropout_prob > 0:
            self.layers.add_module("dropout", dropout_type(dropout_prob))
Example No. 20
    def __init__(
        self,
        spatial_dims: int,
        in_channels: int,
        out_channels: int,
        init_features: int = 64,
        growth_rate: int = 32,
        block_config: Sequence[int] = (6, 12, 24, 16),
        bn_size: int = 4,
        act: Union[str, tuple] = ("relu", {"inplace": True}),
        norm: Union[str, tuple] = "batch",
        dropout_prob: float = 0.0,
    ) -> None:

        super().__init__()

        conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims]
        pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims]
        avg_pool_type: Type[Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d]] = Pool[
            Pool.ADAPTIVEAVG, spatial_dims
        ]

        self.features = nn.Sequential(
            OrderedDict(
                [
                    ("conv0", conv_type(in_channels, init_features, kernel_size=7, stride=2, padding=3, bias=False)),
                    ("norm0", get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=init_features)),
                    ("relu0", get_act_layer(name=act)),
                    ("pool0", pool_type(kernel_size=3, stride=2, padding=1)),
                ]
            )
        )

        in_channels = init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                spatial_dims=spatial_dims,
                layers=num_layers,
                in_channels=in_channels,
                bn_size=bn_size,
                growth_rate=growth_rate,
                dropout_prob=dropout_prob,
                act=act,
                norm=norm,
            )
            self.features.add_module(f"denseblock{i + 1}", block)
            in_channels += num_layers * growth_rate
            if i == len(block_config) - 1:
                self.features.add_module(
                    "norm5", get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels)
                )
            else:
                _out_channels = in_channels // 2
                trans = _Transition(
                    spatial_dims, in_channels=in_channels, out_channels=_out_channels, act=act, norm=norm
                )
                self.features.add_module(f"transition{i + 1}", trans)
                in_channels = _out_channels

        # pooling and classification
        self.class_layers = nn.Sequential(
            OrderedDict(
                [
                    ("relu", get_act_layer(name=act)),
                    ("pool", avg_pool_type(1)),
                    ("flatten", nn.Flatten(1)),
                    ("out", nn.Linear(in_channels, out_channels)),
                ]
            )
        )

        for m in self.modules():
            if isinstance(m, conv_type):
                nn.init.kaiming_normal_(torch.as_tensor(m.weight))
            elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
                nn.init.constant_(torch.as_tensor(m.weight), 1)
                nn.init.constant_(torch.as_tensor(m.bias), 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(torch.as_tensor(m.bias), 0)
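
This is MONAI's DenseNet constructor; the defaults reproduce the DenseNet-121 configuration (blocks of 6/12/24/16 layers, growth rate 32). A hedged usage sketch:

    import torch
    from monai.networks.nets import DenseNet

    net = DenseNet(spatial_dims=2, in_channels=3, out_channels=10)
    logits = net(torch.randn(2, 3, 64, 64))  # -> (2, 10)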
Example No. 21
    def __init__(
        self,
        decode_config: Sequence[int] = (8, 4),
        act: Union[str, tuple] = ("relu", {
            "inplace": True
        }),
        norm: Union[str, tuple] = "batch",
        dropout_prob: float = 0.0,
        out_channels: int = 2,
        kernel_size: int = 3,
    ) -> None:
        """
        Args:
            decode_config: number of layers for each block.
            act: activation type and arguments. Defaults to relu.
            norm: feature normalization type and arguments. Defaults to batch norm.
            dropout_prob: dropout rate after each dense layer.
            out_channels: number of output channels.
            kernel_size: size of the kernel for >1 convolutions (dependent on mode).
        """
        super().__init__()
        conv_type: Callable = Conv[Conv.CONV, 2]

        # decode branches
        _in_channels = 1024
        _num_features = 128
        _out_channels = 32

        self.decoder_blocks = nn.Sequential()
        for i, num_layers in enumerate(decode_config):
            block = _DecoderBlock(
                layers=num_layers,
                num_features=_num_features,
                in_channels=_in_channels,
                out_channels=_out_channels,
                dropout_prob=dropout_prob,
                act=act,
                norm=norm,
                kernel_size=kernel_size,
            )
            self.decoder_blocks.add_module(f"decoderblock{i + 1}", block)
            _in_channels = 512

        # output layers
        self.output_features = nn.Sequential()
        _i = len(decode_config)
        _pad_size = (kernel_size - 1) // 2
        _seq_block = nn.Sequential(
            OrderedDict([("conva",
                          conv_type(256,
                                    64,
                                    kernel_size=kernel_size,
                                    stride=1,
                                    bias=False,
                                    padding=_pad_size))]))

        self.output_features.add_module(f"decoderblock{_i + 1}", _seq_block)

        _seq_block = nn.Sequential(
            OrderedDict([
                ("norm", get_norm_layer(name=norm, spatial_dims=2,
                                        channels=64)),
                ("relu", get_act_layer(name=act)),
                ("conv", conv_type(64, out_channels, kernel_size=1, stride=1)),
            ]))

        self.output_features.add_module(f"decoderblock{_i + 2}", _seq_block)

        self.upsample = UpSample(2,
                                 scale_factor=2,
                                 mode=UpsampleMode.NONTRAINABLE,
                                 interp_mode=InterpolateMode.BILINEAR,
                                 bias=False)
Example No. 22
    def __init__(
        self,
        mode: Mode = Mode.FAST,
        in_channels: int = 3,
        out_classes: int = 0,
        act: Union[str, tuple] = ("relu", {
            "inplace": True
        }),
        norm: Union[str, tuple] = "batch",
        dropout_prob: float = 0.0,
    ) -> None:

        super().__init__()

        self.mode: int = self._mode_to_int(mode)

        if mode not in [self.Mode.ORIGINAL, self.Mode.FAST]:
            raise ValueError("mode must be Mode.ORIGINAL or Mode.FAST.")

        if out_classes > 128:
            raise ValueError(
                "Number of nuclear type classes exceeds the maximum (128)")
        elif out_classes == 1:
            raise ValueError(
                "Number of nuclear type classes should either be None or >1")

        if dropout_prob > 1 or dropout_prob < 0:
            raise ValueError("Dropout can only be in the range 0.0 to 1.0")

        # number of filters in the first convolution layer.
        _init_features: int = 64
        # number of layers in each pooling block.
        _block_config: Sequence[int] = (3, 4, 6, 3)

        if mode == self.Mode.FAST:
            _ksize = 3
            _pad = 3
        else:
            _ksize = 5
            _pad = 0

        conv_type: Type[nn.Conv2d] = Conv[Conv.CONV, 2]

        self.input_features = nn.Sequential(
            OrderedDict([
                (
                    "conv0",
                    conv_type(in_channels,
                              _init_features,
                              kernel_size=7,
                              stride=1,
                              padding=_pad,
                              bias=False),
                ),
                ("norm0",
                 get_norm_layer(name=norm,
                                spatial_dims=2,
                                channels=_init_features)),
                ("relu0", get_act_layer(name=act)),
            ]))

        _in_channels = _init_features
        _out_channels = 256
        _num_features = _init_features

        self.res_blocks = nn.Sequential()

        for i, num_layers in enumerate(_block_config):
            block = _ResidualBlock(
                layers=num_layers,
                num_features=_num_features,
                in_channels=_in_channels,
                out_channels=_out_channels,
                dropout_prob=dropout_prob,
                act=act,
                norm=norm,
            )
            self.res_blocks.add_module(f"residualblock{i + 1}", block)

            _in_channels = _out_channels
            _out_channels *= 2
            _num_features *= 2

        # bottleneck convolution
        self.bottleneck = nn.Sequential()
        self.bottleneck.add_module(
            "conv_bottleneck",
            conv_type(_in_channels,
                      _num_features,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=False))
        self.upsample = UpSample(2,
                                 scale_factor=2,
                                 mode=UpsampleMode.NONTRAINABLE,
                                 interp_mode=InterpolateMode.BILINEAR,
                                 bias=False)

        # decode branches
        self.nucleus_prediction = _DecoderBranch(kernel_size=_ksize)
        self.horizontal_vertical = _DecoderBranch(kernel_size=_ksize)
        self.type_prediction: _DecoderBranch = None  # type: ignore

        if out_classes > 0:
            self.type_prediction = _DecoderBranch(out_channels=out_classes,
                                                  kernel_size=_ksize)

        for m in self.modules():
            if isinstance(m, conv_type):
                nn.init.kaiming_normal_(torch.as_tensor(m.weight))
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(torch.as_tensor(m.weight), 1)
                nn.init.constant_(torch.as_tensor(m.bias), 0)
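
The three _DecoderBranch heads make this HoVerNet: a nucleus-prediction branch, a horizontal/vertical-map branch, and an optional type-prediction branch when out_classes > 0. A hedged usage sketch (assuming the excerpt is monai.networks.nets.HoVerNet; exact output sizes depend on the mode):

    import torch
    from monai.networks.nets import HoVerNet

    net = HoVerNet(mode=HoVerNet.Mode.FAST, in_channels=3, out_classes=5)
    # FAST mode expects 256 x 256 inputs; the result is one tensor per branch.
    out = net(torch.randn(1, 3, 256, 256))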