Code example #1
    def __init__(
        self,
        spatial_dims: int,
        in_chns: int,
        cat_chns: int,
        out_chns: int,
        act: Union[str, tuple],
        norm: Union[str, tuple],
        bias: bool,
        dropout: Union[float, tuple] = 0.0,
        upsample: str = "deconv",
        pre_conv: Optional[Union[nn.Module, str]] = "default",
        interp_mode: str = "linear",
        align_corners: Optional[bool] = True,
        halves: bool = True,
        dim: Optional[int] = None,
    ):
        """
        Args:
            spatial_dims: number of spatial dimensions.
            in_chns: number of input channels to be upsampled.
            cat_chns: number of channels from the decoder.
            out_chns: number of output channels.
            act: activation type and arguments.
            norm: feature normalization type and arguments.
            bias: whether to have a bias term in convolution blocks.
            dropout: dropout ratio. Defaults to no dropout.
            upsample: upsampling mode, available options are
                ``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``.
            pre_conv: a conv block applied before upsampling.
                Only used in the "nontrainable" or "pixelshuffle" mode.
            interp_mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``}
                Only used in the "nontrainable" mode.
            align_corners: set the align_corners parameter for upsample. Defaults to True.
                Only used in the "nontrainable" mode.
            halves: whether to halve the number of channels during upsampling.
                This parameter does not work on ``nontrainable`` mode if ``pre_conv`` is `None`.

        .. deprecated:: 0.6.0
            ``dim`` is deprecated, use ``spatial_dims`` instead.
        """
        super().__init__()
        if dim is not None:
            spatial_dims = dim
        if upsample == "nontrainable" and pre_conv is None:
            up_chns = in_chns
        else:
            up_chns = in_chns // 2 if halves else in_chns
        self.upsample = UpSample(
            spatial_dims,
            in_chns,
            up_chns,
            2,
            mode=upsample,
            pre_conv=pre_conv,
            interp_mode=interp_mode,
            align_corners=align_corners,
        )
        self.convs = TwoConv(spatial_dims, cat_chns + up_chns, out_chns, act,
                             norm, bias, dropout)
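The block above delegates the actual resolution change to MONAI's UpSample; with the default "deconv" mode and halves=True it learns a transposed convolution that doubles the spatial size while halving the channel count. A minimal standalone sketch of that behaviour (the channel numbers and the monai.networks.blocks import path are illustrative assumptions, not taken from the snippet):

import torch
from monai.networks.blocks import UpSample  # assumed import path

# "deconv" mode: a transposed convolution that doubles H and W and halves the channels,
# mirroring the halves=True branch of the block above.
up = UpSample(spatial_dims=2, in_channels=64, out_channels=32, scale_factor=2, mode="deconv")
x = torch.randn(1, 64, 16, 16)
print(up(x).shape)  # torch.Size([1, 32, 32, 32])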
Code example #2
    def __init__(
        self,
        dim: int,
        in_chns: int,
        cat_chns: int,
        out_chns: int,
        act: Union[str, tuple],
        norm: Union[str, tuple],
        dropout: Union[float, tuple] = 0.0,
        upsample: str = "deconv",
        halves: bool = True,
    ):
        """
        Args:
            dim: number of spatial dimensions.
            in_chns: number of input channels to be upsampled.
            cat_chns: number of channels from the decoder.
            out_chns: number of output channels.
            act: activation type and arguments.
            norm: feature normalization type and arguments.
            dropout: dropout ratio. Defaults to no dropout.
            upsample: upsampling mode, available options are
                ``"deconv"``, ``"pixelshuffle"``, ``"nontrainable"``.
            halves: whether to halve the number of channels during upsampling.
        """
        super().__init__()

        up_chns = in_chns // 2 if halves else in_chns
        self.upsample = UpSample(dim, in_chns, up_chns, 2, mode=upsample)
        self.convs = TwoConv(dim, cat_chns + up_chns, out_chns, act, norm,
                             dropout)
Code example #3
File: coplenet.py  Project: yaritzabg/MONAI
    def __init__(self,
                 in_channels1,
                 in_channels2,
                 out_channels,
                 bilinear=True,
                 dropout_p=0.5,
                 spatial_dims: int = 2):
        super().__init__()
        self.up = UpSample(spatial_dims,
                           in_channels1,
                           in_channels2,
                           scale_factor=2,
                           with_conv=not bilinear)
        self.conv = ConvBNActBlock(in_channels2 * 2,
                                   out_channels,
                                   dropout_p,
                                   spatial_dims=spatial_dims)
Code example #4
File: seg_model.py  Project: wyli/tutorials
    def __init__(
        self,
        dim: int,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        act: Optional[Union[Tuple, str]] = None,
        scale_factor: float = 1.0,
    ):
        """
        Segmentation head.
        This class refers to `segmentation_models.pytorch
        <https://github.com/qubvel/segmentation_models.pytorch>`_.

        Args:
            dim: number of spatial dimensions.
            in_channels: number of input channels for the block.
            out_channels: number of output channels for the block.
            kernel_size: kernel size for the conv layer.
            act: activation type and arguments.
            scale_factor: multiplier for spatial size. Has to match input size if it is a tuple.

        """
        conv_layer = Conv[Conv.CONV, dim](
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=kernel_size // 2,
        )
        up_layer: nn.Module = nn.Identity()
        if scale_factor > 1.0:
            up_layer = UpSample(
                spatial_dims=dim,
                scale_factor=scale_factor,
                mode="nontrainable",
                pre_conv=None,
                interp_mode=InterpolateMode.LINEAR,
            )
        if act is not None:
            act_layer = get_act_layer(act)
        else:
            act_layer = nn.Identity()
        super().__init__(conv_layer, up_layer, act_layer)
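When scale_factor is greater than 1, the head appends a purely interpolating upsample: mode="nontrainable" with pre_conv=None resizes the feature map without adding parameters or changing the channel count. A hedged standalone sketch of that configuration (the input shape and the monai.networks.blocks import path are assumptions):

import torch
from monai.networks.blocks import UpSample  # assumed import path

# Non-trainable 2x upsample: pure interpolation, no pre-conv, channel count unchanged
# (with two spatial dims the "linear" setting resolves to bilinear interpolation).
up = UpSample(spatial_dims=2, scale_factor=2.0, mode="nontrainable",
              pre_conv=None, interp_mode="linear")
print(up(torch.randn(1, 3, 8, 8)).shape)  # torch.Size([1, 3, 16, 16])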
Code example #5
    def test_shape(self, input_param, input_shape, expected_shape):
        net = UpSample(**input_param)
        with eval_mode(net):
            result = net(torch.randn(input_shape))
            self.assertEqual(result.shape, expected_shape)
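For reference, one parameterisation such a shape test could exercise; the values below are illustrative rather than taken from the actual test file, and eval_mode is assumed to be importable from monai.networks:

import torch
from monai.networks import eval_mode        # assumed helper location
from monai.networks.blocks import UpSample  # assumed import path

# Illustrative case: pixelshuffle upsampling keeps the channel count and doubles H and W.
input_param = {"spatial_dims": 2, "in_channels": 4, "out_channels": 4,
               "scale_factor": 2, "mode": "pixelshuffle"}
net = UpSample(**input_param)
with eval_mode(net):
    result = net(torch.randn(2, 4, 16, 16))
print(result.shape)  # torch.Size([2, 4, 32, 32])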
Code example #6
File: hovernet.py  Project: Nic-Ma/MONAI
    def __init__(
        self,
        mode: Mode = Mode.FAST,
        in_channels: int = 3,
        out_classes: int = 0,
        act: Union[str, tuple] = ("relu", {
            "inplace": True
        }),
        norm: Union[str, tuple] = "batch",
        dropout_prob: float = 0.0,
    ) -> None:

        super().__init__()

        self.mode: int = self._mode_to_int(mode)

        if mode not in [self.Mode.ORIGINAL, self.Mode.FAST]:
            raise ValueError(
                "mode must be either Mode.ORIGINAL or Mode.FAST")

        if out_classes > 128:
            raise ValueError(
                "Number of nuclear types classes exceeds maximum (128)")
        elif out_classes == 1:
            raise ValueError(
                "Number of nuclear type classes should either be None or >1")

        if dropout_prob > 1 or dropout_prob < 0:
            raise ValueError("Dropout can only be in the range 0.0 to 1.0")

        # number of filters in the first convolution layer.
        _init_features: int = 64
        # number of layers in each pooling block.
        _block_config: Sequence[int] = (3, 4, 6, 3)

        if mode == self.Mode.FAST:
            _ksize = 3
            _pad = 3
        else:
            _ksize = 5
            _pad = 0

        conv_type: Type[nn.Conv2d] = Conv[Conv.CONV, 2]

        self.input_features = nn.Sequential(
            OrderedDict([
                (
                    "conv0",
                    conv_type(in_channels,
                              _init_features,
                              kernel_size=7,
                              stride=1,
                              padding=_pad,
                              bias=False),
                ),
                ("norm0",
                 get_norm_layer(name=norm,
                                spatial_dims=2,
                                channels=_init_features)),
                ("relu0", get_act_layer(name=act)),
            ]))

        _in_channels = _init_features
        _out_channels = 256
        _num_features = _init_features

        self.res_blocks = nn.Sequential()

        for i, num_layers in enumerate(_block_config):
            block = _ResidualBlock(
                layers=num_layers,
                num_features=_num_features,
                in_channels=_in_channels,
                out_channels=_out_channels,
                dropout_prob=dropout_prob,
                act=act,
                norm=norm,
            )
            self.res_blocks.add_module(f"residualblock{i + 1}", block)

            _in_channels = _out_channels
            _out_channels *= 2
            _num_features *= 2

        # bottleneck convolution
        self.bottleneck = nn.Sequential()
        self.bottleneck.add_module(
            "conv_bottleneck",
            conv_type(_in_channels,
                      _num_features,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=False))
        self.upsample = UpSample(2,
                                 scale_factor=2,
                                 mode=UpsampleMode.NONTRAINABLE,
                                 interp_mode=InterpolateMode.BILINEAR,
                                 bias=False)

        # decode branches
        self.nucleus_prediction = _DecoderBranch(kernel_size=_ksize)
        self.horizontal_vertical = _DecoderBranch(kernel_size=_ksize)
        self.type_prediction: _DecoderBranch = None  # type: ignore

        if out_classes > 0:
            self.type_prediction = _DecoderBranch(out_channels=out_classes,
                                                  kernel_size=_ksize)

        for m in self.modules():
            if isinstance(m, conv_type):
                nn.init.kaiming_normal_(torch.as_tensor(m.weight))
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(torch.as_tensor(m.weight), 1)
                nn.init.constant_(torch.as_tensor(m.bias), 0)
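The stem above builds its layers through MONAI's factory helpers rather than hard-coded nn classes, so the same code can serve different normalizations and activations. A brief hedged illustration of the two calls used (the monai.networks.layers.utils import path is an assumption):

from monai.networks.layers.utils import get_act_layer, get_norm_layer  # assumed import path

# "batch" with spatial_dims=2 resolves to nn.BatchNorm2d; the tuple form forwards
# keyword arguments to the activation constructor.
norm0 = get_norm_layer(name="batch", spatial_dims=2, channels=64)
relu0 = get_act_layer(name=("relu", {"inplace": True}))
print(type(norm0).__name__, type(relu0).__name__)  # BatchNorm2d ReLU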
Code example #7
File: hovernet.py  Project: Nic-Ma/MONAI
    def __init__(
        self,
        decode_config: Sequence[int] = (8, 4),
        act: Union[str, tuple] = ("relu", {
            "inplace": True
        }),
        norm: Union[str, tuple] = "batch",
        dropout_prob: float = 0.0,
        out_channels: int = 2,
        kernel_size: int = 3,
    ) -> None:
        """
        Args:
            decode_config: number of layers for each block.
            act: activation type and arguments. Defaults to relu.
            norm: feature normalization type and arguments. Defaults to batch norm.
            dropout_prob: dropout rate after each dense layer.
            out_channels: number of the output channel.
            kernel_size: size of the kernel for >1 convolutions (dependent on mode)
        """
        super().__init__()
        conv_type: Callable = Conv[Conv.CONV, 2]

        # decode branches
        _in_channels = 1024
        _num_features = 128
        _out_channels = 32

        self.decoder_blocks = nn.Sequential()
        for i, num_layers in enumerate(decode_config):
            block = _DecoderBlock(
                layers=num_layers,
                num_features=_num_features,
                in_channels=_in_channels,
                out_channels=_out_channels,
                dropout_prob=dropout_prob,
                act=act,
                norm=norm,
                kernel_size=kernel_size,
            )
            self.decoder_blocks.add_module(f"decoderblock{i + 1}", block)
            _in_channels = 512

        # output layers
        self.output_features = nn.Sequential()
        _i = len(decode_config)
        _pad_size = (kernel_size - 1) // 2
        _seq_block = nn.Sequential(
            OrderedDict([("conva",
                          conv_type(256,
                                    64,
                                    kernel_size=kernel_size,
                                    stride=1,
                                    bias=False,
                                    padding=_pad_size))]))

        self.output_features.add_module(f"decoderblock{_i + 1}", _seq_block)

        _seq_block = nn.Sequential(
            OrderedDict([
                ("norm", get_norm_layer(name=norm, spatial_dims=2,
                                        channels=64)),
                ("relu", get_act_layer(name=act)),
                ("conv", conv_type(64, out_channels, kernel_size=1, stride=1)),
            ]))

        self.output_features.add_module(f"decoderblock{_i + 2}", _seq_block)

        self.upsample = UpSample(2,
                                 scale_factor=2,
                                 mode=UpsampleMode.NONTRAINABLE,
                                 interp_mode=InterpolateMode.BILINEAR,
                                 bias=False)
Code example #8
    def test_shape(self, input_param, input_data, expected_shape):
        net = UpSample(**input_param)
        net.eval()
        with torch.no_grad():
            result = net(input_data)
            self.assertEqual(result.shape, expected_shape)
Code example #9
File: unet_pipe.py  Project: yaritzabg/MONAI
    def __init__(self,
                 spatial_dims: int,
                 in_channels: int,
                 out_channels: int,
                 n_feat: int = 32,
                 depth: int = 4):
        """
        A UNet-like architecture for model parallelism.

        Args:
            spatial_dims: number of input spatial dimensions,
                2 for (B, in_channels, H, W), 3 for (B, in_channels, H, W, D).
            in_channels: number of input channels.
            out_channels: number of output channels.
            n_feat: number of features in the first convolution.
            depth: number of downsampling stages.
        """
        super(UNetPipe, self).__init__()
        n_enc_filter: List[int] = [n_feat]
        for i in range(1, depth + 1):
            n_enc_filter.append(min(n_enc_filter[-1] * 2, 1024))
        namespaces = [Namespace() for _ in range(depth)]

        # construct the encoder
        encoder_layers: List[nn.Module] = []
        init_conv = Convolution(
            spatial_dims,
            in_channels,
            n_enc_filter[0],
            strides=2,
            act=Act.LEAKYRELU,
            norm=Norm.BATCH,
            bias=False,
        )
        encoder_layers.append(
            nn.Sequential(
                OrderedDict([(
                    "Conv",
                    init_conv,
                ), ("skip", Stash().isolate(namespaces[0]))])))
        for i in range(1, depth + 1):
            down_conv = DoubleConv(spatial_dims, n_enc_filter[i - 1],
                                   n_enc_filter[i])
            if i == depth:
                layer_dict = OrderedDict([("Down", down_conv)])
            else:
                layer_dict = OrderedDict([("Down", down_conv),
                                          ("skip",
                                           Stash().isolate(namespaces[i]))])
            encoder_layers.append(nn.Sequential(layer_dict))
        encoder = nn.Sequential(*encoder_layers)

        # construct the decoder
        decoder_layers: List[nn.Module] = []
        for i in reversed(range(1, depth + 1)):
            in_ch, out_ch = n_enc_filter[i], n_enc_filter[i - 1]
            layer_dict = OrderedDict([
                ("Up", UpSample(spatial_dims, in_ch, out_ch, 2, True)),
                ("skip", PopCat().isolate(namespaces[i - 1])),
                ("Conv1x1x1", Conv[Conv.CONV, spatial_dims](out_ch * 2,
                                                            in_ch,
                                                            kernel_size=1)),
                ("Conv",
                 DoubleConv(spatial_dims,
                            in_ch,
                            out_ch,
                            stride=1,
                            conv_only=True)),
            ])
            decoder_layers.append(nn.Sequential(layer_dict))
        in_ch = min(n_enc_filter[0] // 2, 32)
        layer_dict = OrderedDict([
            ("Up", UpSample(spatial_dims, n_feat, in_ch, 2, True)),
            ("RELU", Act[Act.LEAKYRELU](inplace=False)),
            (
                "out",
                Conv[Conv.CONV, spatial_dims](in_ch,
                                              out_channels,
                                              kernel_size=3,
                                              padding=1),
            ),
        ])
        decoder_layers.append(nn.Sequential(layer_dict))
        decoder = nn.Sequential(*decoder_layers)

        # making a sequential model
        self.add_module("encoder", encoder)
        self.add_module("decoder", decoder)

        for m in self.modules():
            if isinstance(m, Conv[Conv.CONV, spatial_dims]):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, Norm[Norm.BATCH, spatial_dims]):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, Conv[Conv.CONVTRANS, spatial_dims]):
                nn.init.kaiming_normal_(m.weight)
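The Conv[...] and Norm[...] subscripts used throughout this example are MONAI layer factories that resolve to the dimension-appropriate torch classes, which is what lets the initialisation loop above cover both 2D and 3D networks with a single isinstance check. A small hedged sketch (import path assumed):

from monai.networks.layers.factories import Conv, Norm  # assumed import path

# The factory lookup returns the concrete torch class for the requested spatial dims.
print(Conv[Conv.CONV, 2])       # <class 'torch.nn.modules.conv.Conv2d'>
print(Conv[Conv.CONVTRANS, 3])  # <class 'torch.nn.modules.conv.ConvTranspose3d'>
print(Norm[Norm.BATCH, 2])      # <class 'torch.nn.modules.batchnorm.BatchNorm2d'>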