Code example #1
    def _get_up_layer(self, in_channels, out_channels, strides, is_top):
        conv = Convolution(self.dimensions,
                           in_channels,
                           out_channels,
                           strides,
                           self.up_kernel_size,
                           self.act,
                           self.norm,
                           self.dropout,
                           conv_only=is_top and self.num_res_units == 0,
                           is_transposed=True)

        if self.num_res_units > 0:
            ru = ResidualUnit(self.dimensions,
                              out_channels,
                              out_channels,
                              1,
                              self.kernel_size,
                              1,
                              self.act,
                              self.norm,
                              self.dropout,
                              last_conv_only=is_top)
            return nn.Sequential(conv, ru)
        else:
            return conv
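
The `conv_only=is_top and self.num_res_units == 0` flag above means the network's final transposed convolution is emitted without activation or normalization. The upsampling itself comes from `is_transposed=True` with a stride greater than 1. A minimal standalone sketch of that behavior, assuming MONAI is installed (the channel counts and input size are illustrative, not from the snippet):

import torch
from monai.networks.blocks import Convolution

# strides=2 with is_transposed=True doubles each spatial dimension
up = Convolution(2, 16, 8, strides=2, kernel_size=3, is_transposed=True)
x = torch.randn(1, 16, 32, 32)
print(up(x).shape)  # torch.Size([1, 8, 64, 64])
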
Code example #2
File: unet.py Project: lsho76/MONAI
 def _get_down_layer(self, in_channels: int, out_channels: int, strides: int, is_top: bool) -> nn.Module:
     """
     Args:
         in_channels: number of input channels.
         out_channels: number of output channels.
         strides: convolution stride.
         is_top: True if this is the top block.
     """
     if self.num_res_units > 0:
         return ResidualUnit(
             self.dimensions,
             in_channels,
             out_channels,
             strides=strides,
             kernel_size=self.kernel_size,
             subunits=self.num_res_units,
             act=self.act,
             norm=self.norm,
             dropout=self.dropout,
         )
     else:
         return Convolution(
             self.dimensions,
             in_channels,
             out_channels,
             strides=strides,
             kernel_size=self.kernel_size,
             act=self.act,
             norm=self.norm,
             dropout=self.dropout,
         )
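
A short sketch of what this down layer produces, assuming MONAI is installed (sizes are illustrative):

import torch
from monai.networks.blocks import ResidualUnit

# strides=2 halves each spatial dimension; the residual branch is strided to match
down = ResidualUnit(2, 8, 16, strides=2, kernel_size=3, subunits=2)
x = torch.randn(1, 8, 64, 64)
print(down(x).shape)  # torch.Size([1, 16, 32, 32])
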
Code example #3
 def _get_down_layer(self, in_channels: int, out_channels: int,
                     strides: int, kernel_size: int, is_top: bool):
     if self.num_res_units > 0:
         return ResidualUnit(
             self.dimensions,
             in_channels,
             out_channels,
             strides=strides,
             kernel_size=kernel_size,
             subunits=self.num_res_units,
             act=self.act,
             norm=self.norm,
             dropout=self.dropout,
         )
     else:
         return Convolution(
             self.dimensions,
             in_channels,
             out_channels,
             strides=strides,
             kernel_size=kernel_size,
             act=self.act,
             norm=self.norm,
             dropout=self.dropout,
         )
Code example #4
    def _get_up_layer(self, in_channels: int, out_channels: int, strides: int,
                      kernel_size: int, is_top: bool):
        conv = Convolution(
            self.dimensions,
            in_channels,
            out_channels,
            strides=strides,
            kernel_size=kernel_size,
            act=self.act,
            norm=self.norm,
            dropout=self.dropout,
            conv_only=is_top and self.num_res_units == 0,
            is_transposed=True,
        )

        if self.num_res_units > 0:
            ru = ResidualUnit(
                self.dimensions,
                out_channels,
                out_channels,
                strides=1,
                kernel_size=kernel_size,
                subunits=1,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                last_conv_only=is_top,
            )
            return nn.Sequential(conv, ru)
        else:
            return conv
Code example #5
 def _get_down_layer(self, in_channels, out_channels, strides, is_top):
     if self.num_res_units > 0:
         return ResidualUnit(self.dimensions, in_channels, out_channels,
                             strides, self.kernel_size, self.num_res_units,
                             self.act, self.norm, self.dropout)
     else:
         return Convolution(self.dimensions, in_channels, out_channels,
                            strides, self.kernel_size, self.act, self.norm,
                            self.dropout)
Code example #6
File: unet.py Project: Nic-Ma/MONAI
    def _get_up_layer(self, in_channels: int, out_channels: int, strides: int, is_top: bool) -> nn.Module:
        """
        Returns the decoding (up) part of a layer of the network. This typically will upsample data at some point
        in its structure. Its output is used as input to the next layer up.

        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            strides: convolution stride.
            is_top: True if this is the top block.
        """
        conv: Union[Convolution, nn.Sequential]

        conv = Convolution(
            self.dimensions,
            in_channels,
            out_channels,
            strides=strides,
            kernel_size=self.up_kernel_size,
            act=self.act,
            norm=self.norm,
            dropout=self.dropout,
            bias=self.bias,
            conv_only=is_top and self.num_res_units == 0,
            is_transposed=True,
            adn_ordering=self.adn_ordering,
        )

        if self.num_res_units > 0:
            ru = ResidualUnit(
                self.dimensions,
                out_channels,
                out_channels,
                strides=1,
                kernel_size=self.kernel_size,
                subunits=1,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                bias=self.bias,
                last_conv_only=is_top,
                adn_ordering=self.adn_ordering,
            )
            conv = nn.Sequential(conv, ru)

        return conv
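
This is the upstream MONAI version, where `_get_up_layer` and `_get_down_layer` are private helpers invoked while `monai.networks.nets.UNet` assembles its layers. A minimal usage sketch, assuming a recent MONAI release (older releases call the first constructor argument `dimensions` rather than `spatial_dims`, matching the `self.dimensions` seen in these snippets):

import torch
from monai.networks.nets import UNet

net = UNet(
    spatial_dims=2,
    in_channels=1,
    out_channels=2,
    channels=(8, 16, 32),
    strides=(2, 2),
    num_res_units=2,
)
x = torch.randn(1, 1, 64, 64)
print(net(x).shape)  # torch.Size([1, 2, 64, 64]); spatial size is preserved
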
Code example #7
    def _get_up_layer(self, in_channels, out_channels, kernel_size, is_top):

        if self.num_res_units > 0:
            ru = ResidualUnit(
                self.dimensions,
                in_channels,
                out_channels,
                strides=1,
                kernel_size=kernel_size,
                subunits=1,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                last_conv_only=is_top,
            )
            return ru
        else:
            return nn.Sequential()
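
Note that `nn.Sequential()` with no child modules simply passes its input through, so when `num_res_units == 0` this variant's up layer is an identity:

import torch
import torch.nn as nn

identity = nn.Sequential()  # no modules: forward() returns the input unchanged
x = torch.randn(1, 3, 8, 8)
print(torch.equal(identity(x), x))  # True
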
Code example #8
    def _get_up_layer(self, in_channels: int, out_channels: int, strides: int,
                      is_top: bool) -> nn.Module:
        """
        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            strides: convolution stride.
            is_top: True if this is the top block.
        """
        conv: Union[Convolution, nn.Sequential]

        conv = Convolution(
            self.dimensions,
            in_channels,
            out_channels,
            strides=strides,
            kernel_size=self.up_kernel_size,
            act=self.act,
            norm=self.norm,
            evonorm=self.evonorm,  # not an upstream MONAI Convolution argument; fork-specific
            dropout=self.dropout,
            conv_only=is_top and self.num_res_units == 0,
            is_transposed=True,
        )

        if self.num_res_units > 0:
            ru = ResidualUnit(
                self.dimensions,
                out_channels,
                out_channels,
                strides=1,
                kernel_size=self.kernel_size,
                subunits=1,
                act=self.act,
                norm=self.norm,
                evonorm=self.evonorm,
                dropout=self.dropout,
                last_conv_only=is_top,
            )
            conv = nn.Sequential(conv, ru)

        return conv
Code example #9
File: unet.py Project: Nic-Ma/MONAI
    def _get_down_layer(self, in_channels: int, out_channels: int, strides: int, is_top: bool) -> nn.Module:
        """
        Returns the encoding (down) part of a layer of the network. This typically will downsample data at some point
        in its structure. Its output is used as input to the next layer down and is concatenated with output from the
        next layer to form the input for the decode (up) part of the layer.

        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            strides: convolution stride.
            is_top: True if this is the top block.
        """
        mod: nn.Module
        if self.num_res_units > 0:

            mod = ResidualUnit(
                self.dimensions,
                in_channels,
                out_channels,
                strides=strides,
                kernel_size=self.kernel_size,
                subunits=self.num_res_units,
                act=self.act,
                norm=self.norm,
                dropout=self.dropout,
                bias=self.bias,
                adn_ordering=self.adn_ordering,
            )
            return mod
        mod = Convolution(
            self.dimensions,
            in_channels,
            out_channels,
            strides=strides,
            kernel_size=self.kernel_size,
            act=self.act,
            norm=self.norm,
            dropout=self.dropout,
            bias=self.bias,
            adn_ordering=self.adn_ordering,
        )
        return mod
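
The concatenation mentioned in the docstring is what drives the channel arithmetic elsewhere in the network: each level's up layer receives the down-path output concatenated with the subblock output, so its in_channels is effectively doubled. A small illustration using MONAI's SkipConnection (the Identity submodule is only for demonstration):

import torch
from torch import nn
from monai.networks.layers import SkipConnection

# SkipConnection concatenates its input with the submodule's output along dim 1
skip = SkipConnection(nn.Identity())
x = torch.randn(1, 16, 32, 32)
print(skip(x).shape)  # torch.Size([1, 32, 32, 32]); channel count doubles
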
Code example #10
 def test_dropout1(self):
     conv = ResidualUnit(2, 1, self.output_channels, dropout=0.15)
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[0], self.im_shape[1])
     self.assertEqual(out.shape, expected_shape)
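
The test references fixture attributes (`self.imt`, `self.im_shape`, `self.output_channels`) that the snippet does not show. A hypothetical setUp consistent with the assertion, assuming a single-channel 2D input:

import unittest
import torch
from monai.networks.blocks import ResidualUnit

class TestResidualUnit(unittest.TestCase):
    def setUp(self):
        # assumed values, chosen only to satisfy the expected shapes in these tests
        self.im_shape = (80, 80)
        self.output_channels = 4
        self.imt = torch.randn(1, 1, *self.im_shape)
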
Code example #11
 def test_stride1(self):
     conv = ResidualUnit(2, 1, self.output_channels, strides=2)
     out = conv(self.imt)
     expected_shape = (1, self.output_channels, self.im_shape[0] // 2, self.im_shape[1] // 2)
     self.assertEqual(out.shape, expected_shape)
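
The `// 2` in the expected shape is exact because the test dimensions are even; with MONAI's default "same" padding, a strided convolution yields ceil(n / stride) per spatial dimension. A hedged check for the odd-size case, assuming the default kernel_size=3:

import torch
from monai.networks.blocks import ResidualUnit

ru = ResidualUnit(2, 1, 4, strides=2)
print(ru(torch.randn(1, 1, 81, 81)).shape)  # torch.Size([1, 4, 41, 41]); ceil(81 / 2) = 41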