def __init__(
    self,
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    kernel_size: int,
    norm_type: Optional[Union[Normalisation, str]] = None,
    acti_type: Optional[Union[Activation, str]] = None,
    dropout_prob: Optional[float] = None,
):
    super(ConvNormActi, self).__init__()

    layers = nn.ModuleList()

    # convolution with "same" padding so the spatial dimensions are preserved
    conv_type = Conv[Conv.CONV, spatial_dims]
    padding_size = same_padding(kernel_size)
    conv = conv_type(in_channels, out_channels, kernel_size, padding=padding_size)
    layers.append(conv)

    # optional normalisation, activation and dropout, appended in that order
    if norm_type is not None:
        norm_type = Normalisation(norm_type)
        layers.append(SUPPORTED_NORM[norm_type](spatial_dims)(out_channels))
    if acti_type is not None:
        acti_type = Activation(acti_type)
        layers.append(SUPPORTED_ACTI[acti_type](inplace=True))
    if dropout_prob is not None:
        dropout_type = Dropout[Dropout.DROPOUT, spatial_dims]
        layers.append(dropout_type(p=dropout_prob))
    self.layers = nn.Sequential(*layers)
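# Usage sketch: a 2-D conv block with batch norm, ReLU and dropout. This assumes the
# surrounding class is ConvNormActi (named in the super() call above) with a standard
# forward that applies ``self.layers``; all argument values below are illustrative.
#
#     block = ConvNormActi(
#         spatial_dims=2, in_channels=1, out_channels=8, kernel_size=3,
#         norm_type="batch", acti_type="relu", dropout_prob=0.1,
#     )
#     out = block(torch.randn(4, 1, 32, 32))  # "same" padding keeps the 32x32 size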
def __init__(self, spatial_dims: int, sigma, truncated: float = 4.0):
    """
    Args:
        spatial_dims: number of spatial dimensions of the input image.
            The input must have shape (Batch, channels, H[, W, ...]).
        sigma (float or sequence of floats): standard deviation(s) of the Gaussian kernel.
        truncated: the kernel is truncated at this many standard deviations.
    """
    super().__init__()
    self.spatial_dims = int(spatial_dims)
    _sigma = ensure_tuple_rep(sigma, self.spatial_dims)
    # one fixed (non-trainable) 1-D Gaussian kernel per spatial dimension
    self.kernel = [
        torch.nn.Parameter(torch.as_tensor(gaussian_1d(s, truncated), dtype=torch.float), requires_grad=False)
        for s in _sigma
    ]
    self.padding = [same_padding(k.size()[0]) for k in self.kernel]
    # pick the N-D convolution matching the number of spatial dimensions
    self.conv_n = [F.conv1d, F.conv2d, F.conv3d][spatial_dims - 1]
    for idx, param in enumerate(self.kernel):
        self.register_parameter(f"kernel_{idx}", param)
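# Usage sketch, assuming this constructor belongs to a separable Gaussian filter
# module (the class name ``GaussianFilter`` is an assumption; the snippet does not
# show it) whose forward applies one 1-D convolution per spatial dimension:
#
#     gf = GaussianFilter(spatial_dims=2, sigma=1.5, truncated=4.0)
#     smoothed = gf(torch.rand(1, 1, 64, 64))  # fixed, non-trainable kernels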
def __init__(
    self,
    in_shape,
    out_shape,
    channels,
    strides,
    kernel_size=3,
    num_res_units=2,
    act=Act.PRELU,
    norm=Norm.INSTANCE,
    dropout=None,
    bias=True,
):
    """
    Construct the regressor network with the number of layers defined by `channels` and `strides`.
    In the forward pass, inputs are first passed through the convolutional layers; the
    output is then passed through a fully connected layer to produce the final output tensor.

    Args:
        in_shape: tuple of integers stating the dimension of the input tensor (minus batch dimension)
        out_shape: tuple of integers stating the dimension of the final output tensor
        channels: tuple of integers stating the output channels of each convolutional layer
        strides: tuple of integers stating the stride (downscale factor) of each convolutional layer
        kernel_size: integer or tuple of integers stating size of convolutional kernels
        num_res_units: integer stating number of convolutions in residual units, 0 means no residual units
        act: name or type defining activation layers
        norm: name or type defining normalization layers
        dropout: optional float value in range [0, 1] stating dropout probability for layers, None for no dropout
        bias: boolean stating if convolution layers should have a bias component
    """
    super().__init__()

    self.in_channels, *self.in_shape = in_shape
    self.dimensions = len(self.in_shape)
    self.channels = channels
    self.strides = strides
    self.out_shape = out_shape
    self.kernel_size = kernel_size
    self.num_res_units = num_res_units
    self.act = act
    self.norm = norm
    self.dropout = dropout
    self.bias = bias
    self.net = nn.Sequential()

    echannel = self.in_channels
    padding = same_padding(kernel_size)

    self.final_size = np.asarray(self.in_shape, dtype=int)
    self.reshape = Reshape(*self.out_shape)

    # encode stage
    for i, (c, s) in enumerate(zip(self.channels, self.strides)):
        layer = self._get_layer(echannel, c, s, i == len(channels) - 1)
        echannel = c  # use the output channel number as the input for the next loop
        self.net.add_module(f"layer_{i}", layer)
        self.final_size = calculate_out_shape(self.final_size, kernel_size, s, padding)

    self.final = self._get_final_layer((echannel,) + self.final_size)
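# Usage sketch, assuming this is a regressor class (the name ``Regressor`` is an
# assumption) reducing a one-channel 64x64 image to two output values:
#
#     net = Regressor(
#         in_shape=(1, 64, 64),  # channels first, batch dimension excluded
#         out_shape=(2,),
#         channels=(8, 16, 32),
#         strides=(2, 2, 2),
#     )
#     pred = net(torch.randn(4, 1, 64, 64))  # -> shape (4, 2)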
def __init__(
    self,
    spatial_dims: int,
    in_channels: int,
    out_channels: int,
    kernels=(3, 3),
    dilation=1,
    norm_type: Union[Normalisation, str] = Normalisation.INSTANCE,
    acti_type: Union[Activation, str] = Activation.RELU,
    channel_matching: Union[ChannelMatching, str] = ChannelMatching.PAD,
):
    """
    Args:
        kernels (list of int): each integer k in `kernels` corresponds to a convolution layer with kernel size k.
        norm_type: {``"batch"``, ``"instance"``}
            Feature normalisation with batchnorm or instancenorm. Defaults to ``"instance"``.
        acti_type: {``"relu"``, ``"prelu"``, ``"relu6"``}
            Non-linear activation using ReLU or PReLU. Defaults to ``"relu"``.
        channel_matching: {``"pad"``, ``"project"``}
            Specifies how a channel mismatch between the residual branch and the conv branch is handled.
            Defaults to ``"pad"``.

            - ``"pad"``: with zero padding.
            - ``"project"``: with a trainable convolution with kernel size one.

    Raises:
        ValueError: channel matching must be pad or project, got {channel_matching}.
        ValueError: in_channels > out_channels is incompatible with `channel_matching=pad`.
    """
    super(HighResBlock, self).__init__()
    conv_type = Conv[Conv.CONV, spatial_dims]
    norm_type = Normalisation(norm_type)
    acti_type = Activation(acti_type)

    self.project, self.pad = None, None
    if in_channels != out_channels:
        channel_matching = ChannelMatching(channel_matching)
        if channel_matching == ChannelMatching.PROJECT:
            self.project = conv_type(in_channels, out_channels, kernel_size=1)
        if channel_matching == ChannelMatching.PAD:
            if in_channels > out_channels:
                raise ValueError("in_channels > out_channels is incompatible with `channel_matching=pad`.")
            pad_1 = (out_channels - in_channels) // 2
            pad_2 = out_channels - in_channels - pad_1
            # F.pad lists dimensions last-to-first, so this pads only the channel dim
            pad = [0, 0] * spatial_dims + [pad_1, pad_2] + [0, 0]
            self.pad = lambda input: F.pad(input, pad)

    layers = nn.ModuleList()
    _in_chns, _out_chns = in_channels, out_channels
    for kernel_size in kernels:
        # norm -> activation -> convolution, repeated once per kernel size
        layers.append(SUPPORTED_NORM[norm_type](spatial_dims)(_in_chns))
        layers.append(SUPPORTED_ACTI[acti_type](inplace=True))
        layers.append(
            conv_type(_in_chns, _out_chns, kernel_size, padding=same_padding(kernel_size, dilation), dilation=dilation)
        )
        _in_chns = _out_chns
    self.layers = nn.Sequential(*layers)
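# Usage sketch contrasting the two channel-matching modes when the residual branch
# has fewer channels than the conv branch (all values are illustrative):
#
#     # residual branch zero-padded from 8 to 16 channels
#     block = HighResBlock(2, in_channels=8, out_channels=16, channel_matching="pad")
#     # residual branch projected to 16 channels with a kernel-size-one convolution
#     block = HighResBlock(2, in_channels=8, out_channels=16, channel_matching="project")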
def __init__(
    self,
    dimensions,
    in_channels,
    out_channels,
    strides=1,
    kernel_size=3,
    subunits=2,
    act=Act.PRELU,
    norm=Norm.INSTANCE,
    dropout=None,
    dilation=1,
    bias=True,
    last_conv_only=False,
) -> None:
    super().__init__()
    self.dimensions = dimensions
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.conv = nn.Sequential()
    self.residual = nn.Identity()

    padding = same_padding(kernel_size, dilation)
    schannels = in_channels
    sstrides = strides
    subunits = max(1, subunits)

    for su in range(subunits):
        conv_only = last_conv_only and su == (subunits - 1)
        unit = Convolution(
            dimensions,
            schannels,
            out_channels,
            sstrides,
            kernel_size,
            act,
            norm,
            dropout,
            dilation,
            bias,
            conv_only,
        )
        self.conv.add_module(f"unit{su:d}", unit)

        # after the first loop set channels and strides to what they should be for subsequent units
        schannels = out_channels
        sstrides = 1

    # apply a convolution to the input to change the number of output channels and
    # the output size to match that coming from self.conv
    if np.prod(strides) != 1 or in_channels != out_channels:
        rkernel_size = kernel_size
        rpadding = padding

        if np.prod(strides) == 1:
            # if only adapting the number of channels, a 1x1 kernel is used with no padding
            rkernel_size = 1
            rpadding = 0

        conv_type = Conv[Conv.CONV, dimensions]
        self.residual = conv_type(in_channels, out_channels, rkernel_size, strides, rpadding, bias=bias)
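# Usage sketch, assuming this is a residual unit class (the name ``ResidualUnit`` is
# an assumption) built from the Convolution blocks defined below:
#
#     ru = ResidualUnit(2, in_channels=1, out_channels=8, strides=2, subunits=2)
#     out = ru(torch.randn(4, 1, 32, 32))  # strided residual conv matches the
#                                          # downsampled main path -> (4, 8, 16, 16)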
def __init__(
    self,
    dimensions,
    in_channels,
    out_channels,
    strides=1,
    kernel_size=3,
    act=Act.PRELU,
    norm=Norm.INSTANCE,
    dropout=None,
    dilation=1,
    bias=True,
    conv_only=False,
    is_transposed=False,
) -> None:
    super().__init__()
    self.dimensions = dimensions
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.is_transposed = is_transposed

    padding = same_padding(kernel_size, dilation)
    conv_type = Conv[Conv.CONVTRANS if is_transposed else Conv.CONV, dimensions]

    # define the normalisation type and the arguments to the constructor
    norm_name, norm_args = split_args(norm)
    norm_type = Norm[norm_name, dimensions]

    # define the activation type and the arguments to the constructor
    act_name, act_args = split_args(act)
    act_type = Act[act_name]

    if dropout:
        # if dropout was specified simply as a p value, use the default name and make a keyword map with the value
        if isinstance(dropout, (int, float)):
            drop_name = Dropout.DROPOUT
            drop_args = {"p": dropout}
        else:
            drop_name, drop_args = split_args(dropout)

        drop_type = Dropout[drop_name, dimensions]

    if is_transposed:
        # positional args: kernel_size, stride, padding, output_padding, groups, bias, dilation
        conv = conv_type(in_channels, out_channels, kernel_size, strides, padding, strides - 1, 1, bias, dilation)
    else:
        conv = conv_type(in_channels, out_channels, kernel_size, strides, padding, dilation, bias=bias)

    self.add_module("conv", conv)

    if not conv_only:
        self.add_module("norm", norm_type(out_channels, **norm_args))
        if dropout:
            self.add_module("dropout", drop_type(**drop_args))
        self.add_module("act", act_type(**act_args))
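# Usage sketch, assuming this is the Convolution class instantiated by the residual
# unit above; the transposed variant upsamples by the stride factor, using
# output_padding = strides - 1 to recover the exact input size:
#
#     down = Convolution(2, 1, 8, strides=2)                    # conv -> norm -> PReLU
#     up = Convolution(2, 8, 1, strides=2, is_transposed=True)  # transposed conv
#     restored = up(down(torch.randn(4, 1, 32, 32)))            # -> (4, 1, 32, 32)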