Example #1
    def __init__(self, in_channels, out_channels, use_coord_conv=True):
        """
        constructor of the class
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param use_coord_conv: whether to use coord_conv [default=True]
        """
        from torch.nn import AvgPool2d, LeakyReLU

        if use_coord_conv:
            Conv = CoordConv
        else:
            from torch.nn import Conv2d as Conv

        super().__init__()

        # convolutional modules
        self.conv_1 = Conv(in_channels,
                           in_channels, (3, 3),
                           padding=1,
                           bias=True)
        self.conv_2 = Conv(in_channels,
                           out_channels, (3, 3),
                           padding=1,
                           bias=True)
        self.downSampler = AvgPool2d(2)  # downsampler

        # leaky_relu:
        self.lrelu = LeakyReLU(0.2)
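
The forward pass is not shown in this snippet; the following is a minimal sketch consistent with the modules declared above (hypothetical, not the original implementation):

    def forward(self, x):
        # hypothetical: two 3x3 convs with leaky ReLU, then 2x average-pool downsampling
        y = self.lrelu(self.conv_1(x))
        y = self.lrelu(self.conv_2(y))
        return self.downSampler(y)
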
Example #2
    def __init__(self, in_channels, use_coord_conv=True):
        """
        constructor of the class
        :param in_channels: number of input channels
        :param use_coord_conv: whether to use coord_conv [default = True]
        """
        from torch.nn import LeakyReLU
        if use_coord_conv:
            Conv = CoordConv
        else:
            from torch.nn import Conv2d as Conv

        super().__init__()

        # declare the required modules for forward pass
        self.batch_discriminator = MinibatchStdDev()

        # modules required:
        self.conv_1 = Conv(in_channels + 1,
                           in_channels, (3, 3),
                           padding=1,
                           bias=True)
        self.conv_2 = Conv(in_channels, in_channels, (4, 4), bias=True)

        # final conv layer emulates a fully connected layer
        self.conv_3 = Conv(in_channels, 1, (1, 1), bias=True)

        # leaky_relu:
        self.lrelu = LeakyReLU(0.2)
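
Again the forward pass is omitted; a hypothetical sketch of how these modules would chain, given that MinibatchStdDev appends one statistics channel (which is why conv_1 takes in_channels + 1):

    def forward(self, x):
        # hypothetical: stddev channel, two convs, then a 1x1 conv acting as the
        # final fully connected layer
        y = self.batch_discriminator(x)
        y = self.lrelu(self.conv_1(y))
        y = self.lrelu(self.conv_2(y))
        return self.conv_3(y)
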
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_coord_conv=True,
                 use_upsampling=False):
        """
        constructor for the class
        :param in_channels: number of input channels to the block
        :param out_channels: number of output channels required
        :param use_coord_conv: whether to use coord_conv
        :param use_upsampling: whether to use upsampling or to use
                               Transpose Conv.
        """
        from torch.nn import LeakyReLU

        super().__init__()

        if use_coord_conv:
            Conv, ConvT = CoordConv, CoordConvTranspose
        else:
            from torch.nn import Conv2d as Conv, ConvTranspose2d as ConvT
        self.conv_1 = Conv(in_channels,
                           out_channels, (3, 3),
                           padding=1,
                           bias=True)
        self.conv_2 = Conv(out_channels,
                           out_channels, (3, 3),
                           padding=1,
                           bias=True)

        if use_upsampling:
            self.upsampler = ConvT(in_channels, in_channels, (4, 4), stride=2)

        # leaky_relu:
        self.lrelu = LeakyReLU(0.2)
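
A hypothetical forward sketch for this generator block; note that self.upsampler only exists when use_upsampling=True, so the other branch is assumed to fall back to interpolation:

    def forward(self, x):
        # hypothetical: grow spatial resolution first (learned transpose conv when
        # configured, otherwise plain interpolation), then two 3x3 convs
        from torch.nn.functional import interpolate
        y = self.upsampler(x) if hasattr(self, 'upsampler') else interpolate(x, scale_factor=2)
        y = self.lrelu(self.conv_1(y))
        y = self.lrelu(self.conv_2(y))
        return y
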
Example #4
    def __init__(self, in_channels: int, out_channels: int, nb_layers: int = 3, norm_type: str = 'instance'):
        """
        Discriminator architecture: C64 - C128 - C256 - C512, where Ck denotes a
        Convolution-InstanceNorm-LeakyReLU layer with k filters.

        Parameters:
            in_channels:    Number of input channels
            out_channels:   Number of output channels
            nb_layers:      Number of layers in the 70x70 PatchGAN discriminator
            norm_type:      Type of normalization layer - 'instance' or 'batch'
        """
        super().__init__()
        in_f = 1
        out_f = 2
        bias = norm_type == 'instance'
        norm_layer = InstanceNorm if norm_type == 'instance' else BatchNorm

        # first layer: no normalization
        conv = Conv(in_channels, out_channels, 4, stride=2, padding=1, bias=True)
        layers = [conv, nn.LeakyReLU(0.2, True)]

        # intermediate layers: double the number of filters at each step
        for idx in range(1, nb_layers):
            conv = Conv(out_channels * in_f, out_channels * out_f, 4, stride=2, padding=1, bias=bias)
            layers += [conv, norm_layer(out_channels * out_f), nn.LeakyReLU(0.2, True)]
            in_f = out_f
            out_f *= 2

        out_f = min(2 ** nb_layers, 8)
        conv = Conv(out_channels * in_f, out_channels * out_f, 4, stride=1, padding=1, bias=bias)
        layers += [conv, norm_layer(out_channels * out_f), nn.LeakyReLU(0.2, True)]

        # final layer maps to a 1-channel map of patch scores
        conv = Conv(out_channels * out_f, 1, 4, stride=1, padding=1, bias=True)
        layers += [conv]

        self.net = nn.Sequential(*layers)
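
A brief usage sketch, assuming Conv, InstanceNorm and BatchNorm alias torch.nn.Conv2d, InstanceNorm2d and BatchNorm2d (this snippet's imports are not shown) and that forward simply applies self.net:

    def forward(self, x):
        # hypothetical: the whole discriminator is the sequential stack
        return self.net(x)

With nb_layers=3 and a 256x256 input, the resolution halves three times (256 -> 128 -> 64 -> 32), and each of the two stride-1, kernel-4, padding-1 convolutions trims one pixel, yielding a 30x30 single-channel map of patch scores.
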
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 bias=True):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation

        config = Config()
        if config.dim is Dim.ONE:
            from torch.nn import Conv1d as Conv
        elif config.dim is Dim.TWO:
            from torch.nn import Conv2d as Conv
        elif config.dim is Dim.THREE:
            from torch.nn import Conv3d as Conv

        dw_bias = not config.sep_conv_kwargs['norm_between']
        dw = Conv(self.in_channels,
                  self.in_channels,
                  self.kernel_size,
                  padding=self.padding,
                  padding_mode=config.padding_mode,
                  groups=in_channels,
                  bias=dw_bias,
                  dilation=self.dilation,
                  stride=self.stride)
        pw = Conv(self.in_channels, self.out_channels, 1, bias=bias)

        self.add_module('depthwise', dw)
        if config.sep_conv_kwargs['norm_between']:
            self.add_module('norm', create_norm(in_channels))
        if config.sep_conv_kwargs['activ_between']:
            self.add_module('activ', create_activ())
        self.add_module('pointwise', pw)
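
A minimal usage sketch, assuming the class subclasses torch.nn.Sequential (so the add_module order, depthwise -> optional norm/activation -> pointwise, defines the forward pass) and that Config defaults to two dimensions; the class name SeparableConv is assumed:

import torch

# hypothetical usage; shapes assume the 2D configuration:
sep = SeparableConv(in_channels=32, out_channels=64, kernel_size=3, padding=1)
y = sep(torch.randn(1, 32, 16, 16))  # per-channel 3x3 depthwise, then 1x1 pointwise mix
assert y.shape == (1, 64, 16, 16)
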
Example #6
    def __init__(self, in_channels, use_coord_conv=True):
        """
        constructor for the inner class
        :param in_channels: number of input channels to the block
        :param use_coord_conv: whether to use coord_conv or not
        """
        from torch.nn import LeakyReLU
        if use_coord_conv:
            Conv, ConvT = CoordConv, CoordConvTranspose
        else:
            from torch.nn import Conv2d as Conv, ConvTranspose2d as ConvT
        super().__init__()

        self.conv_1 = ConvT(in_channels, in_channels, (4, 4), bias=True)
        self.conv_2 = Conv(in_channels,
                           in_channels, (3, 3),
                           padding=(1, 1),
                           bias=True)

        # leaky_relu:
        self.lrelu = LeakyReLU(0.2)
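
A hypothetical forward sketch for this initial generator block: the transpose conv turns a 1x1 map into a 4x4 one, which the padded 3x3 conv then refines (reshaping the input latent to 1x1 is an assumption):

    def forward(self, x):
        # hypothetical: treat the latent vector as a 1x1 feature map, expand it
        # to 4x4 with the transpose conv, then refine with the 3x3 conv
        y = x.view(x.shape[0], -1, 1, 1)
        y = self.lrelu(self.conv_1(y))
        y = self.lrelu(self.conv_2(y))
        return y
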
Example #7
def create_conv(in_channels, out_channels, kernel_size, **kwargs):
    """Creates a convolutional layer.  
    Note:
        This function supports creating a 2D or 3D convolutional layer
        configured by :meth:`pytorch_layers.Config.dim`.

    Note:
        The function passes all keyword arguments directly to the Conv class.
        Check pytorch documentation for all keyword arguments (``bias``, for
        example).

    Args:
        in_channels (int): The number of input channels.
        out_channels (int): The number of output channels.
        kernel_size (int): The size of kernel.

    Returns:
        torch.nn.Module: The created convolutional layer.

    """
    if 'padding_mode' in kwargs:
        message = ('"padding_mode" is ignored when creating conv. '
                   'Use pytorch_layers.Config to change it.')
        warnings.warn(message, RuntimeWarning, stacklevel=2)
        kwargs.pop('padding_mode')
    
    config = Config()

    if config.dim is Dim.ONE:
        from torch.nn import Conv1d as Conv
    elif config.dim is Dim.TWO:
        from torch.nn import Conv2d as Conv
    elif config.dim is Dim.THREE:
        from torch.nn import Conv3d as Conv

    model = Conv(in_channels, out_channels, kernel_size,
                 padding_mode=config.padding_mode, **kwargs)

    return model
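
A brief usage sketch (hypothetical call; assumes Config().dim is Dim.TWO):

# hypothetical usage:
conv = create_conv(3, 16, kernel_size=3, padding=1, bias=False)
# conv is a torch.nn.Conv2d while Config().dim is Dim.TWO
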
Example #8
    def __init__(self,
                 input_channels: int,
                 inner_channels: int,
                 innermost: bool = False,
                 outermost: bool = False,
                 apply_dp: bool = False,
                 submodule=None,
                 add_skip_conn: bool = True,
                 norm_type: str = 'instance'):
        """Defines a Unet submodule with/without skip connection!
        X -----------------identity(optional)--------------------
        |-- downsampling -- |submodule| -- upsampling --|
        """
        """
        Parameters: 
            input_channels: Number of output channels in the DeConvolutional layer
            inner_channels: Number of output channels in the Convolutional layer
            innermost:      If this module is the innermost module
            outermost:      If this module is the outermost module
            apply_dp:       If apply_dp is set to True, then activations are 0'ed out with prob 0.5
            submodule:      Previously defined UNet submodule
            add_skip_conn:  If set to true, skip connections are added b/w Encoder and Decoder
            norm_type:      Type of Normalization layer - InstanceNorm2D or BatchNorm2D
        """

        super().__init__()

        self.outermost = outermost
        self.add_skip_conn = add_skip_conn

        bias = norm_type == 'instance'
        f = 2 if add_skip_conn else 1
        norm_layer = InstanceNorm if norm_type == 'instance' else BatchNorm

        if innermost:
            dn_conv = Conv(in_channels=input_channels,
                           out_channels=inner_channels,
                           kernel_size=4,
                           stride=2,
                           padding=1,
                           bias=True,
                           padding_mode='zeros')
            up_conv = Deconv(in_channels=inner_channels,
                             out_channels=input_channels,
                             kernel_size=4,
                             stride=2,
                             padding=1,
                             bias=bias,
                             padding_mode='zeros')

            dn_layers = [nn.LeakyReLU(0.2, True), dn_conv]
            up_layers = [nn.ReLU(True), up_conv, norm_layer(input_channels)]
            layers = dn_layers + up_layers

        elif outermost:
            dn_conv = Conv(in_channels=1 * input_channels,
                           out_channels=inner_channels,
                           kernel_size=4,
                           stride=2,
                           padding=1,
                           bias=True,
                           padding_mode='zeros')
            up_conv = Deconv(in_channels=f * inner_channels,
                             out_channels=input_channels,
                             kernel_size=4,
                             stride=2,
                             padding=1,
                             bias=True,
                             padding_mode='zeros')

            dn_layers = [dn_conv]
            up_layers = [nn.ReLU(True), up_conv, nn.Tanh()]
            layers = dn_layers + [submodule] + up_layers

        else:
            dn_conv = Conv(in_channels=1 * input_channels,
                           out_channels=inner_channels,
                           kernel_size=4,
                           stride=2,
                           padding=1,
                           bias=bias,
                           padding_mode='zeros')
            up_conv = Deconv(in_channels=f * inner_channels,
                             out_channels=input_channels,
                             kernel_size=4,
                             stride=2,
                             padding=1,
                             bias=bias,
                             padding_mode='zeros')

            dn_layers = [
                nn.LeakyReLU(0.2, True), dn_conv,
                norm_layer(inner_channels)
            ]
            up_layers = [nn.ReLU(True), up_conv, norm_layer(input_channels)]

            if apply_dp:
                layers = dn_layers + [submodule] + up_layers + [nn.Dropout(0.5)]
            else:
                layers = dn_layers + [submodule] + up_layers

        self.net = nn.Sequential(*layers)
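
A minimal forward-pass sketch, assuming the usual pix2pix-style U-Net block semantics (the outermost block returns its output directly; inner blocks concatenate input and output along channels when skip connections are enabled, which is what the factor f = 2 above accounts for):

    def forward(self, x):
        # hypothetical: channel-wise concatenation forms the skip connection
        # consumed by the up-convolution of the enclosing block
        if self.outermost:
            return self.net(x)
        out = self.net(x)
        return torch.cat([x, out], dim=1) if self.add_skip_conn else out
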