Example #1
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 padding=0,
                 bias=True,
                 pad_type='zero',
                 norm='none',
                 activation='relu'):
        super(upconv_block, self).__init__()

        self.deconv = nn.ConvTranspose2d(in_channels, out_channels, 4, 2, 1)  # fixed 4x4, stride-2 kernel: doubles H and W
        self.act = _activation('relu')          # the deconv stage always uses ReLU + instance norm;
        self.norm = _norm('in', out_channels)   # `activation` and `norm` only configure the conv_block below

        self.conv = conv_block(out_channels,
                               out_channels,
                               kernel_size,
                               stride,
                               bias=bias,
                               padding=padding,
                               pad_type=pad_type,
                               norm=norm,
                               activation=activation)
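All of the snippets on this page rely on small helper factories named _activation and _norm. Their real implementations are not shown here; a minimal sketch consistent with the names used in these examples ('relu', 'lrelu', 'elu', 'bn', 'in', 'none') might look like this (an assumption, not the project's actual code):

import torch.nn as nn

def _activation(act_type):
    # map an activation name to a module; the exact set of names is assumed
    if act_type == 'relu':
        return nn.ReLU(inplace=True)
    if act_type == 'lrelu':
        return nn.LeakyReLU(0.2, inplace=True)
    if act_type == 'elu':
        return nn.ELU(inplace=True)
    if act_type == 'none':
        return nn.Identity()
    raise ValueError(f'unknown activation: {act_type}')

def _norm(norm_type, channels):
    # map a normalization name to a module
    if norm_type == 'bn':
        return nn.BatchNorm2d(channels)
    if norm_type == 'in':
        return nn.InstanceNorm2d(channels)
    if norm_type == 'none':
        return nn.Identity()
    raise ValueError(f'unknown norm: {norm_type}')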
Example #2
    def __init__(self, layer, buffer, in_channels, out_channels, kernel_size=3, stride=2, dilation=1, groups=1,
                 bias=True, padding=1, output_padding=1, norm='in', activation='elu', pad_type='zero'):
        """

        :param layer:
        :param buffer:
        :param in_channels:
        :param out_channels:
        :param kernel_size:
        :param stride:
        :param dilation:
        :param groups:
        :param bias:
        :param padding:
        :param output_padding:
        :param norm:
        :param activation:
        :param pad_type:
        """
        super(RDeConv, self).__init__()

        self.buffer = buffer
        self.layer = layer
        self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                                         padding=padding, output_padding=output_padding, dilation=dilation, groups=groups, bias=bias)
        self.act = _activation(activation)
        self.norm = _norm(norm, out_channels)
        self.conv = conv_block(out_channels, out_channels, kernel_size, stride=1, bias=bias,
                               padding=1, pad_type=pad_type, norm=norm, activation=activation)
        self.fusion = conv_block(out_channels * 2, out_channels, kernel_size=1, norm=norm,
                                 activation=activation)
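With the defaults above (kernel_size=3, stride=2, padding=1, output_padding=1) the transposed convolution exactly doubles the spatial size, following out = (in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1. A quick standalone check:

import torch
import torch.nn as nn

deconv = nn.ConvTranspose2d(16, 8, kernel_size=3, stride=2,
                            padding=1, output_padding=1)
x = torch.randn(1, 16, 32, 32)
print(deconv(x).shape)  # torch.Size([1, 8, 64, 64]): (32-1)*2 - 2 + 2 + 1 + 1 = 64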
Example #3
    def __init__(self, in_channels):
        super(DMFB, self).__init__()
        self.c1 = conv_layer(in_channels, in_channels // 4, 3, 1)
        self.d1 = conv_layer(in_channels // 4, in_channels // 4, 3, 1, 1)  # dilation rate = 1
        self.d2 = conv_layer(in_channels // 4, in_channels // 4, 3, 1, 2)  # dilation rate = 2
        self.d3 = conv_layer(in_channels // 4, in_channels // 4, 3, 1, 4)  # dilation rate = 4
        self.d4 = conv_layer(in_channels // 4, in_channels // 4, 3, 1, 8)  # dilation rate = 8
        self.act = _activation('relu')
        self.norm = _norm('in', in_channels)
        self.c2 = conv_layer(in_channels, in_channels, 1, 1)  # 1x1 fusion
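Only __init__ is shown above. A simplified sketch of a plausible forward for such a multi-dilation block (an assumption; the original DMFB fuses the dilated branches cumulatively rather than in parallel):

    def forward(self, x):
        y = self.act(self.c1(x))                 # reduce to C/4 channels
        d = [self.d1(y), self.d2(y), self.d3(y), self.d4(y)]
        y = self.c2(torch.cat(d, dim=1))         # concat back to C channels, 1x1 fusion
        return self.norm(y + x)                  # residual connection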
Example #4
    def __init__(self, channels, kernel_size, stride=1, dilation=1, groups=1,
                 bias=True, padding=0, norm='in', activation='relu', pad_type='zero'):
        super(ResConv, self).__init__()
        self.activation = _activation(activation)
        self.norm = _norm(norm, channels)
        self.pad = _padding(pad_type, padding)  # built here, applied before self.conv (presumably in forward)

        self.conv = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size, stride, 0, dilation, groups, bias),
            self.norm,
            self.activation,
            nn.Conv2d(channels, channels, kernel_size, stride, 1, 1, groups, bias),
            self.norm  # note: the same norm module instance is reused for both convolutions
        )
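A typical residual forward consistent with this layout (a sketch, assuming stride=1 so the shapes match for the skip connection; the original forward is not shown):

    def forward(self, x):
        return self.activation(x + self.conv(self.pad(x)))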
Example #5
    def __init__(self,
                 in_channels_1, in_channels_2, out_channels,
                 kernel_size_1, kernel_size_2,
                 stride_1, up_stride_2,
                 padding_1, up_padding_2, output_padding=0,
                 activation_in='relu', activation_out='lrelu',
                 norm_in='bn', norm_out='none'):
        """
        1 convolution - from f {l-1}{r-1}
        2 transposed convolution - from f {l} {k}
        3 convolution - the one to be summed with result of f {l}{k} (2) convolution
        4 convolution - the one to be producted with result of f {l}{k} (2) convolution

        :param in_channels_1: Input channels of the 1st convolution layer
        :param in_channels_2: Input channels of the 2st convolution layer
        :param out_channels: Output channels of the 1st, 2st, 3rd, 4th convolution layers

        :param kernel_size_1: Kernel size of 1st convolution layer
        :param kernel_size_2: Kernel size of 2nd transposed convolution layer

        :param stride_1: Stride of 1st convolution layer
        :param up_stride_2: Stride of 2nd transposed convolution layer

        :param padding_1: Padding of 1st convolution layer
        :param up_padding_2: Padding of 2nd transposed convolution layer
        :param output_padding: Output padding of 2nd transposed convolution layer

        :param activation_in: Activation layer of 1st convolution layer
        :param activation_out: Activation layer 2nd transposed convolution layer

        :param norm_in: Normalization layer of 1st convolution layer
        :param norm_out: Normalization layer of 2nd transposed convolution layer
        """

        super(RefinementBlock, self).__init__()

        self.conv_1 = conv_block(
            in_channels=in_channels_1,
            out_channels=out_channels,
            kernel_size=kernel_size_1,
            stride=stride_1,
            padding=padding_1,
            norm='none',
            activation=activation_in
        )

        self.upconv_2 = upconv_block(
            in_channels=in_channels_2,
            out_channels=out_channels,
            kernel_size=kernel_size_2,
            stride=up_stride_2,
            padding=up_padding_2,
            output_padding=output_padding,
            norm=norm_in,
            activation='none'
        )

        self.conv_3 = conv_block(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            norm='none',
            activation='none'
        )

        self.conv_4 = conv_block(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            norm='none',
            activation='none'
        )

        self.out_act = _activation(act_type=activation_out)
        self.out_norm = _norm(norm_type=norm_out, channels=out_channels)
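Only __init__ is shown; one wiring consistent with the docstring (a sketch, argument names are hypothetical and the exact combination of the sum and product paths may differ in the original):

    def forward(self, f_prev, f_lk):
        c = self.conv_1(f_prev)                            # (1) convolution
        u = self.upconv_2(f_lk)                            # (2) transposed convolution
        out = (self.conv_3(c) + u) + (self.conv_4(c) * u)  # sum path (3), product path (4)
        return self.out_act(self.out_norm(out))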
Example #6
    def __init__(self,
                 in_channels_m, out_channels_m,
                 in_channels_e, out_channels_e,
                 kernel_size_m, kernel_size_e,
                 stride_m, stride_e,
                 padding_m, padding_e,
                 activation_m='relu', activation_e='relu',
                 norm_m='none', norm_e='bn',
                 device=torch.device('cpu')):
        """
        :param in_channels_m: input channels of mask layer - m {l-1}
        :param out_channels_m: output channels of mask layer - m {l}
        :param in_channels_e: input chanels of image layer - e {l-1}
        :param out_channels_e: output channels of image layer - e {l}

        :param kernel_size_m: kernel size for transformation m {l-1} -> m {l}
        :param kernel_size_e: kernel size for transformation e {l-1} -> e {l}
            and for kernel creation m {l} -> kernel for e {l} convolution

        :param stride_m: stride for m {l-1} -> m {l} transformation
        :param stride_e: stride for e {l-1} -> e {l} transformation

        :param padding_m: padding for m {l-1} -> m {l} transformation
        :param padding_e: padding for e {l-1} -> e {l} transformation

        :param activation_m: activation_m for m {l-1} -> m {l} transformation
        :param activation_e: activation_e for e {l-1} -> e {l} transformation
        """
        super(MADF, self).__init__()
        self.in_channels_m = in_channels_m
        self.out_channels_m = out_channels_m
        self.in_channels_e = in_channels_e
        self.out_channels_e = out_channels_e

        self.kernel_size_e = kernel_size_e
        self.kernel_size_m = kernel_size_m

        self.padding_e = padding_e
        self.padding_m = padding_m

        self.stride_e = stride_e
        self.stride_m = stride_m

        self.activation_m = activation_m
        self.activation_e = activation_e

        self.norm_m = norm_m
        self.norm_e = norm_e

        self.conv_m = conv_block(
            in_channels=in_channels_m,
            out_channels=out_channels_m,
            kernel_size=kernel_size_m,
            stride=stride_m,
            padding=padding_m,
            activation=activation_m,
            norm=norm_m)

        self.conv_filters = conv_block(
            in_channels=out_channels_m,
            # predicts one in_channels_e x k x k kernel per output channel
            # at every spatial location
            out_channels=in_channels_e * kernel_size_e *
            out_channels_e * kernel_size_e,
            kernel_size=1,
            stride=1,
            padding=0,
            activation="none",
            norm='none')

        self.device = device
        self.activation_e = _activation(activation_e)  # replaces the string set above with the actual module
        self.norm = _norm(norm_e, out_channels_e)
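How the per-location kernels predicted by conv_filters are applied is not shown in this snippet. A standalone sketch using unfold (the function name and the assumption that the filter map shares e's resolution are mine, and k is assumed odd):

import torch
import torch.nn.functional as F

def apply_dynamic_filters(e, filters, out_channels_e, kernel_size_e):
    # e: (B, C_in, H, W); filters: (B, C_in * k * out_channels_e * k, H, W)
    B, C_in, H, W = e.shape
    k = kernel_size_e
    patches = F.unfold(e, k, padding=k // 2)               # (B, C_in*k*k, H*W)
    w = filters.view(B, out_channels_e, C_in * k * k, H * W)
    out = (w * patches.unsqueeze(1)).sum(dim=2)            # (B, out_channels_e, H*W)
    return out.view(B, out_channels_e, H, W)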
Example #7
    def __init__(self, device, in_nc=3, kernel_size=4, nf=48, im_size=256):
        """
        :param in_nc: number of in channels
        :param kernel_size: kernel size
        :param nf: number of convolution filters after 1 layer
        """
        super(InpaintingDiscriminator, self).__init__()

        self.patch_dis = nn.ModuleList([
            SNBlock(in_channels=in_nc,
                    out_channels=nf,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            SNBlock(in_channels=nf,
                    out_channels=nf * 2,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size // 2, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            SNBlock(in_channels=nf * 2,
                    out_channels=nf * 2,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size // 4, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            SNBlock(in_channels=nf * 2,
                    out_channels=nf * 4,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size // 8, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            SNBlock(in_channels=nf * 4,
                    out_channels=nf * 4,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size // 16, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            SNBlock(in_channels=nf * 4,
                    out_channels=nf * 4,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size // 32, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            nn.Flatten(),
            nn.Linear(nf * 4 * (im_size // 64) ** 2, 512)  # six stride-2 blocks: 64x downsampling
        ])
        self.flat = nn.Flatten()

        self.edge_dis = nn.Sequential(
            SobelFilter(device,
                        in_nc=3,
                        filter_c=1,
                        padding=get_pad(256, 3, 1),
                        stride=1),
            SNBlock(in_channels=2,
                    out_channels=nf // 2,
                    kernel_size=kernel_size,
                    stride=4,
                    padding=get_pad(im_size, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            SNBlock(in_channels=nf // 2,
                    out_channels=nf,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size // 4, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            SNBlock(in_channels=nf,
                    out_channels=nf * 2,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size // 8, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            SNBlock(in_channels=nf * 2,
                    out_channels=nf * 4,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size // 16, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            SNBlock(in_channels=nf * 4,
                    out_channels=nf * 4,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size // 32, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            SNBlock(in_channels=nf * 4,
                    out_channels=nf * 4,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=get_pad(im_size // 64, kernel_size, 2),
                    norm='in',
                    activation='relu',
                    pad_type='zero'),
            nn.Flatten(),
            nn.Linear(nf * 4 * (im_size // 128) ** 2, 512))  # stride 4 then five stride-2 blocks: 128x downsampling

        self.out = nn.Sequential(_activation('relu'), nn.Linear(1024, 1))
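get_pad is not defined in these snippets. Image-inpainting codebases commonly use a 'same'-style padding helper along these lines (a sketch; the project's actual version may differ):

import math

def get_pad(in_size, ksize, stride, atrous=1):
    # choose padding so a conv with this kernel/stride outputs ceil(in_size / stride)
    out_size = math.ceil(in_size / stride)
    pad_total = max((out_size - 1) * stride + atrous * (ksize - 1) + 1 - in_size, 0)
    return pad_total // 2

print(get_pad(256, 4, 2))  # 1 -> a 4x4, stride-2 conv halves 256 to 128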