Example #1
import torch.nn as nn

# weight_init_, sequential_pack, LayerNorm and AdaptiveInstanceNorm2d are
# project-local helpers assumed to be defined elsewhere in the same module.


def deconv2d_block(in_channels,
                   out_channels,
                   kernel_size,
                   stride=1,
                   padding=0,
                   output_padding=0,
                   dilation=1,
                   groups=1,
                   init_type=None,
                   pad_type='zero',
                   activation=None,
                   norm_type=None):
    # transpose conv2d + norm + activation
    block = []
    if pad_type == 'zero':
        block.append(nn.ZeroPad2d(padding))
    elif pad_type == 'reflect':
        block.append(nn.ReflectionPad2d(padding))
    elif pad_type == 'replicate':
        block.append(nn.ReplicationPad2d(padding))
    else:
        raise ValueError('invalid pad type: {}'.format(pad_type))
    block.append(nn.ConvTranspose2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,  # spatial padding is handled by the explicit pad layer above
        output_padding=output_padding,
        dilation=dilation,
        groups=groups
    ))
    weight_init_(block[-1].weight, init_type, activation)
    if norm_type is None:
        pass
    elif norm_type == 'BN':
        block.append(nn.BatchNorm2d(out_channels))
    elif norm_type == 'IN':
        block.append(nn.InstanceNorm2d(out_channels, affine=True))
    elif norm_type == 'LN':
        block.append(LayerNorm(out_channels, affine=True))
    elif norm_type == 'AdaptiveIN':
        block.append(AdaptiveInstanceNorm2d(out_channels))
    else:
        raise ValueError('invalid norm type: {}'.format(norm_type))
    if activation is not None:
        block.append(activation)
    return sequential_pack(block)
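A minimal usage sketch (not part of the original example): it assumes deconv2d_block and its project-local helpers (weight_init_, sequential_pack, LayerNorm, AdaptiveInstanceNorm2d) are in scope, that sequential_pack returns an nn.Sequential-like module, and that weight_init_ accepts init_type=None. The printed shape follows the standard transpose-convolution arithmetic.

import torch
import torch.nn as nn

# Hypothetical example: upsample a 64-channel feature map with a stride-2 transpose conv.
up = deconv2d_block(
    in_channels=64,
    out_channels=32,
    kernel_size=4,
    stride=2,
    activation=nn.ReLU(),
    norm_type='BN'
)
x = torch.randn(8, 64, 16, 16)
y = up(x)
print(y.shape)  # (16 - 1) * 2 + (4 - 1) + 1 = 34 -> torch.Size([8, 32, 34, 34])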
Example #2
import torch.nn as nn

# weight_init_, sequential_pack, LayerNorm and AdaptiveInstanceNorm2d are
# project-local helpers assumed to be defined elsewhere in the same module.


def conv2d_block(in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 init_type=None,
                 pad_type='reflect',
                 activation=None,
                 norm_type=None):
    # conv2d + norm + activation
    block = []
    if pad_type == 'zero':
        block.append(nn.ZeroPad2d(padding))
    elif pad_type == 'reflect':
        block.append(nn.ReflectionPad2d(padding))
    elif pad_type == 'replicate':
        block.append(nn.ReplicationPad2d(padding))
    else:
        raise ValueError('invalid pad type: {}'.format(pad_type))
    # padding is applied by the explicit pad layer above, so the conv itself uses padding=0
    block.append(nn.Conv2d(in_channels, out_channels,
                           kernel_size, stride, padding=0, dilation=dilation, groups=groups))
    weight_init_(block[-1].weight, init_type, activation)
    if norm_type is None:
        pass
    elif norm_type == 'BN':
        block.append(nn.BatchNorm2d(out_channels))
    elif norm_type == 'IN':
        block.append(nn.InstanceNorm2d(out_channels, affine=True))
    elif norm_type == 'LN':
        block.append(LayerNorm(out_channels, affine=True))
    elif norm_type == 'AdaptiveIN':
        block.append(AdaptiveInstanceNorm2d(out_channels))
    elif norm_type == 'WN':
        block[-1] = nn.utils.weight_norm(block[-1])
    else:
        raise ValueError('invalid norm type:{}'.format(norm_type))
    if activation is not None:
        block.append(activation)
    return sequential_pack(block)
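A minimal usage sketch (an assumption, not part of the original example): a 3x3 "same" convolution using the default reflection padding, assuming conv2d_block and its project-local helpers are in scope and sequential_pack returns an nn.Sequential-like module.

import torch
import torch.nn as nn

# Hypothetical example: 3x3 convolution that preserves spatial size via reflection padding.
conv = conv2d_block(
    in_channels=3,
    out_channels=64,
    kernel_size=3,
    stride=1,
    padding=1,
    activation=nn.LeakyReLU(0.2),
    norm_type='IN'
)
x = torch.randn(4, 3, 128, 128)
y = conv(x)
print(y.shape)  # ReflectionPad2d(1) + 3x3 / stride 1 keeps spatial size: torch.Size([4, 64, 128, 128])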
Example #3
import torch.nn as nn

# weight_init_ and sequential_pack are project-local helpers assumed to be
# defined elsewhere in the same module.


def deconv2d_block_bn(in_channels,
                      out_channels,
                      kernel_size,
                      stride=1,
                      padding=0,
                      output_padding=0,
                      dilation=1,
                      groups=1,
                      init_type=None,
                      pad_type='zero',
                      activation=None,
                      use_batchnorm=False):
    # transpose conv2d + bn + activation
    block = []
    if pad_type == 'zero':
        block.append(nn.ZeroPad2d(padding))
    elif pad_type == 'reflect':
        block.append(nn.ReflectionPad2d(padding))
    elif pad_type == 'replicate':
        block.append(nn.ReplicationPad2d(padding))
    else:
        raise ValueError('invalid pad type: {}'.format(pad_type))
    block.append(nn.ConvTranspose2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,  # spatial padding is handled by the explicit pad layer above
        output_padding=output_padding,
        dilation=dilation,
        groups=groups
    ))
    weight_init_(block[-1].weight, init_type, activation)
    if use_batchnorm:
        block.append(nn.BatchNorm2d(out_channels))
    if activation is not None:
        block.append(activation)
    return sequential_pack(block)
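A minimal usage sketch under the same assumptions (deconv2d_block_bn and its project-local helpers in scope, sequential_pack returning an nn.Sequential-like module): stride-2 upsampling with batch norm and ReLU.

import torch
import torch.nn as nn

# Hypothetical example: double the spatial resolution with a 2x2, stride-2 transpose conv.
up = deconv2d_block_bn(
    in_channels=128,
    out_channels=64,
    kernel_size=2,
    stride=2,
    activation=nn.ReLU(),
    use_batchnorm=True
)
x = torch.randn(2, 128, 8, 8)
y = up(x)
print(y.shape)  # (8 - 1) * 2 + (2 - 1) + 1 = 16 -> torch.Size([2, 64, 16, 16])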