def __init__(self, in_channels, out_channels, kernel_size, downsample=False, resample_kernel=(1, 3, 3, 1), bias=True, activate=True):
    """Build a conv layer: optional smoothing + stride-2 downsample, an
    equalized conv, then an optional leaky-ReLU activation.

    Args:
        in_channels (int): Channels of the input tensor.
        out_channels (int): Channels of the output tensor.
        kernel_size (int): Convolution kernel size.
        downsample (bool): If True, blur with ``resample_kernel`` and use a
            stride-2 conv with zero padding; otherwise stride 1 with
            ``kernel_size // 2`` padding. Default: False.
        resample_kernel (tuple[int]): 1D FIR kernel used by the smoothing
            step when downsampling. Default: (1, 3, 3, 1).
        bias (bool): Whether a bias term is used. Default: True.
        activate (bool): Whether to append an activation. Default: True.
    """
    modules = []

    if downsample:
        # Smooth before the strided conv; the stride-2 conv itself then
        # uses no padding.
        modules.append(
            UpFirDnSmooth(resample_kernel, upsample_factor=1, downsample_factor=2, kernel_size=kernel_size))
        stride, self.padding = 2, 0
    else:
        stride, self.padding = 1, kernel_size // 2

    # When the activation is the fused variant, the conv's own bias is
    # disabled (presumably the bias is applied inside FusedLeakyReLU —
    # confirm against that implementation).
    modules.append(
        EqualConv2d(in_channels, out_channels, kernel_size, stride=stride,
                    padding=self.padding, bias=bias and not activate))

    if activate:
        modules.append(FusedLeakyReLU(out_channels) if bias else ScaledLeakyReLU(0.2))

    super(ConvLayer, self).__init__(*modules)