import torch.nn as nn
from torch.nn.utils import weight_norm

# WNConv2d, ResidualBlock, ConvAttnBlock, ConvBlock, Gate, Rescale, and
# concat_elu are assumed to be defined elsewhere in the surrounding codebase.


# ResNet variant whose input conv reads 2 * in_channels, presumably because
# the normalized input is concatenated with its negation before the first
# convolution; double_after_norm flags an extra scaling step after the norm.
def __init__(self, in_channels, mid_channels, out_channels, num_blocks,
             kernel_size, padding, double_after_norm):
    super(ResNet, self).__init__()
    self.in_norm = nn.BatchNorm2d(in_channels)
    self.double_after_norm = double_after_norm
    self.in_conv = WNConv2d(2 * in_channels, mid_channels,
                            kernel_size, padding, bias=True)
    self.in_skip = WNConv2d(mid_channels, mid_channels,
                            kernel_size=1, padding=0, bias=True)
    self.blocks = nn.ModuleList([
        ResidualBlock(mid_channels, mid_channels)
        for _ in range(num_blocks)
    ])
    self.skips = nn.ModuleList([
        WNConv2d(mid_channels, mid_channels, kernel_size=1, padding=0, bias=True)
        for _ in range(num_blocks)
    ])
    self.out_norm = nn.BatchNorm2d(mid_channels)
    self.out_conv = WNConv2d(mid_channels, out_channels,
                             kernel_size=1, padding=0, bias=True)
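# Hypothetical instantiation (sizes are illustrative assumptions, not from the
# source). With kernel_size=3 and padding=1 the spatial dimensions are
# preserved, so the paired blocks/skips lists see feature maps of one shape.
resnet = ResNet(in_channels=3, mid_channels=64, out_channels=6,
                num_blocks=8, kernel_size=3, padding=1,
                double_after_norm=True)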
# ResNet variant that reflection-pads the input instead of zero-padding
# inside the first convolution.
def __init__(self, in_channels, mid_channels, out_channels, num_blocks,
             kernel_size, padding):
    super(ResNet, self).__init__()
    self.in_pad = nn.ReflectionPad2d(padding)
    self.in_conv = WNConv2d(in_channels, mid_channels,
                            kernel_size, padding=0, bias=True)
    self.in_skip = WNConv2d(mid_channels, mid_channels,
                            kernel_size=1, padding=0, bias=True)
    self.blocks = nn.ModuleList([
        ResidualBlock(mid_channels, mid_channels)
        for _ in range(num_blocks)
    ])
    self.skips = nn.ModuleList([
        WNConv2d(mid_channels, mid_channels, kernel_size=1, padding=0, bias=True)
        for _ in range(num_blocks)
    ])
    self.out_norm = nn.BatchNorm2d(mid_channels)
    self.out_conv = WNConv2d(mid_channels, out_channels,
                             kernel_size=1, padding=0, bias=True)
# Flow++-style coupling head: out_conv emits in_channels * (2 + 3 * k)
# channels, typically an affine shift and log-scale plus k mixture logits,
# means, and log-scales per input channel.
def __init__(self, in_channels, num_channels, num_blocks, num_components,
             drop_prob, use_attn=True, aux_channels=None, condition_embd_size=0):
    super(NN, self).__init__()
    self.k = num_components  # k = number of mixture components
    self.in_conv = WNConv2d(in_channels, num_channels, kernel_size=3, padding=1)
    self.mid_convs = nn.ModuleList([
        ConvAttnBlock(num_channels, drop_prob, use_attn, aux_channels)
        for _ in range(num_blocks)
    ])
    self.out_conv = WNConv2d(num_channels, in_channels * (2 + 3 * self.k),
                             kernel_size=3, padding=1)
    self.rescale = weight_norm(Rescale(in_channels))
    if condition_embd_size > 0:
        self.conditioning_projection = nn.Linear(
            condition_embd_size, 2 * in_channels * (2 + 3 * self.k), bias=False)
    else:
        # Define the attribute in both branches (mirroring the aux_conv = None
        # pattern in GatedConv below) so downstream code can test it safely.
        self.conditioning_projection = None
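# Illustrative check (values are assumptions, not from the source) of the
# output-channel arithmetic used by out_conv and conditioning_projection above.
in_channels, k = 3, 32                       # hypothetical image channels, mixture size
head_channels = in_channels * (2 + 3 * k)    # 3 * (2 + 96) = 294
cond_channels = 2 * head_channels            # conditioning projects to 588
assert (head_channels, cond_channels) == (294, 588)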
# Same coupling head as above, without the optional conditioning projection.
def __init__(self, in_channels, num_channels, num_blocks, num_components,
             drop_prob, use_attn=True, aux_channels=None):
    super(NN, self).__init__()
    self.k = num_components  # k = number of mixture components
    self.in_conv = WNConv2d(in_channels, num_channels, kernel_size=3, padding=1)
    self.mid_convs = nn.ModuleList([
        ConvAttnBlock(num_channels, drop_prob, use_attn, aux_channels)
        for _ in range(num_blocks)
    ])
    self.out_conv = WNConv2d(num_channels, in_channels * (2 + 3 * self.k),
                             kernel_size=3, padding=1)
    self.rescale = weight_norm(Rescale(in_channels))
# Gated convolution: concat_elu doubles the channel count, hence every conv
# here reads 2x-width inputs; the 1x1 gate emits 2 * num_channels channels
# for a value/gate split. Auxiliary features, if given, are mixed in through
# a separate 1x1 conv.
def __init__(self, num_channels, drop_prob=0., aux_channels=None):
    super(GatedConv, self).__init__()
    self.nlin = concat_elu
    self.conv = WNConv2d(2 * num_channels, num_channels, kernel_size=3, padding=1)
    self.drop = nn.Dropout2d(drop_prob)
    self.gate = WNConv2d(2 * num_channels, 2 * num_channels, kernel_size=1, padding=0)
    if aux_channels is not None:
        self.aux_conv = WNConv2d(2 * aux_channels, num_channels, kernel_size=1, padding=0)
    else:
        self.aux_conv = None
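import torch

# Minimal sketch of the gated activation the 1x1 gate conv above suggests (an
# assumption: forward() is not shown in this excerpt). The gate's
# 2 * num_channels output is split into a value half and a sigmoid gate half,
# PixelCNN++-style.
def gated_activation(gate_out):
    a, b = gate_out.chunk(2, dim=1)  # each half has num_channels channels
    return a * torch.sigmoid(b)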
def __init__(self, in_channels, mid_channels, out_channels, num_blocks, drop_prob):
    super(NN, self).__init__()
    conv_blocks = [WNConv2d(in_channels, mid_channels, kernel_size=3, padding=1)]
    conv_blocks += [ConvBlock(mid_channels, drop_prob) for _ in range(num_blocks)]
    conv_blocks += [WNConv2d(mid_channels, out_channels, kernel_size=3, padding=1)]
    self.conv_blocks = nn.Sequential(*conv_blocks)
# Pre-activation residual block; the first conv omits its bias because the
# BatchNorm that follows would cancel it anyway.
def __init__(self, in_channels, out_channels):
    super(ResidualBlock, self).__init__()
    self.in_norm = nn.BatchNorm2d(in_channels)
    self.in_conv = WNConv2d(in_channels, out_channels,
                            kernel_size=3, padding=1, bias=False)
    self.out_norm = nn.BatchNorm2d(out_channels)
    self.out_conv = WNConv2d(out_channels, out_channels,
                             kernel_size=3, padding=1, bias=True)
# GatedConv variant that delegates the value/gate split to a separate Gate
# module rather than a plain 1x1 convolution.
def __init__(self, in_channels, out_channels, drop_prob=0.):
    super(GatedConv, self).__init__()
    self.nlin = concat_elu
    self.conv = WNConv2d(2 * in_channels, out_channels, kernel_size=3, padding=1)
    self.drop = nn.Dropout2d(drop_prob)
    self.gate = Gate(2 * out_channels, 2 * out_channels)
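import torch
import torch.nn.functional as F

# concat_elu is referenced above but never defined in this excerpt. The sketch
# below is the standard concatenated ELU from PixelCNN++, which would explain
# the 2x-width inputs of every convolution here (an assumption, not confirmed
# by this excerpt).
def concat_elu(x):
    # Apply ELU to x and -x along the channel dimension, doubling the channels.
    return F.elu(torch.cat((x, -x), dim=1))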