def __init__(self, n_channels, dilations, use_skip=True):
    super(DenseResBlocks1d, self).__init__()
    self.use_skip = use_skip
    self.n_layers = len(dilations)
    self.convs = nn.ModuleList()
    self.skips = nn.ModuleList()
    for i, dilation in enumerate(dilations):
        # Dilated conv with replication padding keeps the sequence length;
        # the GLU halves the doubled channels back to n_channels.
        self.convs.append(
            nn.Sequential(
                nn.ReplicationPad1d(dilation),
                utils.wn_xavier(
                    nn.Conv1d(n_channels, 2 * n_channels, 3, dilation=dilation)),
                Func(F.glu, dim=1),
            ))
        # All layers but the last emit both a residual and a skip path
        # (4 * n_channels before the GLU); the last layer emits one path only.
        last_c = (4 * n_channels
                  if use_skip and i < self.n_layers - 1 else 2 * n_channels)
        self.skips.append(
            nn.Sequential(
                utils.wn_xavier(nn.Conv1d(n_channels, last_c, 1)),
                Func(F.glu, dim=1),
            ))
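# Every block in this file relies on two project helpers that are not shown
# in this excerpt: `Func`, which wraps a stateless function so it can sit
# inside an nn.Sequential, and `utils.wn_xavier`, which by its name applies
# weight normalization with Xavier initialization. A minimal sketch of
# assumed definitions (the names match their usage above, but the exact
# bodies are assumptions, not the repository's code):

import torch.nn as nn

class Func(nn.Module):
    """Wrap a function such as F.glu so it can be used inside nn.Sequential."""

    def __init__(self, fn, **kwargs):
        super().__init__()
        self.fn = fn
        self.kwargs = kwargs

    def forward(self, x):
        return self.fn(x, **self.kwargs)

def wn_xavier(module):
    # assumed behavior: Xavier-initialize the weight, then apply weight norm
    nn.init.xavier_uniform_(module.weight)
    return nn.utils.weight_norm(module)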
def __init__(self, base_dim):
    super(LinearResBlock, self).__init__()
    self.update = nn.Sequential(
        utils.wn_xavier(nn.Linear(base_dim, 2 * base_dim, bias=False)),
        Func(F.glu, dim=-1),
        utils.wn_xavier(nn.Linear(base_dim, 2 * base_dim, bias=False)),
        Func(F.glu, dim=-1),
    )
def __init__(self, channels):
    super().__init__(channels)
    self.update = nn.Sequential(
        nn.ReflectionPad2d(1),
        utils.wn_xavier(nn.Conv2d(channels, 2 * channels, 3, bias=False)),
        Func(F.glu, dim=1),
        nn.ReflectionPad2d(1),
        utils.wn_xavier(nn.Conv2d(channels, 2 * channels, 3, bias=False)),
        Func(F.glu, dim=1),
    )
def __init__(self, base_dim):
    super(InfusedResBlock1d, self).__init__()
    self.update = nn.Sequential(
        utils.wn_xavier(
            nn.Conv1d(base_dim, 2 * base_dim, 3, padding=1, bias=False)),
        Func(F.glu, dim=1),
        utils.wn_xavier(
            nn.Conv1d(base_dim, 2 * base_dim, 3, padding=1, bias=False)),
        Func(F.glu, dim=1),
    )
    # per-channel gate for the residual update, initialized to ones
    self.alpha = nn.Parameter(torch.ones(1, base_dim, 1))
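# The forward passes are not shown in this excerpt; for GLU residual blocks
# like the three constructors above, the usual pattern is a (gated) residual
# update. A hedged, self-contained sketch -- the class name and forward are
# illustrative assumptions, not the repository's code; nn.GLU(dim=1) stands
# in for Func(F.glu, dim=1), and weight norm / Xavier init are omitted:

import torch
import torch.nn as nn

class _GLUResBlock1dSketch(nn.Module):
    def __init__(self, base_dim):
        super().__init__()
        self.update = nn.Sequential(
            # each conv doubles the channels, each GLU halves them back
            nn.Conv1d(base_dim, 2 * base_dim, 3, padding=1, bias=False),
            nn.GLU(dim=1),
            nn.Conv1d(base_dim, 2 * base_dim, 3, padding=1, bias=False),
            nn.GLU(dim=1),
        )
        self.alpha = nn.Parameter(torch.ones(1, base_dim, 1))

    def forward(self, x):
        # residual update, gated per channel by alpha (ones at init)
        return x + self.alpha * self.update(x)

x = torch.randn(8, 64, 100)
assert _GLUResBlock1dSketch(64)(x).shape == x.shape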
def __init__(self, in_channels, hidden_channels=None):
    super().__init__()  # was missing; nn.Module must be initialized first
    if hidden_channels is None:
        hidden_channels = in_channels
    # left-only padding of 2 plus a kernel of 3 makes the input conv causal:
    # output frame t depends only on inputs <= t, and the length is preserved
    self.pad = nn.ConstantPad1d((2, 0), 0.0)
    self.input = nn.Sequential(
        utils.wn_xavier(nn.Conv1d(in_channels, 2 * hidden_channels, 3)),
        Func(F.glu, dim=1),
    )
    self.output = nn.Sequential(
        utils.wn_xavier(nn.Conv1d(hidden_channels, 2 * in_channels, 1)),
        Func(F.glu, dim=1),
    )
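# Quick check of the causal geometry above: ConstantPad1d((2, 0), 0.0) pads
# only on the left, so a kernel-3 conv preserves the sequence length while
# never looking at future frames. Standalone demonstration:

import torch
import torch.nn as nn

pad = nn.ConstantPad1d((2, 0), 0.0)
conv = nn.Conv1d(4, 8, 3)
x = torch.randn(1, 4, 50)
assert conv(pad(x)).shape == (1, 8, 50)  # length: 50 + 2 - (3 - 1) = 50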
def __init__(self, out_channels, inner_channels=128, zero_init=True):
    super().__init__()
    self.input = utils.wn_xavier(nn.Conv1d(out_channels, inner_channels, 3))
    self.mid = utils.wn_xavier(nn.Conv1d(inner_channels, inner_channels, 3))
    self.output = nn.Conv1d(inner_channels, 2 * out_channels, 3)
    if zero_init:
        # zero the final conv so the block predicts exactly zero at init
        self.output.weight.data[...] = 0.0
        self.output.bias.data[...] = 0.0
    self.alpha = nn.Parameter(torch.zeros(1, out_channels, 1))
    self.beta = nn.Parameter(torch.zeros(1, out_channels, 1))
    self.inner_channels = inner_channels
    self.out_channels = out_channels
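# With zero_init=True the final conv outputs exactly zero for any input, so
# the block's initial prediction is zero; combined with alpha = beta = 0 this
# suggests the layer starts close to an identity transform, a standard
# stabilization trick in coupling-style layers (the exact effect depends on
# the forward pass, which is not shown). Standalone check of the zero-init:

import torch
import torch.nn as nn

out = nn.Conv1d(128, 2 * 64, 3)
out.weight.data[...] = 0.0
out.bias.data[...] = 0.0
assert out(torch.randn(2, 128, 40)).abs().max().item() == 0.0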
def __init__(
    self,
    out_channels,
    inner_channels=None,
    layers=3,
    rank=2,
    zero_init=True,
    net=None,
):
    super().__init__()
    if rank < 0 or rank > 3:
        raise ValueError("supported rank: 0 <= rank <= 3")
    if net is not None:
        self.net = net
    else:
        if inner_channels is None:
            raise ValueError("either `net` or `inner_channels` must be given")
        # rank 0 operates on feature vectors (nn.Linear, no spatial kwargs);
        # ranks 1-3 use kernel-3 convs with padding 1, preserving spatial size
        kwargs = {} if rank == 0 else {"kernel_size": 3, "padding": 1}
        if rank == 0:
            conv = nn.Linear
        elif rank == 1:
            conv = nn.Conv1d
        elif rank == 2:
            conv = nn.Conv2d
        elif rank == 3:
            conv = nn.Conv3d
        else:  # unreachable after the range check above; kept defensively
            raise ValueError(f"invalid rank: {rank}")
        modules = [
            utils.wn_xavier(conv(out_channels, inner_channels, **kwargs)),
            nn.ELU(),
        ]
        for _ in range(layers):
            modules.append(
                utils.wn_xavier(conv(inner_channels, inner_channels, **kwargs)))
            modules.append(nn.ELU())
        end = conv(inner_channels, 2 * out_channels, **kwargs)
        if zero_init:
            # start from a zero prediction, leaving alpha / beta in charge
            end.weight.data.zero_()
            end.bias.data.zero_()
        modules.append(end)
        self.net = nn.Sequential(*modules)
    self.alpha = nn.Parameter(torch.ones(out_channels))
    self.beta = nn.Parameter(torch.zeros(out_channels))
    self.rank = rank
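# A standalone shape check of the rank=2 recipe above (the class itself is
# not named in this excerpt, so the net is rebuilt inline; wn_xavier is
# omitted for brevity):

import torch
import torch.nn as nn

out_c, inner_c, n_layers = 16, 32, 3
kwargs = {"kernel_size": 3, "padding": 1}
mods = [nn.Conv2d(out_c, inner_c, **kwargs), nn.ELU()]
for _ in range(n_layers):
    mods += [nn.Conv2d(inner_c, inner_c, **kwargs), nn.ELU()]
end = nn.Conv2d(inner_c, 2 * out_c, **kwargs)
end.weight.data.zero_()
end.bias.data.zero_()
net = nn.Sequential(*mods, end)

y = net(torch.randn(4, out_c, 8, 8))
assert y.shape == (4, 2 * out_c, 8, 8)  # padding 1 keeps spatial dims
assert y.abs().max().item() == 0.0      # zero-init end conv: zero output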