def __init__(self, dataloader, encoder, output_dim):
    super().__init__()
    self.dataloader = dataloader
    self._transfer_ = {"encoder": encoder}
    # Predicts four values in [0, 1] (a box / corner parameterization) from the encoder features.
    self.region_proposals = nn.Sequential(
        nn.Conv2d(256, 64, 4, 1, 1),
        nn.BatchNorm2d(64),
        nn.ReLU(True),
        nn.Conv2d(64, 16, 7, 1, 1),
        ResBlock(16),
        ResBlock(16),
        MassBlock(32, 32),
        Lambda(lambda x: x.view(x.shape[0], 16 * 3)),
        nn.Linear(16 * 3, 4),
        nn.Sigmoid(),
    )
    self.h, self.w = 128, 128
    # Destination corners of the 128x128 output grid, in top-left, top-right,
    # bottom-right, bottom-left order.
    self.points_dest = torch.FloatTensor(
        [[[0, 0], [self.w - 1, 0], [self.w - 1, self.h - 1], [0, self.h - 1]]]
    )
    # 64-d feature extractor over the encoder features, followed by a linear head.
    self.conv_classifier = nn.Sequential(
        nn.Conv2d(256, 128, 4, 1, 1),
        nn.BatchNorm2d(128),
        nn.ReLU(True),
        nn.Conv2d(128, 64, 7, 1, 1),
        ResBlock(64),
        ResBlock(64),
        EmbedSpace(64, 32, 32),
        nn.MaxPool2d(32),
    )
    self.fn = nn.Sequential(nn.Linear(64, output_dim))
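# A minimal sketch of the `Lambda` wrapper used in `region_proposals` above, assuming it is
# the usual thin nn.Module that applies an arbitrary function inside an nn.Sequential; the
# repository's own definition may differ.
import torch
import torch.nn as nn

class Lambda(nn.Module):
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        return self.fn(x)

# Illustrative only: flatten 48 elements per sample to (N, 48) before nn.Linear(16 * 3, 4);
# the actual shape coming out of MassBlock is defined elsewhere in the repo.
flatten = Lambda(lambda x: x.view(x.shape[0], 16 * 3))
out = flatten(torch.randn(2, 16, 3))  # -> shape (2, 48)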
def __init__(self, out_channels=1, num_blocks=4, num_layers=6, front_channels=32,
             residual_channels=64, gate_channels=32, skip_channels=None,
             kernel_size=3, cin_channels=80, causal=True):
    super(Wavenet_Flow, self).__init__()
    self.causal = causal
    self.num_blocks = num_blocks
    self.num_layers = num_layers
    self.front_channels = front_channels
    self.out_channels = out_channels
    self.gate_channels = gate_channels
    self.residual_channels = residual_channels
    self.skip_channels = skip_channels
    self.cin_channels = cin_channels
    self.kernel_size = kernel_size
    # Front convolution lifts the 1-channel waveform to `residual_channels`.
    self.front_conv = nn.Sequential(
        Conv(1, self.residual_channels, self.front_channels, causal=self.causal),
        nn.ReLU()
    )
    # Stack of dilated residual blocks; the dilation grows as kernel_size**n within each block.
    self.res_blocks = nn.ModuleList()
    self.res_blocks_fast = nn.ModuleList()
    for b in range(self.num_blocks):
        for n in range(self.num_layers):
            self.res_blocks.append(ResBlock(self.residual_channels, self.gate_channels,
                                            self.skip_channels, self.kernel_size,
                                            dilation=self.kernel_size**n,
                                            cin_channels=self.cin_channels,
                                            local_conditioning=True,
                                            causal=self.causal, mode='SAME'))
    # Two 1x1 convolutions map the skip features to the output channels.
    self.final_conv = nn.Sequential(
        nn.ReLU(),
        Conv(self.skip_channels, self.skip_channels, 1, causal=self.causal),
        nn.ReLU(),
        Conv(self.skip_channels, self.out_channels, 1, causal=self.causal)
    )
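# The dilation schedule above repeats kernel_size**0 .. kernel_size**(num_layers - 1) in each
# of the num_blocks blocks. A quick back-of-the-envelope receptive-field check, assuming the
# third positional argument of `Conv` is its kernel size (as the 1x1 final convolutions
# suggest) and all convolutions have stride 1:

def receptive_field(num_blocks=4, num_layers=6, kernel_size=3, front_kernel=32):
    rf = front_kernel  # the front conv alone sees `front_kernel` samples
    for _ in range(num_blocks):
        for n in range(num_layers):
            # each dilated conv widens the receptive field by (kernel_size - 1) * dilation
            rf += (kernel_size - 1) * kernel_size ** n
    return rf

print(receptive_field())  # 4 blocks of dilations 1, 3, 9, 27, 81, 243 -> 2944 samples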
def __init__(self, out_channels=1, num_blocks=3, num_layers=10, residual_channels=512,
             gate_channels=512, skip_channels=512, kernel_size=2, cin_channels=128,
             upsample_scales=None, causal=True):
    super(Wavenet, self).__init__()
    self.causal = causal
    self.num_blocks = num_blocks
    self.num_layers = num_layers
    self.out_channels = out_channels
    self.gate_channels = gate_channels
    self.residual_channels = residual_channels
    self.skip_channels = skip_channels
    self.cin_channels = cin_channels
    self.kernel_size = kernel_size
    self.front_channels = 32
    self.front_conv = nn.Sequential(
        Conv(1, self.residual_channels, self.front_channels, causal=self.causal),
        nn.ReLU())
    # Dilated residual blocks; the dilation resets at the start of each block.
    self.res_blocks = nn.ModuleList()
    for b in range(self.num_blocks):
        for n in range(self.num_layers):
            self.res_blocks.append(
                ResBlock(self.residual_channels, self.gate_channels, self.skip_channels,
                         self.kernel_size, dilation=self.kernel_size**n,
                         cin_channels=self.cin_channels, local_conditioning=True,
                         causal=self.causal, mode='SAME'))
    self.final_conv = nn.Sequential(
        nn.ReLU(),
        Conv(self.skip_channels, self.skip_channels, 1, causal=self.causal),
        nn.ReLU(),
        Conv(self.skip_channels, self.out_channels, 1, causal=self.causal))
    # Transposed convolutions upsample the local conditioning along time by prod(upsample_scales).
    # Guard against the None default, which would otherwise raise a TypeError in the loop below.
    if upsample_scales is None:
        upsample_scales = []
    self.upsample_conv = nn.ModuleList()
    for s in upsample_scales:
        convt = nn.ConvTranspose2d(1, 1, (3, 2 * s), padding=(1, s // 2), stride=(1, s))
        convt = nn.utils.weight_norm(convt)
        nn.init.kaiming_normal_(convt.weight)
        self.upsample_conv.append(convt)
        self.upsample_conv.append(nn.LeakyReLU(0.4))
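# Shape sketch for the upsampling stack above: each ConvTranspose2d with kernel (3, 2*s),
# padding (1, s // 2) and stride (1, s) multiplies the time axis by exactly s while keeping
# the channel axis, so the stack upsamples by prod(upsample_scales). The scales below are
# assumed values for illustration only (e.g. a hop size of 16 * 16 = 256 frames-to-samples);
# the conditioning is treated as a 1-channel 2D map (batch, 1, cin_channels, frames).
import torch
import torch.nn as nn

upsample_scales = [16, 16]  # hypothetical; not taken from the constructor defaults
upsample_conv = nn.ModuleList()
for s in upsample_scales:
    convt = nn.ConvTranspose2d(1, 1, (3, 2 * s), padding=(1, s // 2), stride=(1, s))
    upsample_conv.append(nn.utils.weight_norm(convt))
    upsample_conv.append(nn.LeakyReLU(0.4))

c = torch.randn(2, 1, 128, 100)  # (batch, 1, cin_channels, mel frames)
for layer in upsample_conv:
    c = layer(c)
print(c.shape)  # torch.Size([2, 1, 128, 25600]): the frame axis grew by 256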