def inverse(self, z):
    # Invert the coupling layer: undo the transform of the lower half
    # (conditioned on the upper half), then undo the upper half.
    log_det = torch.zeros(z.shape[0], device=z.device)
    lower, upper = z[:, :self.dim // 2], z[:, self.dim // 2:]

    # Spline parameters for the lower half, predicted from the upper half:
    # K bin widths, K bin heights, and K - 1 knot derivatives per dimension.
    out = self.f2(upper).reshape(-1, self.dim // 2, 3 * self.K - 1)
    W, H, D = torch.split(out, self.K, dim=2)
    W, H = torch.softmax(W, dim=2), torch.softmax(H, dim=2)
    W, H = 2 * self.B * W, 2 * self.B * H  # widths/heights each sum to 2B
    D = F.softplus(D)                      # knot derivatives must be positive
    lower, ld = unconstrained_RQS(
        lower, W, H, D, inverse=True, tail_bound=self.B)
    log_det += torch.sum(ld, dim=1)

    # Spline parameters for the upper half, conditioned on the recovered lower half.
    out = self.f1(lower).reshape(-1, self.dim // 2, 3 * self.K - 1)
    W, H, D = torch.split(out, self.K, dim=2)
    W, H = torch.softmax(W, dim=2), torch.softmax(H, dim=2)
    W, H = 2 * self.B * W, 2 * self.B * H
    D = F.softplus(D)
    upper, ld = unconstrained_RQS(
        upper, W, H, D, inverse=True, tail_bound=self.B)
    log_det += torch.sum(ld, dim=1)
    return torch.cat([lower, upper], dim=1), log_det
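# Illustrative sketch (not part of the original module): how a raw conditioner
# output is turned into valid rational-quadratic spline parameters. The toy
# values of batch/dim/K/B below are assumptions made for the demo only.
def _demo_spline_params():
    import torch
    import torch.nn.functional as F
    batch, dim, K, B = 4, 6, 8, 3.0
    out = torch.randn(batch, dim // 2, 3 * K - 1)  # raw conditioner output
    # Split into K widths, K heights, K - 1 interior knot derivatives.
    W, H, D = torch.split(out, K, dim=2)
    W, H = torch.softmax(W, dim=2), torch.softmax(H, dim=2)
    W, H = 2 * B * W, 2 * B * H  # bins tile the interval [-B, B]
    D = F.softplus(D)            # strictly positive derivatives
    assert torch.allclose(W.sum(dim=2), torch.full((batch, dim // 2), 2 * B))
    assert (D > 0).all()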
def inverse(self, z):
    # Invert the autoregressive flow: x_i depends on x_{<i}, so the
    # dimensions must be recovered one at a time, in order.
    x = torch.zeros_like(z)
    log_det = torch.zeros(x.shape[0], device=z.device)
    for i in range(self.dim):
        if i == 0:
            # The first dimension has no predecessors; use learned parameters.
            init_param = self.init_param.expand(x.shape[0], 3 * self.K - 1)
            W, H, D = torch.split(init_param, self.K, dim=1)
        else:
            # Condition the spline for dimension i on the recovered x_{<i}.
            out = self.layers[i - 1](x[:, :i])
            W, H, D = torch.split(out, self.K, dim=1)
        W, H = torch.softmax(W, dim=1), torch.softmax(H, dim=1)
        W, H = 2 * self.B * W, 2 * self.B * H
        D = F.softplus(D)
        x[:, i], ld = unconstrained_RQS(
            z[:, i], W, H, D, inverse=True, tail_bound=self.B)
        log_det += ld
    return x, log_det
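# Usage sketch (an assumption, not from the original file): sampling by
# pushing base-distribution noise through inverse(). Note the inversion
# loop above is sequential, so sampling costs O(dim) conditioner calls.
def _demo_sampling(flow, dim, n_samples=16):
    import torch
    z = torch.randn(n_samples, dim)  # base samples z ~ N(0, I)
    with torch.no_grad():            # no gradients needed for sampling
        x, _ = flow.inverse(z)
    return x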
def forward(self, x):
    # Forward pass x -> z. Every conditioner sees the ground-truth x_{<i},
    # so unlike inverse() this direction needs no sequential recovery.
    z = torch.zeros_like(x)
    log_det = torch.zeros(z.shape[0], device=x.device)
    for i in range(self.dim):
        if i == 0:
            init_param = self.init_param.expand(x.shape[0], 3 * self.K - 1)
            W, H, D = torch.split(init_param, self.K, dim=1)
        else:
            out = self.layers[i - 1](x[:, :i])
            W, H, D = torch.split(out, self.K, dim=1)
        W, H = torch.softmax(W, dim=1), torch.softmax(H, dim=1)
        W, H = 2 * self.B * W, 2 * self.B * H
        D = F.softplus(D)
        z[:, i], ld = unconstrained_RQS(
            x[:, i], W, H, D, inverse=False, tail_bound=self.B)
        log_det += ld
    return z, log_det
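# Training sketch (illustrative): the maximum-likelihood objective via the
# change-of-variables formula, log p(x) = log p_z(z) + log|det dz/dx|.
# The flow instance and data tensor are assumed to exist; only the use of
# forward() and its log-determinant is taken from the methods above.
def _demo_nll(flow, x):
    import torch
    from torch.distributions import Normal
    z, log_det = flow.forward(x)                      # x -> z plus log-Jacobian
    base = Normal(torch.zeros_like(z), torch.ones_like(z))
    log_prob = base.log_prob(z).sum(dim=1) + log_det  # per-sample log-likelihood
    return -log_prob.mean()                           # NLL to minimize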