def _coupling_transform_forward(self, inputs, transform_params):
    """Apply the affine coupling map y = scale * x + shift.

    Returns the transformed inputs together with the log absolute
    determinant of the Jacobian, summed over all non-batch dimensions.
    """
    scale, shift = self._scale_and_shift(transform_params)
    # NOTE(review): assumes scale is strictly positive — confirm that
    # _scale_and_shift guarantees this, otherwise log(scale) produces NaN.
    log_scale = torch.log(scale)
    transformed = scale * inputs + shift
    logabsdet = torchutils.sum_except_batch(log_scale, num_batch_dims=1)
    return transformed, logabsdet
def _coupling_transform_inverse(self, inputs, transform_params):
    """Invert the affine coupling map: x = (y - shift) / scale.

    Returns the inverted inputs together with the log absolute determinant
    of the inverse Jacobian (the negated forward log-determinant).
    """
    scale, shift = self._scale_and_shift(transform_params)
    # NOTE(review): assumes scale is strictly positive — confirm that
    # _scale_and_shift guarantees this, otherwise log(scale) produces NaN.
    log_scale = torch.log(scale)
    inverted = (inputs - shift) / scale
    # Inverse Jacobian log-determinant is minus the forward one.
    logabsdet = -torchutils.sum_except_batch(log_scale, num_batch_dims=1)
    return inverted, logabsdet
def forward(self, inputs, context=None):
    """Map inputs through the standard Cauchy CDF: F(x) = arctan(x)/pi + 1/2.

    The log absolute Jacobian determinant is the log of the Cauchy density,
    -log(pi) - log(1 + x^2), summed over all non-batch dimensions.
    """
    cdf_values = torch.atan(inputs) / np.pi + 0.5
    log_density = -np.log(np.pi) - torch.log(1.0 + inputs ** 2)
    logabsdet = torchutils.sum_except_batch(log_density)
    return cdf_values, logabsdet
def forward(self, inputs, context=None):
    """Tanh squashing transform.

    Returns ``(tanh(inputs), logabsdet)`` where the log absolute Jacobian
    determinant is summed over all non-batch dimensions.

    The log-Jacobian is computed with the numerically stable identity

        log(1 - tanh(x)^2) = 2 * (log 2 - x - softplus(-2x))

    The naive form ``log(1 - tanh(x)^2)`` underflows to ``log(0) = -inf``
    once ``|x|`` is large enough that ``tanh(x)`` saturates to +/-1 in
    floating point (around |x| > 9 for float32); the softplus form stays
    finite and accurate for all inputs.
    """
    outputs = torch.tanh(inputs)
    logabsdet = 2.0 * (np.log(2.0) - inputs - F.softplus(-2.0 * inputs))
    logabsdet = torchutils.sum_except_batch(logabsdet, num_batch_dims=1)
    return outputs, logabsdet
def forward(self, inputs, context=None):
    """Leaky-ReLU transform.

    Negative entries are scaled by ``self.negative_slope``, so each such
    entry contributes ``log(negative_slope)`` to the log absolute Jacobian
    determinant; non-negative entries contribute 0. The per-element
    contributions are summed over all non-batch dimensions.
    """
    outputs = F.leaky_relu(inputs, negative_slope=self.negative_slope)
    # Bug fix: the previous `(inputs < 0).type(torch.Tensor)` cast the mask
    # to the *default* tensor type (CPU float), silently moving CUDA inputs
    # to the CPU and discarding the input dtype. `.to(inputs.dtype)` keeps
    # both the device and the dtype of `inputs`.
    mask = (inputs < 0).to(inputs.dtype)
    logabsdet = self.log_negative_slope * mask
    logabsdet = torchutils.sum_except_batch(logabsdet, num_batch_dims=1)
    return outputs, logabsdet