def __call__(self, x, inverse=False):
    """Normalize (or de-normalize) ``x`` according to ``self.mode``.

    Args:
        x: a tensor, or a nested sequence/mapping of tensors, to transform.
        inverse: if True, apply the inverse transform instead of the
            forward one.

    Returns:
        The transformed structure with the same layout as ``x``.

    Raises:
        ValueError: if ``self.mode`` is neither ``'min-max'`` nor
            ``'standardize'``.
    """
    if self.mode == 'min-max':
        fn = self.norm_min_max_inv if inverse else self.norm_min_max
    elif self.mode == 'standardize':
        fn = self.norm_standard_inv if inverse else self.norm_standard
    else:
        # Previously an unrecognized mode silently returned None;
        # fail loudly so misconfiguration is caught at the call site.
        raise ValueError(f"Unknown normalization mode: {self.mode!r}")
    return apply_to_tensor(x, fn)
def _detach_hidden(hidden: Union[torch.Tensor, Sequence, Mapping, str, bytes]):
    """Detach the hidden state from the autograd graph.

    Auxiliary helper that truncates backpropagation by applying
    ``torch.Tensor.detach`` to every tensor nested inside ``hidden``.
    """
    detach_fn = torch.Tensor.detach
    return apply_to_tensor(hidden, detach_fn)
def _detach_hidden(hidden):
    """Detach the hidden state from the autograd graph.

    Auxiliary helper that truncates backpropagation by applying
    ``torch.Tensor.detach`` to every tensor nested inside ``hidden``.
    """
    detach_fn = torch.Tensor.detach
    return apply_to_tensor(hidden, detach_fn)
def __call__(self, x):
    """Clamp every tensor nested in ``x`` to ``[self.floor, self.ceil]``.

    Non-tensor inputs are first converted with ``as_tensor`` before
    being clamped; the structure of ``x`` is preserved.
    """
    def _clip(value):
        return clamp(as_tensor(value), self.floor, self.ceil)

    return apply_to_tensor(x, _clip)