Example #1
    def __call__(self, x, inverse=False):
        """Apply the configured normalization to every tensor in ``x``,
        or undo it when ``inverse=True``."""
        if inverse:
            if self.mode == 'min-max':
                return apply_to_tensor(x, self.norm_min_max_inv)
            if self.mode == 'standardize':
                return apply_to_tensor(x, self.norm_standard_inv)
        else:
            if self.mode == 'min-max':
                return apply_to_tensor(x, self.norm_min_max)
            if self.mode == 'standardize':
                return apply_to_tensor(x, self.norm_standard)
        # Fail loudly instead of silently returning None for unknown modes.
        raise ValueError(f"unsupported normalization mode: {self.mode!r}")
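A hedged usage sketch for the dispatcher above, assuming apply_to_tensor behaves like ignite.utils.apply_to_tensor (it applies the given function to every tensor inside a nested mapping or sequence). The min-max statistics and the batch layout are illustrative, not from the source:

import torch
from ignite.utils import apply_to_tensor  # assumed provenance of apply_to_tensor

# Hypothetical min-max statistics; in the example above they live on the class.
lo, hi = 0.0, 255.0

def norm_min_max(t):
    return (t - lo) / (hi - lo)

def norm_min_max_inv(t):
    return t * (hi - lo) + lo

batch = {'image': torch.tensor([0.0, 127.5, 255.0]),
         'mask': torch.tensor([255.0, 0.0, 255.0])}
normed = apply_to_tensor(batch, norm_min_max)         # every tensor scaled to [0, 1]
restored = apply_to_tensor(normed, norm_min_max_inv)  # round-trips to the originals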
Example #2
from typing import Mapping, Sequence, Union

import torch


def _detach_hidden(hidden: Union[torch.Tensor, Sequence, Mapping, str, bytes]):
    """Cut backpropagation graph.

    Auxiliary function to cut the backpropagation graph by detaching the hidden
    vector.
    """
    return apply_to_tensor(hidden, torch.Tensor.detach)
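For context, a sketch of where such a helper is typically called: truncated backpropagation through time, where the hidden state is detached between chunks so gradients stop at chunk boundaries. The RNN dimensions below are hypothetical, and apply_to_tensor is again assumed to come from ignite.utils:

import torch
import torch.nn as nn
from ignite.utils import apply_to_tensor  # assumed provenance of apply_to_tensor

def _detach_hidden(hidden):
    return apply_to_tensor(hidden, torch.Tensor.detach)

rnn = nn.RNN(input_size=8, hidden_size=16, batch_first=True)
hidden = torch.zeros(1, 4, 16)        # (num_layers, batch, hidden_size)
for _ in range(3):                    # three chunks of one long sequence
    chunk = torch.randn(4, 10, 8)     # (batch, seq_len, input_size)
    out, hidden = rnn(chunk, hidden)
    hidden = _detach_hidden(hidden)   # cut the graph between chunks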
Example #3
def _detach_hidden(hidden):
    """Cut backpropagation graph.

    Auxiliary function to cut the backpropagation graph by detaching the hidden
    vector.
    """
    return apply_to_tensor(hidden, torch.Tensor.detach)
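The same one-liner also covers nested hidden states: an LSTM returns an (h, c) tuple, and apply_to_tensor (assumed, as above, to recurse into sequences the way ignite.utils.apply_to_tensor does) detaches both tensors while preserving the tuple structure. A minimal sketch:

import torch
import torch.nn as nn
from ignite.utils import apply_to_tensor  # assumed provenance of apply_to_tensor

lstm = nn.LSTM(input_size=8, hidden_size=16)
out, (h, c) = lstm(torch.randn(5, 4, 8))  # input is (seq_len, batch, input_size)
h2, c2 = apply_to_tensor((h, c), torch.Tensor.detach)
assert not h2.requires_grad and not c2.requires_grad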
Example #4
def __call__(self, x):
    # Clamp every tensor inside ``x`` to the [self.floor, self.ceil] range.
    return apply_to_tensor(
        x, lambda y: clamp(as_tensor(y), self.floor, self.ceil))
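A self-contained sketch around the clamp wrapper above, assuming clamp and as_tensor are torch.clamp and torch.as_tensor, and that the hypothetical host class just stores the bounds:

import torch
from torch import as_tensor, clamp
from ignite.utils import apply_to_tensor  # assumed provenance of apply_to_tensor

class Clamper:  # hypothetical host class for the __call__ above
    def __init__(self, floor, ceil):
        self.floor, self.ceil = floor, ceil

    def __call__(self, x):
        return apply_to_tensor(
            x, lambda y: clamp(as_tensor(y), self.floor, self.ceil))

clip = Clamper(0.0, 1.0)
print(clip([torch.tensor([-0.5, 0.5, 1.5])]))  # [tensor([0.0000, 0.5000, 1.0000])]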