def __init__(self, permutation, dim=1):
    # Permute inputs along `dim` according to a fixed 1D index tensor.
    if permutation.dim() != 1:
        raise ValueError("Permutation must be a 1D tensor.")
    if not is_positive_int(dim):
        raise ValueError("dim must be a positive integer.")

    super().__init__()
    self._dim = dim
    # Registered as a buffer so the permutation moves with the module
    # (e.g. on .to(device)) but is not treated as a trainable parameter.
    self.register_buffer("_permutation", permutation)

def __init__(self, features):
    """
    Transform that performs activation normalization. Works for 2D and 4D inputs.
    For 4D inputs (images), normalization is performed per channel, assuming a
    BxCxHxW input shape.

    Reference:
    > D. Kingma et al., Glow: Generative flow with invertible 1x1 convolutions,
    NeurIPS 2018.
    """
    if not is_positive_int(features):
        raise TypeError("Number of features must be a positive integer.")
    super().__init__()

    # Scale and shift start at identity and are initialized lazily from the
    # first batch seen in training mode (ActNorm-style data-dependent init).
    self.initialized = False
    self.log_scale = nn.Parameter(torch.zeros(features))
    self.shift = nn.Parameter(torch.zeros(features))

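# --- Usage sketch (illustrative, not part of the original snippets) ---
# Assumes the constructor above belongs to an nflows-style ActNorm transform
# whose forward pass returns an (outputs, logabsdet) pair; the class name,
# shapes, and batch size below are assumptions for illustration only.
import torch

actnorm = ActNorm(features=3)          # assumed class name for the snippet above
x = torch.randn(16, 3, 8, 8)           # B x C x H x W image batch, C = 3
# The first forward pass in training mode initializes log_scale and shift
# from the batch statistics; later passes reuse the learned parameters.
outputs, logabsdet = actnorm(x)        # outputs: (16, 3, 8, 8), logabsdet: (16,)
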
def __init__(self, in_channels, dim=1):
    if not is_positive_int(in_channels):
        raise ValueError("Number of features must be a positive integer.")
    # Fixed permutation that reverses the channel order: [C-1, C-2, ..., 0].
    super().__init__(torch.arange(in_channels - 1, -1, -1), dim)

def __init__(self, in_channels: int, dim: int = 1) -> None:
    if not is_positive_int(in_channels):
        raise ValueError("Number of features must be a positive integer.")
    # Random but fixed permutation of the channels, drawn once at construction.
    super().__init__(torch.randperm(in_channels), dim)
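
# --- Permutation sketch (illustrative, not from the original source) ---
# Both constructors above hand a fixed index tensor to a Permutation base
# class; applying such a permutation along dim=1 is equivalent to an
# index_select on the channel axis, as sketched here with plain torch.
import torch

C = 4
x = torch.randn(2, C, 8, 8)                        # B x C x H x W
rev_idx = torch.arange(C - 1, -1, -1)              # reverse order: [3, 2, 1, 0]
rand_idx = torch.randperm(C)                       # random but fixed ordering

x_rev = torch.index_select(x, dim=1, index=rev_idx)
x_rand = torch.index_select(x, dim=1, index=rand_idx)

assert torch.equal(x_rev[:, 0], x[:, C - 1])       # channel order is reversed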