def get_transforms(cache_size):
    transforms = [
        AbsTransform(cache_size=cache_size),
        ExpTransform(cache_size=cache_size),
        PowerTransform(exponent=2, cache_size=cache_size),
        PowerTransform(exponent=torch.tensor(5.).normal_(), cache_size=cache_size),
        PowerTransform(exponent=torch.tensor(5.).normal_(), cache_size=cache_size),
        SigmoidTransform(cache_size=cache_size),
        TanhTransform(cache_size=cache_size),
        AffineTransform(0, 1, cache_size=cache_size),
        AffineTransform(1, -2, cache_size=cache_size),
        AffineTransform(torch.randn(5), torch.randn(5), cache_size=cache_size),
        AffineTransform(torch.randn(4, 5), torch.randn(4, 5), cache_size=cache_size),
        SoftmaxTransform(cache_size=cache_size),
        SoftplusTransform(cache_size=cache_size),
        StickBreakingTransform(cache_size=cache_size),
        LowerCholeskyTransform(cache_size=cache_size),
        CorrCholeskyTransform(cache_size=cache_size),
        ComposeTransform([
            AffineTransform(torch.randn(4, 5), torch.randn(4, 5), cache_size=cache_size),
        ]),
        ComposeTransform([
            AffineTransform(torch.randn(4, 5), torch.randn(4, 5), cache_size=cache_size),
            ExpTransform(cache_size=cache_size),
        ]),
        ComposeTransform([
            AffineTransform(0, 1, cache_size=cache_size),
            AffineTransform(torch.randn(4, 5), torch.randn(4, 5), cache_size=cache_size),
            AffineTransform(1, -2, cache_size=cache_size),
            AffineTransform(torch.randn(4, 5), torch.randn(4, 5), cache_size=cache_size),
        ]),
        ReshapeTransform((4, 5), (2, 5, 2)),
        IndependentTransform(
            AffineTransform(torch.randn(5), torch.randn(5), cache_size=cache_size), 1),
        CumulativeDistributionTransform(Normal(0, 1)),
    ]
    transforms += [t.inv for t in transforms]
    return transforms
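# A minimal round-trip sketch over transforms like those above (not part of
# the original helper): with cache_size=1 a Transform caches its most recent
# (x, y) pair, so inverting the freshly computed y returns the original
# tensor object from the cache rather than recomputing it.
import torch
from torch.distributions.transforms import ExpTransform, TanhTransform

for t in [ExpTransform(cache_size=1), TanhTransform(cache_size=1)]:
    x = torch.randn(10)
    y = t(x)
    assert t.inv(y) is x  # served from the cache, no floating-point error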
def __init__(self, concentration1, concentration0, validate_args=None):
    self.concentration1, self.concentration0 = broadcast_all(concentration1, concentration0)
    base_dist = Uniform(torch.full_like(self.concentration0, 0),
                        torch.full_like(self.concentration0, 1),
                        validate_args=validate_args)
    transforms = [PowerTransform(exponent=self.concentration0.reciprocal()),
                  AffineTransform(loc=1., scale=-1.),
                  PowerTransform(exponent=self.concentration1.reciprocal())]
    super(Kumaraswamy, self).__init__(base_dist, transforms, validate_args=validate_args)
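# Hedged usage sketch for the construction above: the chain maps
# U ~ Uniform(0, 1) to (1 - U**(1/concentration0))**(1/concentration1),
# i.e. the Kumaraswamy inverse CDF applied to 1 - U (valid because 1 - U
# is also uniform on (0, 1)).
import torch
from torch.distributions import Kumaraswamy

d = Kumaraswamy(concentration1=torch.tensor(2.0), concentration0=torch.tensor(5.0))
x = d.rsample((1000,))   # reparameterized samples in (0, 1)
print(x.min(), x.max(), d.log_prob(x).mean())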
def __init__(self, concentration, rate, validate_args=None):
    base_dist = Gamma(concentration, rate)
    super().__init__(
        base_dist,
        PowerTransform(-base_dist.rate.new_ones(())),
        validate_args=validate_args,
    )
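# Consistency sketch (assumes the __init__ above belongs to an InverseGamma
# TransformedDistribution, as in the snippet further below): PowerTransform(-1)
# sends Y ~ Gamma(c, r) to X = 1/Y, so by the change-of-variables formula
#     log p_X(x) = log p_Gamma(1/x) - 2*log(x).
import torch
from torch.distributions import Gamma

c, r = torch.tensor(3.0), torch.tensor(2.0)
d = InverseGamma(c, r)  # the class defined above
x = torch.tensor([0.5, 1.0, 2.0])
expected = Gamma(c, r).log_prob(1 / x) - 2 * torch.log(x)
assert torch.allclose(d.log_prob(x), expected, atol=1e-6)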
def __init__(self, a, b, validate_args=None):
    self.a, self.b = broadcast_all(a, b)
    self.a_reciprocal = self.a.reciprocal()
    self.b_reciprocal = self.b.reciprocal()
    # clamp the base Uniform away from 0 and 1 so the PowerTransforms never
    # see boundary values, where gradients blow up
    base_dist = Uniform(torch.full_like(self.a, EPS),
                        torch.full_like(self.a, 1. - EPS))
    transforms = [
        AffineTransform(loc=1, scale=-1),
        PowerTransform(self.b_reciprocal),
        AffineTransform(loc=1, scale=-1),
        PowerTransform(self.a_reciprocal),
    ]
    super(Kumaraswamy, self).__init__(base_dist, transforms, validate_args=validate_args)
def get_transforms(cache_size):
    transforms = [
        AbsTransform(cache_size=cache_size),
        ExpTransform(cache_size=cache_size),
        PowerTransform(exponent=2, cache_size=cache_size),
        PowerTransform(exponent=torch.tensor(5.).normal_(), cache_size=cache_size),
        SigmoidTransform(cache_size=cache_size),
        TanhTransform(cache_size=cache_size),
        AffineTransform(0, 1, cache_size=cache_size),
        AffineTransform(1, -2, cache_size=cache_size),
        AffineTransform(torch.randn(5), torch.randn(5), cache_size=cache_size),
        AffineTransform(torch.randn(4, 5), torch.randn(4, 5), cache_size=cache_size),
        SoftmaxTransform(cache_size=cache_size),
        StickBreakingTransform(cache_size=cache_size),
        LowerCholeskyTransform(cache_size=cache_size),
        CorrCholeskyTransform(cache_size=cache_size),
        ComposeTransform([
            AffineTransform(torch.randn(4, 5), torch.randn(4, 5), cache_size=cache_size),
        ]),
        ComposeTransform([
            AffineTransform(torch.randn(4, 5), torch.randn(4, 5), cache_size=cache_size),
            ExpTransform(cache_size=cache_size),
        ]),
        ComposeTransform([
            AffineTransform(0, 1, cache_size=cache_size),
            AffineTransform(torch.randn(4, 5), torch.randn(4, 5), cache_size=cache_size),
            AffineTransform(1, -2, cache_size=cache_size),
            AffineTransform(torch.randn(4, 5), torch.randn(4, 5), cache_size=cache_size),
        ]),
    ]
    transforms += [t.inv for t in transforms]
    return transforms
def __init__(self, scale, concentration, validate_args=None):
    self.scale, self.concentration = broadcast_all(scale, concentration)
    self.concentration_reciprocal = self.concentration.reciprocal()
    base_dist = Exponential(torch.ones_like(self.scale), validate_args=validate_args)
    transforms = [PowerTransform(exponent=self.concentration_reciprocal),
                  AffineTransform(loc=0, scale=self.scale)]
    super(Weibull, self).__init__(base_dist, transforms, validate_args=validate_args)
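# Usage sketch (assumes the Weibull class above): the chain draws
# E ~ Exponential(1) and returns scale * E**(1/concentration), the standard
# inverse-CDF sampler, so the sample mean should approach the analytic mean
# scale * Gamma(1 + 1/concentration).
import torch

d = Weibull(scale=torch.tensor(2.0), concentration=torch.tensor(1.5))
x = d.rsample((100000,))
analytic_mean = 2.0 * torch.exp(torch.lgamma(torch.tensor(1.0 + 1.0 / 1.5)))
print(x.mean().item(), analytic_mean.item())  # close at this sample size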
def expand(self, batch_shape, _instance=None):
    new = self._get_checked_instance(Kumaraswamy, _instance)
    new.a = self.a.expand(batch_shape)
    new.b = self.b.expand(batch_shape)
    new.a_reciprocal = new.a.reciprocal()
    new.b_reciprocal = new.b.reciprocal()
    base_dist = self.base_dist.expand(batch_shape)
    # build the transforms from the expanded parameters (new.*, not self.*)
    # so their shapes match the expanded batch shape
    transforms = [
        AffineTransform(loc=1, scale=-1),
        PowerTransform(new.b_reciprocal),
        AffineTransform(loc=1, scale=-1),
        PowerTransform(new.a_reciprocal),
    ]
    super(Kumaraswamy, new).__init__(base_dist, transforms, validate_args=False)
    new._validate_args = self._validate_args
    return new
def __init__(
    self,
    concentration1: Union[float, Tensor],
    concentration0: Union[float, Tensor],
    validate_args: bool = False,
):
    self.concentration1, self.concentration0 = broadcast_all(
        concentration1, concentration0)
    base_dist = Uniform(
        torch.full_like(self.concentration0, 0.0),
        torch.full_like(self.concentration0, 1.0),
    )
    transforms = [
        AffineTransform(loc=1.0, scale=-1.0),
        PowerTransform(exponent=self.concentration0.reciprocal()),
        AffineTransform(loc=1.0, scale=-1.0),
        PowerTransform(exponent=self.concentration1.reciprocal()),
    ]
    super().__init__(base_dist, transforms, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
    new = self._get_checked_instance(Weibull, _instance)
    new.scale = self.scale.expand(batch_shape)
    new.concentration = self.concentration.expand(batch_shape)
    new.concentration_reciprocal = new.concentration.reciprocal()
    base_dist = self.base_dist.expand(batch_shape)
    transforms = [PowerTransform(exponent=new.concentration_reciprocal),
                  AffineTransform(loc=0, scale=new.scale)]
    super(Weibull, new).__init__(base_dist, transforms, validate_args=False)
    new._validate_args = self._validate_args
    return new
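# Sketch of typical expand usage: broadcast a scalar-parameter Weibull up to
# a batch shape without copying parameter storage (tensor.expand returns views).
import torch

d = Weibull(scale=torch.tensor(2.0), concentration=torch.tensor(1.5))
batched = d.expand(torch.Size([3, 4]))
assert batched.batch_shape == torch.Size([3, 4])
assert batched.rsample().shape == torch.Size([3, 4])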
def __init__(self, a, theta, alpha, beta):
    """
    The Amoroso distribution is a flexible four-parameter family that
    contains many important exponential families as special cases.

    *PDF*
    ```
    Amoroso(x | a, θ, α, β) =
        1/gamma(α) * abs(β/θ) * ((x - a)/θ)**(α*β - 1) * exp(-((x - a)/θ)**β)

    for x, a, θ, β real, α > 0

    support: x >= a if θ > 0
             x <= a if θ < 0
    ```
    """
    self.a, self.theta, self.alpha, self.beta = broadcast_all(a, theta, alpha, beta)
    base_dist = Gamma(self.alpha, 1.)
    # the forward direction of the composed transform computes ((x - a)/θ)**β,
    # so its inverse maps a Gamma(α, 1) sample g to x = a + θ * g**(1/β)
    transform = ComposeTransform([
        AffineTransform(-self.a / self.theta, 1 / self.theta),
        PowerTransform(self.beta),
    ]).inv
    super().__init__(base_dist, transform)
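# Sanity-check sketch (assumes the Amoroso class above): with a=0, θ=1, β=1
# both transform components are the identity, so Amoroso(0, 1, α, 1) should
# match Gamma(α, 1) exactly.
import torch
from torch.distributions import Gamma

alpha = torch.tensor(2.5)
d = Amoroso(torch.tensor(0.), torch.tensor(1.), alpha, torch.tensor(1.))
x = torch.tensor([0.5, 1.0, 3.0])
assert torch.allclose(d.log_prob(x), Gamma(alpha, 1.).log_prob(x), atol=1e-5)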
def __init__(self, concentration, rate, validate_args=None):
    base_dist = Gamma(concentration, rate)
    super(InverseGamma, self).__init__(base_dist, PowerTransform(-1.0),
                                       validate_args=validate_args)
def __init__(self, low, high, alpha):
    if alpha == -1.:
        raise ValueError("Not implemented for alpha = -1")
    # samples are u**(1/(alpha+1)) with u in [low**(alpha+1), high**(alpha+1)],
    # so the support of the transformed variable is [low, high]
    self.support = constraints.interval(low, high)
    base_dist = torch.distributions.Uniform(low**(alpha + 1), high**(alpha + 1))
    super(_TruncatedPower, self).__init__(base_dist, [PowerTransform(1 / (alpha + 1))])
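# Usage sketch (assumes the _TruncatedPower class above): samples follow
# p(x) ∝ x**alpha on [low, high], since |du/dx| = (alpha+1) * x**alpha for
# u = x**(alpha+1). alpha > -1 is used here so the base Uniform bounds stay
# ordered (low**(alpha+1) < high**(alpha+1)).
import torch

d = _TruncatedPower(low=torch.tensor(1.0), high=torch.tensor(10.0), alpha=2.0)
x = d.sample((5,))
assert ((x >= 1.0) & (x <= 10.0)).all()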
class TransformMixIn:
    """Mixin for providing pre- and post-processing capabilities to encoders.

    Classes using this mixin should have a ``transformation`` attribute that
    indicates how to preprocess data.
    """

    # dict of PyTorch functions that transform and inversely transform values.
    # an "inverse" entry is required if "reverse" is not the exact inverse of "forward".
    TRANSFORMATIONS = {
        "log": dict(forward=_clipped_log, reverse=torch.exp, inverse_torch=ExpTransform()),
        "log1p": dict(forward=torch.log1p, reverse=torch.exp, inverse=torch.expm1,
                      inverse_torch=Expm1Transform()),
        "logit": dict(forward=_clipped_logit, reverse=_clipped_sigmoid,
                      inverse_torch=SigmoidTransform()),
        "count": dict(forward=_plus_one, reverse=F.softplus, inverse=_minus_one,
                      inverse_torch=MinusOneTransform()),
        "softplus": dict(forward=softplus_inv, reverse=F.softplus,
                         inverse_torch=SoftplusTransform()),
        "relu": dict(forward=_identity, reverse=F.relu, inverse=_identity,
                     inverse_torch=ReLuTransform()),
        "sqrt": dict(forward=torch.sqrt, reverse=_square,
                     inverse_torch=PowerTransform(exponent=2.0)),
    }

    @classmethod
    def get_transform(cls, transformation: Union[str, Dict[str, Callable]]) -> Dict[str, Callable]:
        """Return transformation functions.

        Args:
            transformation (Union[str, Dict[str, Callable]]): name of transformation or
                dictionary with transformation information.

        Returns:
            Dict[str, Callable]: dictionary with transformation functions
                (forward, reverse, inverse and inverse_torch)
        """
        return cls.TRANSFORMATIONS.get(transformation, transformation)

    def preprocess(
        self, y: Union[pd.Series, pd.DataFrame, np.ndarray, torch.Tensor]
    ) -> Union[np.ndarray, torch.Tensor]:
        """Preprocess input data (e.g. take log).

        Uses the ``transformation`` attribute to determine how to apply the transform.

        Returns:
            Union[np.ndarray, torch.Tensor]: rescaled series with type depending on input type
        """
        if self.transformation is None:
            return y

        if isinstance(y, torch.Tensor):
            y = self.get_transform(self.transformation)["forward"](y)
        else:
            # convert first to tensor, then transform, then convert back to numpy array
            if isinstance(y, (pd.Series, pd.DataFrame)):
                y = y.to_numpy()
            y = torch.as_tensor(y)
            y = self.get_transform(self.transformation)["forward"](y)
            y = np.asarray(y)
        return y

    def inverse_preprocess(
        self, y: Union[pd.Series, np.ndarray, torch.Tensor]
    ) -> Union[np.ndarray, torch.Tensor]:
        """Inverse-preprocess rescaled data (e.g. take exp).

        Uses the ``transformation`` attribute to determine how to apply the inverse transform.

        Returns:
            Union[np.ndarray, torch.Tensor]: rescaled series with type depending on input type
        """
        if self.transformation is None:
            pass
        elif isinstance(y, torch.Tensor):
            y = self.get_transform(self.transformation)["reverse"](y)
        else:
            # convert first to tensor, then transform, then convert back to numpy array
            y = torch.as_tensor(y)
            y = self.get_transform(self.transformation)["reverse"](y)
            y = np.asarray(y)
        return y
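# Minimal usage sketch with a hypothetical encoder mixing in TransformMixIn.
# "sqrt" is a convenient choice because its "reverse" (squaring, via the
# module's _square helper) is the exact inverse of its "forward" (torch.sqrt),
# so preprocess / inverse_preprocess round-trip exactly.
import numpy as np

class DemoEncoder(TransformMixIn):  # hypothetical, for illustration only
    def __init__(self, transformation=None):
        self.transformation = transformation

enc = DemoEncoder(transformation="sqrt")
y = np.array([0.0, 4.0, 9.0])
z = enc.preprocess(y)               # -> array([0., 2., 3.])
y_back = enc.inverse_preprocess(z)  # squares back -> array([0., 4., 9.])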