def __init__(self, alpha=1., beta=1., learnable=True):
    """Initialize positive parameters ``alpha`` and ``beta``.

    Both are stored in softplus-inverse space so the forward pass can
    recover strictly positive values via ``softplus`` during optimization.

    Args:
        alpha: positive parameter; scalar, sequence, or tensor.
        beta: positive parameter; scalar, sequence, or tensor.
        learnable: if True, wrap the raw (pre-softplus) values in
            ``Parameter`` so they are trained.
    """
    super().__init__()
    if not isinstance(alpha, torch.Tensor):
        alpha = torch.tensor(alpha).view(1, -1)
    if not isinstance(beta, torch.Tensor):
        beta = torch.tensor(beta).view(1, -1)
    self._alpha = utils.softplus_inverse(alpha.float())
    self._beta = utils.softplus_inverse(beta.float())
    # BUG FIX: alpha has shape (1, N) after the view above, so the old
    # `len(alpha)` was always 1 no matter how many dimensions the caller
    # passed. Sibling __init__s (which reshape with .view(-1)) report the
    # event size; use the last axis to match that intent.
    self.n_dims = alpha.shape[-1]
    if learnable:
        self._alpha = Parameter(self._alpha)
        self._beta = Parameter(self._beta)
def __init__(self, scale=1., concentration=1., learnable=True):
    """Initialize positive parameters ``scale`` and ``concentration``.

    Both are stored in softplus-inverse space so the forward pass can
    recover strictly positive values via ``softplus`` during optimization.

    Args:
        scale: positive scale parameter; scalar, sequence, or tensor.
        concentration: positive concentration parameter; scalar,
            sequence, or tensor.
        learnable: if True, wrap the raw (pre-softplus) values in
            ``Parameter`` so they are trained.
    """
    super().__init__()
    if not isinstance(scale, torch.Tensor):
        scale = torch.tensor(scale).view(1, -1)
    if not isinstance(concentration, torch.Tensor):
        concentration = torch.tensor(concentration).view(1, -1)
    self._scale = utils.softplus_inverse(scale.float())
    self._concentration = utils.softplus_inverse(concentration.float())
    # BUG FIX: scale has shape (1, N) after the view above, so the old
    # `len(scale)` was always 1 regardless of the number of dimensions
    # passed. Use the last axis (the event size) instead, consistent
    # with the sibling __init__s that reshape with .view(-1).
    self.n_dims = scale.shape[-1]
    if learnable:
        self._scale = Parameter(self._scale)
        self._concentration = Parameter(self._concentration)
def __init__(self, df_1=1., df_2=1., learnable=True):
    """Initialize the two positive degrees-of-freedom parameters.

    Values are kept in softplus-inverse space so that applying
    ``softplus`` always yields strictly positive parameters.

    Args:
        df_1: positive parameter; scalar, sequence, or tensor.
        df_2: positive parameter; scalar, sequence, or tensor.
        learnable: if True, register the raw values as ``Parameter``s.
    """
    super().__init__()
    df_1 = df_1 if isinstance(df_1, torch.Tensor) else torch.tensor(df_1).view(-1)
    self.n_dims = len(df_1)
    df_2 = df_2 if isinstance(df_2, torch.Tensor) else torch.tensor(df_2).view(-1)
    raw_df_1 = utils.softplus_inverse(df_1.float())
    raw_df_2 = utils.softplus_inverse(df_2.float())
    self._df_1 = Parameter(raw_df_1) if learnable else raw_df_1
    self._df_2 = Parameter(raw_df_2) if learnable else raw_df_2
def __init__(self, scale=1., alpha=1., learnable=True):
    """Initialize positive parameters ``scale`` and ``alpha``.

    Both are stored in softplus-inverse space so that ``softplus``
    recovers strictly positive values at use time.

    Args:
        scale: positive scale parameter; scalar, sequence, or tensor.
        alpha: positive shape parameter; scalar, sequence, or tensor.
        learnable: if True, register the raw values as ``Parameter``s.
    """
    super().__init__()
    scale = scale if isinstance(scale, torch.Tensor) else torch.tensor(scale).view(-1)
    alpha = alpha if isinstance(alpha, torch.Tensor) else torch.tensor(alpha).view(-1)
    raw_scale = softplus_inverse(scale.float())
    raw_alpha = softplus_inverse(alpha.float())
    self.n_dims = len(scale)
    self._scale = Parameter(raw_scale) if learnable else raw_scale
    self._alpha = Parameter(raw_alpha) if learnable else raw_alpha
def __init__(self, loc=0., scale=1., asymmetry=1., learnable=True):
    """Initialize location, scale, and asymmetry parameters.

    ``scale`` and ``asymmetry`` are stored in softplus-inverse space so
    that ``softplus`` recovers strictly positive values; ``loc`` is
    stored directly.

    Args:
        loc: location; scalar, sequence, or tensor.
        scale: positive scale; scalar, sequence, or tensor.
        asymmetry: positive asymmetry; scalar, sequence, or tensor.
        learnable: if True, register all three as ``Parameter``s.
    """
    super().__init__()
    loc = loc if isinstance(loc, torch.Tensor) else torch.tensor(loc).view(-1)
    self.n_dims = len(loc)
    scale = scale if isinstance(scale, torch.Tensor) else torch.tensor(scale).view(-1)
    asymmetry = asymmetry if isinstance(asymmetry, torch.Tensor) else torch.tensor(asymmetry).view(-1)
    loc_value = loc.float()
    raw_scale = utils.softplus_inverse(scale.float())
    raw_asymmetry = utils.softplus_inverse(asymmetry.float())
    self.loc = Parameter(loc_value) if learnable else loc_value
    self._scale = Parameter(raw_scale) if learnable else raw_scale
    self._asymmetry = Parameter(raw_asymmetry) if learnable else raw_asymmetry
def __init__(self, rate=1., learnable=True):
    """Initialize the positive ``rate`` parameter.

    Stored in softplus-inverse space so ``softplus`` recovers a strictly
    positive rate during optimization.

    Args:
        rate: positive rate; scalar, sequence, or tensor.
        learnable: if True, register the raw value as a ``Parameter``.
    """
    super().__init__()
    rate = rate if isinstance(rate, torch.Tensor) else torch.tensor(rate).view(-1)
    self.n_dims = len(rate)
    raw_rate = utils.softplus_inverse(rate.float())
    self._rate = Parameter(raw_rate) if learnable else raw_rate
def __init__(self, df=1., learnable=True):
    """Initialize the positive degrees-of-freedom parameter ``df``.

    Stored in softplus-inverse space so ``softplus`` recovers a strictly
    positive value during optimization.

    Args:
        df: positive degrees of freedom; scalar, sequence, or tensor.
        learnable: if True, register the raw value as a ``Parameter``.
    """
    super().__init__()
    df = df if isinstance(df, torch.Tensor) else torch.tensor(df).view(-1)
    raw_df = utils.softplus_inverse(df.float())
    self.n_dims = len(df)
    self._df = Parameter(raw_df) if learnable else raw_df
def __init__(self, df, loc, scale, loc_learnable=True, scale_learnable=True, df_learnable=True):
    """Initialize ``df``, ``loc``, and ``scale`` with per-parameter learnability.

    ``scale`` and ``df`` are stored in softplus-inverse space so
    ``softplus`` recovers strictly positive values; ``loc`` is stored
    directly.

    Args:
        df: positive degrees of freedom; scalar, sequence, or tensor.
        loc: location; scalar, sequence, or tensor.
        scale: positive scale; scalar, sequence, or tensor.
        loc_learnable: if True, register ``loc`` as a ``Parameter``.
        scale_learnable: if True, register the raw scale as a ``Parameter``.
        df_learnable: if True, register the raw df as a ``Parameter``.
    """
    super().__init__()
    # FIX: guard with isinstance like the sibling __init__s do —
    # torch.tensor(t) on an existing tensor copies it, detaches it from
    # any autograd graph, and emits a UserWarning.
    if not isinstance(loc, torch.Tensor):
        loc = torch.tensor(loc)
    self.loc = loc.view(-1)
    self.n_dims = len(self.loc)
    if loc_learnable:
        self.loc = Parameter(self.loc)
    if not isinstance(scale, torch.Tensor):
        scale = torch.tensor(scale)
    self._scale = utils.softplus_inverse(scale.view(-1))
    if scale_learnable:
        self._scale = Parameter(self._scale)
    if not isinstance(df, torch.Tensor):
        df = torch.tensor(df)
    self._df = utils.softplus_inverse(df.view(-1))
    if df_learnable:
        self._df = Parameter(self._df)
def __init__(self, loc=0.0, scale=1.0, learnable=True):
    """Initialize location and positive scale parameters.

    ``scale`` is stored in softplus-inverse space so ``softplus``
    recovers a strictly positive value; ``loc`` is stored directly.
    Non-tensor inputs are reshaped to a (1, N) batch row.

    Args:
        loc: location; scalar, sequence, or tensor.
        scale: positive scale; scalar, sequence, or tensor.
        learnable: if True, register both as ``Parameter``s.
    """
    super().__init__()
    loc = loc if isinstance(loc, torch.Tensor) else torch.tensor(loc).view(1, -1)
    scale = scale if isinstance(scale, torch.Tensor) else torch.tensor(scale).view(1, -1)
    loc_value = loc.float()
    raw_scale = utils.softplus_inverse(scale.float())
    self.loc = Parameter(loc_value) if learnable else loc_value
    self._scale = Parameter(raw_scale) if learnable else raw_scale
def __init__(self, skewness=0.0, tailweight=1.0, learnable=True):
    """Initialize skewness and positive tailweight parameters.

    ``tailweight`` is stored in softplus-inverse space so ``softplus``
    recovers a strictly positive value; ``skewness`` is stored directly.
    Non-tensor inputs are reshaped to a (1, N) batch row.

    Args:
        skewness: skewness; scalar, sequence, or tensor.
        tailweight: positive tailweight; scalar, sequence, or tensor.
        learnable: if True, register both as ``Parameter``s.
    """
    super().__init__()
    skewness = skewness if isinstance(skewness, torch.Tensor) else torch.tensor(skewness).view(1, -1)
    tailweight = tailweight if isinstance(tailweight, torch.Tensor) else torch.tensor(tailweight).view(1, -1)
    skew_value = skewness.float()
    raw_tailweight = utils.softplus_inverse(tailweight.float())
    self.skewness = Parameter(skew_value) if learnable else skew_value
    self._tailweight = Parameter(raw_tailweight) if learnable else raw_tailweight
def __init__(self, loc=0., scale=1., learnable=True):
    """Initialize location and scale, choosing a diagonal or full-covariance
    ("cholesky") scale representation from the shapes of the inputs.

    Args:
        loc: location; scalar, 1-D, or higher-rank tensor (or value
            convertible to one).
        scale: scale; scalar, per-dimension vector, or — in the 1-D loc
            case — a flattened (n_dims * n_dims) covariance-like matrix.
        learnable: if True, register ``loc`` and the raw scale as
            ``Parameter``s.
    """
    super().__init__()
    if not isinstance(loc, torch.Tensor):
        loc = torch.tensor(loc).float()
    if not isinstance(scale, torch.Tensor):
        scale = torch.tensor(scale).float()
    # Scalar case: promote to 1-element vectors. NOTE(review): after the
    # view below, loc.shape == (1,), so control intentionally (or at
    # least harmlessly) falls through into the `len(loc.shape) == 1`
    # branch, which recomputes the same n_dims/_scale/_diag_type values
    # and also sets self.loc.
    if len(loc.shape) == 0:
        loc = loc.view(-1)
        scale = scale.view(-1)
        self.n_dims = 1
        self._scale = softplus_inverse(scale)
        self._diag_type = 'diag'
    # 1-D case: diagonal scale when shapes match (broadcasting a scalar
    # scale to every dim), otherwise treat the flattened scale as an
    # (n_dims, n_dims) matrix and store its Cholesky factor.
    if len(loc.shape) == 1:
        self.n_dims = len(loc)
        scale = scale.view(-1)
        if scale.numel() == 1:
            scale = scale.expand_as(loc)
        if scale.shape == loc.shape:
            # Diagonal: store in softplus-inverse space so softplus
            # recovers a strictly positive scale.
            self._scale = softplus_inverse(scale)
            self._diag_type = 'diag'
        else:
            # Full matrix: stored as its Cholesky factor (NOT in
            # softplus-inverse space). NOTE(review): Tensor.cholesky()
            # is deprecated in newer torch in favor of
            # torch.linalg.cholesky — confirm the torch version pinned.
            self._scale = scale.view(self.n_dims, self.n_dims).cholesky()
            self._diag_type = 'cholesky'
        self.loc = loc
    # Batched / higher-rank case: always diagonal; n_dims becomes the
    # full shape tuple here rather than an int, unlike the branches above.
    if len(loc.shape) > 1:
        assert len(loc.shape) == len(scale.shape)
        self.loc = loc
        scale = scale.expand_as(loc)
        self._diag_type = 'diag'
        self._scale = softplus_inverse(scale)
        self.n_dims = loc.shape
    if learnable:
        self.loc = Parameter(self.loc)
        self._scale = Parameter(self._scale)
def __init__(self, in_shape=1):
    """Initialize learnable transform parameters for inputs of size ``in_shape``.

    ``z_0`` starts at zero; ``_alpha`` is a random positive value stored
    in softplus-inverse space; ``beta`` is drawn from a standard normal.

    Args:
        in_shape: size of the input the transform operates on.
    """
    super().__init__()
    self.in_shape = in_shape
    self.z_0 = Parameter(torch.zeros(in_shape))
    alpha_init = torch.rand(1).float()
    self._alpha = Parameter(utils.softplus_inverse(alpha_init))
    self.beta = Parameter(torch.randn(1).float())
def inverse(self, y):
    """Invert the scaled-softplus transform: x = c * softplus_inverse(y / c),
    where c is ``self.hinge_softness``."""
    softness = self.hinge_softness
    scaled = y / softness
    return softness * utils.softplus_inverse(scaled)