Example #1
    def __init__(self, alpha=1., beta=1., learnable=True):
        super().__init__()
        if not isinstance(alpha, torch.Tensor):
            alpha = torch.tensor(alpha).view(1, -1)
        if not isinstance(beta, torch.Tensor):
            beta = torch.tensor(beta).view(1, -1)
        self._alpha = utils.softplus_inverse(alpha.float())
        self._beta = utils.softplus_inverse(beta.float())
        self.n_dims = len(alpha)
        if learnable:
            self._alpha = Parameter(self._alpha)
            self._beta = Parameter(self._beta)
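Example #1, like the examples that follow, keeps its positivity-constrained parameters in unconstrained form: the raw values are passed through softplus_inverse at construction time, so that applying softplus when the parameter is read yields a strictly positive value even while it is optimized as a Parameter. A minimal sketch of this pattern, assuming softplus_inverse is the standard inverse of F.softplus and that the class exposes the constrained value through a property (the property shown here is hypothetical, not taken from the source):

    import torch
    import torch.nn.functional as F

    def softplus_inverse(x):
        # inverse of softplus: log(exp(x) - 1), written in a numerically stable form
        return x + torch.log(-torch.expm1(-x))

    # round trip: softplus undoes softplus_inverse
    a = torch.tensor([0.5, 1.0, 2.0])
    assert torch.allclose(F.softplus(softplus_inverse(a)), a)

    # hypothetical read-side accessor pairing with the stored self._alpha
    # @property
    # def alpha(self):
    #     return F.softplus(self._alpha)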
Example #2
    def __init__(self, scale=1., concentration=1., learnable=True):
        super().__init__()
        if not isinstance(scale, torch.Tensor):
            scale = torch.tensor(scale).view(1, -1)
        if not isinstance(concentration, torch.Tensor):
            concentration = torch.tensor(concentration).view(1, -1)
        self._scale = utils.softplus_inverse(scale.float())
        self._concentration = utils.softplus_inverse(concentration.float())
        self.n_dims = len(scale)
        if learnable:
            self._scale = Parameter(self._scale)
            self._concentration = Parameter(self._concentration)
Example #3
    def __init__(self, df_1=1., df_2=1., learnable=True):
        super().__init__()
        if not isinstance(df_1, torch.Tensor):
            df_1 = torch.tensor(df_1).view(-1)
        self.n_dims = len(df_1)
        if not isinstance(df_2, torch.Tensor):
            df_2 = torch.tensor(df_2).view(-1)
        self._df_1 = utils.softplus_inverse(df_1.float())
        self._df_2 = utils.softplus_inverse(df_2.float())
        if learnable:
            self._df_1 = Parameter(self._df_1)
            self._df_2 = Parameter(self._df_2)
Example #4
    def __init__(self, scale=1., alpha=1., learnable=True):
        super().__init__()
        if not isinstance(scale, torch.Tensor):
            scale = torch.tensor(scale).view(-1)
        if not isinstance(alpha, torch.Tensor):
            alpha = torch.tensor(alpha).view(-1)

        self._scale = softplus_inverse(scale.float())
        self._alpha = softplus_inverse(alpha.float())
        self.n_dims = len(scale)

        if learnable:
            self._scale = Parameter(self._scale)
            self._alpha = Parameter(self._alpha)
Example #5
    def __init__(self, loc=0., scale=1., asymmetry=1., learnable=True):
        super().__init__()
        if not isinstance(loc, torch.Tensor):
            loc = torch.tensor(loc).view(-1)
        self.n_dims = len(loc)
        if not isinstance(scale, torch.Tensor):
            scale = torch.tensor(scale).view(-1)
        if not isinstance(asymmetry, torch.Tensor):
            asymmetry = torch.tensor(asymmetry).view(-1)
        self.loc = loc.float()
        self._scale = utils.softplus_inverse(scale.float())
        self._asymmetry = utils.softplus_inverse(asymmetry.float())
        if learnable:
            self.loc = Parameter(self.loc)
            self._scale = Parameter(self._scale)
            self._asymmetry = Parameter(self._asymmetry)
Example #6
    def __init__(self, rate=1., learnable=True):
        super().__init__()
        if not isinstance(rate, torch.Tensor):
            rate = torch.tensor(rate).view(-1)
        self.n_dims = len(rate)
        self._rate = utils.softplus_inverse(rate.float())
        if learnable:
            self._rate = Parameter(self._rate)
Example #7
    def __init__(self, df=1., learnable=True):
        super().__init__()
        if not isinstance(df, torch.Tensor):
            df = torch.tensor(df).view(-1)
        self._df = utils.softplus_inverse(df.float())
        self.n_dims = len(df)
        if learnable:
            self._df = Parameter(self._df)
Example #8
    def __init__(self,
                 df,
                 loc,
                 scale,
                 loc_learnable=True,
                 scale_learnable=True,
                 df_learnable=True):
        super().__init__()
        self.loc = torch.tensor(loc).view(-1)
        self.n_dims = len(self.loc)
        if loc_learnable:
            self.loc = Parameter(self.loc)
        self._scale = utils.softplus_inverse(torch.tensor(scale).view(-1))
        if scale_learnable:
            self._scale = Parameter(self._scale)
        self._df = utils.softplus_inverse(torch.tensor(df).view(-1))
        if df_learnable:
            self._df = Parameter(self._df)
Example #9
    def __init__(self, loc=0.0, scale=1.0, learnable=True):
        super().__init__()
        if not isinstance(loc, torch.Tensor):
            loc = torch.tensor(loc).view(1, -1)
        if not isinstance(scale, torch.Tensor):
            scale = torch.tensor(scale).view(1, -1)
        self.loc = loc.float()
        self._scale = utils.softplus_inverse(scale.float())
        if learnable:
            self.loc = Parameter(self.loc)
            self._scale = Parameter(self._scale)
Example #10
    def __init__(self, skewness=0.0, tailweight=1.0, learnable=True):
        super().__init__()
        if not isinstance(skewness, torch.Tensor):
            skewness = torch.tensor(skewness).view(1, -1)
        if not isinstance(tailweight, torch.Tensor):
            tailweight = torch.tensor(tailweight).view(1, -1)
        self.skewness = skewness.float()
        self._tailweight = utils.softplus_inverse(tailweight.float())
        if learnable:
            self.skewness = Parameter(self.skewness)
            self._tailweight = Parameter(self._tailweight)
Example #11
    def __init__(self, loc=0., scale=1., learnable=True):
        super().__init__()
        if not isinstance(loc, torch.Tensor):
            loc = torch.tensor(loc).float()
        if not isinstance(scale, torch.Tensor):
            scale = torch.tensor(scale).float()

        # Scalar loc: promote to a 1-dimensional tensor (the 1-d case below
        # then also runs and sets self.loc).
        if len(loc.shape) == 0:
            loc = loc.view(-1)
            scale = scale.view(-1)
            self.n_dims = 1
            self._scale = softplus_inverse(scale)
            self._diag_type = 'diag'

        # Vector loc: scale is either a per-dimension (diagonal) scale or a
        # flattened covariance matrix, stored via its Cholesky factor.
        if len(loc.shape) == 1:
            self.n_dims = len(loc)
            scale = scale.view(-1)
            if scale.numel() == 1:
                scale = scale.expand_as(loc)

            if scale.shape == loc.shape:
                self._scale = softplus_inverse(scale)
                self._diag_type = 'diag'
            else:
                self._scale = scale.view(self.n_dims, self.n_dims).cholesky()
                self._diag_type = 'cholesky'

            self.loc = loc

        # Batched loc (2-d or higher): scale is broadcast to loc's shape and
        # treated elementwise.
        if len(loc.shape) > 1:
            assert len(loc.shape) == len(scale.shape)
            self.loc = loc

            scale = scale.expand_as(loc)
            self._diag_type = 'diag'
            self._scale = softplus_inverse(scale)
            self.n_dims = loc.shape

        if learnable:
            self.loc = Parameter(self.loc)
            self._scale = Parameter(self._scale)
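Depending on the shape of scale, the constructor above stores either per-dimension scales (through softplus_inverse) or the Cholesky factor of a full covariance matrix, and records which case applies in _diag_type. A hedged sketch of how the stored tensor might be decoded on the read side; the decode_scale helper below is an illustration, not part of the source:

    import torch
    import torch.nn.functional as F

    def decode_scale(raw_scale, diag_type):
        # hypothetical read-side counterpart to the storage above
        if diag_type == 'diag':
            # map unconstrained values back to strictly positive scales
            return F.softplus(raw_scale)
        if diag_type == 'cholesky':
            # keep the lower-triangular Cholesky factor (the upper triangle
            # may drift once the tensor becomes a learnable Parameter)
            return torch.tril(raw_scale)
        raise ValueError(diag_type)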
Example #12
    def __init__(self, in_shape=1):
        super().__init__()
        self.z_0 = Parameter(torch.zeros(in_shape))
        self.in_shape = in_shape
        self._alpha = Parameter(utils.softplus_inverse(torch.rand(1).float()))
        self.beta = Parameter(torch.randn(1).float())
Example #13
    def inverse(self, y):
        return self.hinge_softness * utils.softplus_inverse(
            y / self.hinge_softness)
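Example #13 is the inverse of a softplus transform scaled by hinge_softness: if the forward mapping is y = c * softplus(x / c), then x = c * softplus_inverse(y / c). A small round-trip check, assuming that forward mapping (inferred from the inverse, not shown in the source) and reusing the softplus_inverse sketch from Example #1:

    import torch
    import torch.nn.functional as F

    def softplus_inverse(x):
        return x + torch.log(-torch.expm1(-x))

    c = 2.0                                # hinge_softness
    x = torch.tensor([-1.0, 0.5, 3.0])
    y = c * F.softplus(x / c)              # assumed forward transform
    x_rec = c * softplus_inverse(y / c)    # the inverse from Example #13
    assert torch.allclose(x_rec, x, atol=1e-5)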