Code example #1
 def rsample(self, sample_shape=torch.Size(())):
     shape = self._extended_shape(sample_shape)
     #   X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
     #   Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
     X1 = self._gamma1.rsample(sample_shape).view(shape)
     X2 = self._gamma2.rsample(sample_shape).view(shape)
     X2.clamp_(min=_finfo(X2).tiny)
     Y = X1 / X2
     Y.clamp_(min=_finfo(X2).tiny)
     return Y
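
For context: these snippets guard a ratio of gamma variates with _finfo(...).tiny, the smallest positive normal float, so the division and any downstream log never see an exact zero. Below is a minimal standalone sketch of the same construction using the public torch.distributions.Gamma and torch.finfo (the public counterpart of the private _finfo helper); sample_f and the concrete df values are illustrative, not from the original code:

import torch
from torch.distributions import Gamma

def sample_f(df1, df2, sample_shape=torch.Size()):
    # X1 ~ Gamma(df1 / 2, rate=df1 / 2) and X2 ~ Gamma(df2 / 2, rate=df2 / 2)
    # are chi-squared variates rescaled to mean 1, so X1 / X2 ~ F(df1, df2).
    X1 = Gamma(df1 / 2, df1 / 2).rsample(sample_shape)
    X2 = Gamma(df2 / 2, df2 / 2).rsample(sample_shape)
    tiny = torch.finfo(X2.dtype).tiny  # smallest positive normal float
    X2 = X2.clamp(min=tiny)            # guard the division against zero
    return (X1 / X2).clamp(min=tiny)   # keep the sample strictly positive

y = sample_f(torch.tensor(5.0), torch.tensor(10.0), torch.Size([10000]))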
Code example #2
File: laplace.py Project: vikasgoel2000/lambda-packs
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     if torch._C._get_tracing_state():
         # [JIT WORKAROUND] lack of support for .uniform_()
         u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
         return self.loc - self.scale * u.sign() * torch.log1p(-u.abs().clamp(min=_finfo(self.loc).tiny))
     u = self.loc.new(shape).uniform_(_finfo(self.loc).eps - 1, 1)
     # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
     # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
     return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())
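
The eps - 1 lower bound is what makes this numerically safe: it keeps 1 - |u| strictly positive, so log1p(-|u|) never returns -inf. A self-contained sketch of the same inverse-CDF transform, with torch.finfo standing in for the private _finfo (sample_laplace is an illustrative name):

import torch

def sample_laplace(loc, scale, sample_shape=torch.Size()):
    # With u ~ Uniform(-1, 1), loc - scale * sign(u) * log1p(-|u|) is the
    # Laplace inverse CDF evaluated at (u + 1) / 2; shifting the lower
    # bound up by eps keeps log1p(-|u|) finite.
    eps = torch.finfo(loc.dtype).eps
    u = torch.empty(sample_shape, dtype=loc.dtype).uniform_(eps - 1, 1)
    return loc - scale * u.sign() * torch.log1p(-u.abs())

x = sample_laplace(torch.tensor(0.0), torch.tensor(1.0), torch.Size([10000]))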
Code example #3
File: fishersnedecor.py Project: RichieMay/pytorch
 def rsample(self, sample_shape=torch.Size(())):
     shape = self._extended_shape(sample_shape)
     #   X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
     #   Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
     X1 = self._gamma1.rsample(sample_shape).view(shape)
     X2 = self._gamma2.rsample(sample_shape).view(shape)
     X2.clamp_(min=_finfo(X2).tiny)
     Y = X1 / X2
     Y.clamp_(min=_finfo(X2).tiny)
     return Y
Code example #4
 def sample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     with torch.no_grad():
         if torch._C._get_tracing_state():
             # [JIT WORKAROUND] lack of support for .uniform_()
             u = torch.rand(shape,
                            dtype=self.probs.dtype,
                            device=self.probs.device)
             u = u.clamp(min=_finfo(self.probs).tiny)
         else:
             u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
         # Inverse CDF: floor(log(u) / log(1 - p)) ~ Geometric(p);
         # u was clamped away from 0 above, so log(u) is finite.
         return (u.log() / (-self.probs).log1p()).floor()
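
A standalone sketch of the same trick with public APIs only: clamping u to torch.finfo(...).tiny keeps log(u) finite, and the floor of log(u) / log(1 - p) counts the failures before the first success (sample_geometric is an illustrative name):

import torch

def sample_geometric(probs, sample_shape=torch.Size()):
    # Inverse-CDF sampling: if U ~ Uniform(0, 1), then
    # floor(log(U) / log(1 - p)) ~ Geometric(p).
    # (-probs).log1p() computes log(1 - p) without cancellation.
    tiny = torch.finfo(probs.dtype).tiny
    u = torch.rand(sample_shape, dtype=probs.dtype).clamp(min=tiny)
    return (u.log() / (-probs).log1p()).floor()

k = sample_geometric(torch.tensor(0.3), torch.Size([10000]))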
Code example #5
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     value = _standard_gamma(
         self.concentration.expand(shape)) / self.rate.expand(shape)
     data = value.data if isinstance(value, Variable) else value
     data.clamp_(min=_finfo(value).tiny)  # do not record in autograd graph
     return value
Code example #6
File: gamma.py Project: madieragold1/micro--
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     value = _standard_gamma(
         self.concentration.expand(shape)) / self.rate.expand(shape)
     value.detach().clamp_(
         min=_finfo(value).tiny)  # do not record in autograd graph
     return value
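
The value.detach().clamp_(...) idiom (like the value.data variant in example #5) edits the sample's storage in place without recording the clamp in the autograd graph, so gradients still flow through the unclamped reparameterized sample. A sketch with the public API; rsample_gamma is an illustrative name:

import torch
from torch.distributions import Gamma

def rsample_gamma(concentration, rate, sample_shape=torch.Size()):
    value = Gamma(concentration, rate).rsample(sample_shape)
    # detach() shares storage with value, so the in-place clamp_ fixes
    # underflowed zeros while staying invisible to autograd.
    value.detach().clamp_(min=torch.finfo(value.dtype).tiny)
    return value

concentration = torch.tensor(1e-3, requires_grad=True)
v = rsample_gamma(concentration, torch.tensor(1.0), torch.Size([1000]))
v.sum().backward()  # gradients reach concentration through rsample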
Code example #7
 def __init__(self, loc, scale, validate_args=None):
     self.loc, self.scale = broadcast_all(loc, scale)
     finfo = _finfo(self.loc)
     if isinstance(loc, Number) and isinstance(scale, Number):
         base_dist = Uniform(finfo.tiny, 1 - finfo.eps)
     else:
         base_dist = Uniform(self.loc.new(self.loc.size()).fill_(finfo.tiny), 1 - finfo.eps)
     transforms = [ExpTransform().inv, AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
                   ExpTransform().inv, AffineTransform(loc=loc, scale=-self.scale)]
     super(Gumbel, self).__init__(base_dist, transforms, validate_args=validate_args)
Code example #8
File: gumbel.py Project: gtgalone/pytorch
 def __init__(self, loc, scale, validate_args=None):
     self.loc, self.scale = broadcast_all(loc, scale)
     finfo = _finfo(self.loc)
     if isinstance(loc, Number) and isinstance(scale, Number):
         batch_shape = torch.Size()
         base_dist = Uniform(finfo.tiny, 1 - finfo.eps)
     else:
         batch_shape = self.scale.size()
         base_dist = Uniform(self.loc.new(self.loc.size()).fill_(finfo.tiny), 1 - finfo.eps)
     transforms = [ExpTransform().inv, AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
                   ExpTransform().inv, AffineTransform(loc=loc, scale=-self.scale)]
     super(Gumbel, self).__init__(base_dist, transforms, validate_args=validate_args)
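
Applied in list order, the transform chain computes u -> log(u) -> -log(u) -> log(-log(u)) -> loc - scale * log(-log(u)), i.e. the Gumbel inverse CDF; the base Uniform(tiny, 1 - eps) is nudged inside (0, 1) so neither logarithm ever sees 0. A sketch of the same construction via the public API (the tensor values are arbitrary examples):

import torch
from torch.distributions import TransformedDistribution, Uniform
from torch.distributions.transforms import AffineTransform, ExpTransform

loc, scale = torch.tensor(0.0), torch.tensor(1.0)
finfo = torch.finfo(loc.dtype)
base_dist = Uniform(finfo.tiny, 1 - finfo.eps)
transforms = [ExpTransform().inv,                      # u -> log(u)
              AffineTransform(loc=0., scale=-1.),      # -> -log(u)
              ExpTransform().inv,                      # -> log(-log(u))
              AffineTransform(loc=loc, scale=-scale)]  # -> loc - scale * log(-log(u))
gumbel = TransformedDistribution(base_dist, transforms)
x = gumbel.rsample(torch.Size([10000]))
print(x.mean())  # ~0.5772 for Gumbel(0, 1): the Euler-Mascheroni constant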
Code example #9
File: gumbel.py Project: rsumner31/pytorch-2
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     uni_dist = self.scale.new(shape).uniform_(_finfo(self.scale).eps, 1)
     # X ~ Uniform(0, 1)
     # Y = loc - scale * ln (-ln (X)) ~ Gumbel(loc, scale)
     return self.loc - self.scale * torch.log(-uni_dist.log())
Code example #10
File: geometric.py Project: madieragold1/micro--
 def sample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     with torch.no_grad():
         u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
         return (u.log() / (-self.probs).log1p()).floor()
Code example #11
File: gamma.py Project: lxlhh/pytorch
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     value = _standard_gamma(self.alpha.expand(shape)) / self.beta.expand(shape)
     data = value.data if isinstance(value, Variable) else value
     data.clamp_(min=_finfo(value).tiny)  # do not record in autograd graph
     return value
Code example #12
File: laplace.py Project: MaheshBhosale/pytorch
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     u = self.loc.new(shape).uniform_(_finfo(self.loc).eps - 1, 1)
     # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
     # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
     return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())
Code example #13
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     u = self.loc.new(shape).uniform_(_finfo(self.loc).eps - 1, 1)
     # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
     # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
     return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())
Code example #14
File: dirichlet.py Project: lxlhh/pytorch
def _dirichlet_sample_nograd(alpha):
    # Gamma-normalization: if G_i ~ Gamma(alpha_i, 1) independently,
    # then G / G.sum(-1) ~ Dirichlet(alpha).
    probs = torch._C._standard_gamma(alpha)
    probs /= probs.sum(-1, True)
    # Clamp into the open simplex so log_prob never evaluates log(0).
    eps = _finfo(probs).eps
    return probs.clamp_(min=eps, max=1 - eps)
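
The same construction is possible without the private torch._C._standard_gamma binding, via the public Gamma distribution; a sketch (dirichlet_sample_nograd is an illustrative name):

import torch
from torch.distributions import Gamma

def dirichlet_sample_nograd(alpha):
    with torch.no_grad():
        # Draw G_i ~ Gamma(alpha_i, 1) independently, then normalize.
        g = Gamma(alpha, torch.ones_like(alpha)).sample()
        probs = g / g.sum(-1, keepdim=True)
        # Keep probabilities strictly inside (0, 1) for downstream log_prob.
        eps = torch.finfo(probs.dtype).eps
        return probs.clamp_(min=eps, max=1 - eps)

p = dirichlet_sample_nograd(torch.tensor([0.5, 1.0, 2.0]))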
Code example #15
File: geometric.py Project: RichieMay/pytorch
 def sample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     with torch.no_grad():
         u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
         return (u.log() / (-self.probs).log1p()).floor()
Code example #16
File: gamma.py Project: RichieMay/pytorch
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
     value.data.clamp_(min=_finfo(value).tiny)  # do not record in autograd graph
     return value
Code example #17
File: gumbel.py Project: lxlhh/pytorch
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     uni_dist = self.scale.new(shape).uniform_(_finfo(self.scale).eps, 1)
     # X ~ Uniform(0, 1)
     # Y = loc - scale * ln (-ln (X)) ~ Gumbel(loc, scale)
     return self.loc - self.scale * torch.log(-uni_dist.log())