Example #1
 def rsample(self, sample_shape=torch.Size(())):
     shape = self._extended_shape(sample_shape)
     #   X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
     #   Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
     X1 = self._gamma1.rsample(sample_shape).view(shape)
     X2 = self._gamma2.rsample(sample_shape).view(shape)
     X2.clamp_(min=_finfo(X2).tiny)
     Y = X1 / X2
     Y.clamp_(min=_finfo(X2).tiny)
     return Y
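Example #1 is the reparameterized sampler of the F-distribution (torch.distributions.FisherSnedecor): the statistic is formed as a ratio of two Gamma draws, and the clamps to tiny guard against division by zero and a zero-valued result. A minimal usage sketch through the public API (the degrees of freedom are illustrative):

import torch
from torch.distributions import FisherSnedecor

df1 = torch.tensor(5.0, requires_grad=True)
d = FisherSnedecor(df1=df1, df2=torch.tensor(10.0))
x = d.rsample((1000,))  # reparameterized draw, shape (1000,)
x.mean().backward()     # pathwise gradient reaches df1.grad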
Example #2
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     if torch._C._get_tracing_state():
         # [JIT WORKAROUND] lack of support for .uniform_()
         u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
         return self.loc - self.scale * u.sign() * torch.log1p(-u.abs().clamp(min=_finfo(self.loc).tiny))
     u = self.loc.new(shape).uniform_(_finfo(self.loc).eps - 1, 1)
     # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
     # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
     return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())
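Example #2 is the Laplace sampler: with U ~ Uniform(eps - 1, 1), the inverse-CDF form loc - scale * sign(U) * log1p(-|U|) is Laplace(loc, scale); the eps - 1 lower bound (and the clamp in the traced branch) keeps |U| strictly below 1 so that log1p(-|U|) stays finite. Example #11 below is the same sampler without the JIT branch. A minimal usage sketch through the public API (the parameters are illustrative):

import torch
from torch.distributions import Laplace

d = Laplace(loc=torch.zeros(3), scale=torch.ones(3))
y = d.rsample()  # differentiable w.r.t. loc and scale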
Example #3
 def sample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     with torch.no_grad():
         if torch._C._get_tracing_state():
             # [JIT WORKAROUND] lack of support for .uniform_()
             u = torch.rand(shape,
                            dtype=self.probs.dtype,
                            device=self.probs.device)
             u = u.clamp(min=_finfo(self.probs).tiny)
         else:
             u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
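         # Inverse CDF: with U ~ Uniform(0, 1), floor(log(U) / log(1 - p))
         # is the number of failures before the first success, i.e. Geometric(p).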
         return (u.log() / (-self.probs).log1p()).floor()
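Geometric sampling is wrapped in torch.no_grad() because the draws are discrete and not reparameterizable; Example #9 below is the same sampler without the tracing branch. A minimal usage sketch through the public API (the success probability is illustrative):

import torch
from torch.distributions import Geometric

d = Geometric(probs=torch.tensor(0.3))
n = d.sample((5,))  # failures before the first success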
Example #4
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     value = _standard_gamma(
         self.concentration.expand(shape)) / self.rate.expand(shape)
     data = value.data if isinstance(value, Variable) else value
     data.clamp_(min=_finfo(value).tiny)  # do not record in autograd graph
     return value
Example #5
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     value = _standard_gamma(
         self.concentration.expand(shape)) / self.rate.expand(shape)
     value.detach().clamp_(
         min=_finfo(value).tiny)  # do not record in autograd graph
     return value
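Examples #4 and #5, together with #10 and #13 below, are the same Gamma sampler as it evolved across PyTorch versions: a standard-Gamma draw divided by the rate, with the result clamped to tiny through value.data or value.detach() so that the clamp is not recorded in the autograd graph (detach() replaced the older .data access). A minimal usage sketch through the public API (the parameters are illustrative):

import torch
from torch.distributions import Gamma

conc = torch.tensor(2.0, requires_grad=True)
d = Gamma(concentration=conc, rate=torch.tensor(1.0))
g = d.rsample((100,))  # reparameterized draw
g.sum().backward()     # pathwise gradient reaches conc.grad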
Example #6
 def __init__(self, loc, scale, validate_args=None):
     self.loc, self.scale = broadcast_all(loc, scale)
     finfo = _finfo(self.loc)
     if isinstance(loc, Number) and isinstance(scale, Number):
         base_dist = Uniform(finfo.tiny, 1 - finfo.eps)
     else:
         base_dist = Uniform(self.loc.new(self.loc.size()).fill_(finfo.tiny), 1 - finfo.eps)
     transforms = [ExpTransform().inv, AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
                   ExpTransform().inv, AffineTransform(loc=loc, scale=-self.scale)]
     super(Gumbel, self).__init__(base_dist, transforms, validate_args=validate_args)
Example #7
 def __init__(self, loc, scale, validate_args=None):
     self.loc, self.scale = broadcast_all(loc, scale)
     finfo = _finfo(self.loc)
     if isinstance(loc, Number) and isinstance(scale, Number):
         batch_shape = torch.Size()
         base_dist = Uniform(finfo.tiny, 1 - finfo.eps)
     else:
         batch_shape = self.scale.size()
         base_dist = Uniform(self.loc.new(self.loc.size()).fill_(finfo.tiny), 1 - finfo.eps)
     transforms = [ExpTransform().inv, AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
                   ExpTransform().inv, AffineTransform(loc=loc, scale=-self.scale)]
     super(Gumbel, self).__init__(base_dist, transforms, validate_args=validate_args)
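Examples #6 and #7 are two revisions of the same Gumbel constructor built on TransformedDistribution: starting from U ~ Uniform(tiny, 1 - eps), the transform chain log, negate, log, then affine(loc, -scale) composes to loc - scale * log(-log(U)), which is Gumbel(loc, scale); the tiny/eps bounds keep both logarithms finite. A minimal usage sketch through the public API (the parameters are illustrative):

import torch
from torch.distributions import Gumbel

d = Gumbel(loc=torch.tensor(0.0), scale=torch.tensor(1.0))
z = d.rsample((4,))  # rsample is inherited from the transform chain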
Example #8
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
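     # The eps lower bound keeps u away from 0, where log(u) would be -inf.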
     uni_dist = self.scale.new(shape).uniform_(_finfo(self.scale).eps, 1)
     # X ~ Uniform(0, 1)
     # Y = loc - scale * ln(-ln(X)) ~ Gumbel(loc, scale)
     return self.loc - self.scale * torch.log(-uni_dist.log())
Example #9
 def sample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     with torch.no_grad():
         u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
         return (u.log() / (-self.probs).log1p()).floor()
Example #10
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     value = _standard_gamma(self.alpha.expand(shape)) / self.beta.expand(shape)
     data = value.data if isinstance(value, Variable) else value
     data.clamp_(min=_finfo(value).tiny)  # do not record in autograd graph
     return value
Example #11
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     u = self.loc.new(shape).uniform_(_finfo(self.loc).eps - 1, 1)
     # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
     # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
     return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())
Example #12
def _dirichlet_sample_nograd(alpha):
    probs = torch._C._standard_gamma(alpha)
    probs /= probs.sum(-1, True)
    eps = _finfo(probs).eps
    return probs.clamp_(min=eps, max=1 - eps)
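Example #12 uses the classic Gamma normalization: if X_i ~ Gamma(alpha_i, 1) independently, then X / X.sum(-1) ~ Dirichlet(alpha), and the final clamp keeps every component strictly inside (0, 1). A minimal usage sketch through the public API (the concentration vector is illustrative):

import torch
from torch.distributions import Dirichlet

d = Dirichlet(torch.tensor([0.5, 1.0, 2.0]))
p = d.sample((4,))  # each row sums to 1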
Example #13
 def rsample(self, sample_shape=torch.Size()):
     shape = self._extended_shape(sample_shape)
     value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
     value.data.clamp_(min=_finfo(value).tiny)  # do not record in autograd graph
     return value