Example #1
File: beta.py  Project: xianweilv/pytorch
class Beta(Distribution):
    r"""
    Beta distribution parameterized by `concentration1` and `concentration0`.

    Example::

        >>> m = Beta(torch.Tensor([0.5]), torch.Tensor([0.5]))
        >>> m.sample()  # Beta distributed with concentration concentration1 and concentration0
         0.1046
        [torch.FloatTensor of size 1]

    Args:
        concentration1 (float or Tensor or Variable): 1st concentration parameter of the distribution
            (often referred to as alpha)
        concentration0 (float or Tensor or Variable): 2nd concentration parameter of the distribution
            (often referred to as beta)
    """
    params = {'concentration1': constraints.positive, 'concentration0': constraints.positive}
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, concentration1, concentration0):
        if isinstance(concentration1, Number) and isinstance(concentration0, Number):
            concentration1_concentration0 = torch.Tensor([concentration1, concentration0])
        else:
            concentration1, concentration0 = broadcast_all(concentration1, concentration0)
            concentration1_concentration0 = torch.stack([concentration1, concentration0], -1)
        self._dirichlet = Dirichlet(concentration1_concentration0)
        super(Beta, self).__init__(self._dirichlet._batch_shape)

    def rsample(self, sample_shape=()):
        value = self._dirichlet.rsample(sample_shape).select(-1, 0)
        if isinstance(value, Number):
            value = self._dirichlet.concentration.new([value])
        return value

    def log_prob(self, value):
        self._validate_log_prob_arg(value)
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def entropy(self):
        return self._dirichlet.entropy()

    @property
    def concentration1(self):
        result = self._dirichlet.concentration[..., 0]
        if isinstance(result, Number):
            return torch.Tensor([result])
        else:
            return result

    @property
    def concentration0(self):
        result = self._dirichlet.concentration[..., 1]
        if isinstance(result, Number):
            return torch.Tensor([result])
        else:
            return result
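
The class above never evaluates a Beta density directly: it stores a two-component Dirichlet and routes everything through it. A minimal sketch of that heads/tails trick, using the released torch.distributions API rather than the snippet itself:

import torch
from torch.distributions import Beta, Dirichlet

x = torch.tensor([0.3])
beta_lp = Beta(torch.tensor([0.5]), torch.tensor([0.5])).log_prob(x)
# A Beta(a, b) draw x is the first coordinate of a Dirichlet([a, b]) draw,
# so its log-density equals the Dirichlet log-density at [x, 1 - x].
dir_lp = Dirichlet(torch.tensor([0.5, 0.5])).log_prob(torch.stack([x, 1.0 - x], -1))
print(torch.allclose(beta_lp, dir_lp))  # True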
Example #2
class Beta(Distribution):
    r"""
    Creates a Beta distribution parameterized by concentration `alpha` and `beta`.

    Example::

        >>> m = Beta(torch.Tensor([0.5]), torch.Tensor([0.5]))
        >>> m.sample()  # Beta distributed with concentration alpha and beta
         0.1046
        [torch.FloatTensor of size 1]

    Args:
        alpha (float or Tensor or Variable): 1st concentration parameter of the distribution
        beta (float or Tensor or Variable): 2nd concentration parameter of the distribution
    """
    params = {'alpha': constraints.positive, 'beta': constraints.positive}
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, alpha, beta):
        if isinstance(alpha, Number) and isinstance(beta, Number):
            alpha_beta = torch.Tensor([alpha, beta])
        else:
            alpha, beta = broadcast_all(alpha, beta)
            alpha_beta = torch.stack([alpha, beta], -1)
        self._dirichlet = Dirichlet(alpha_beta)
        super(Beta, self).__init__(self._dirichlet._batch_shape)

    def rsample(self, sample_shape=()):
        value = self._dirichlet.rsample(sample_shape).select(-1, 0)
        if isinstance(value, Number):
            value = self._dirichlet.alpha.new([value])
        return value

    def log_prob(self, value):
        self._validate_log_prob_arg(value)
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def entropy(self):
        return self._dirichlet.entropy()

    @property
    def alpha(self):
        result = self._dirichlet.alpha[..., 0]
        if isinstance(result, Number):
            return torch.Tensor([result])
        else:
            return result

    @property
    def beta(self):
        result = self._dirichlet.alpha[..., 1]
        if isinstance(result, Number):
            return torch.Tensor([result])
        else:
            return result
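
What `has_rsample = True` buys you in either variant: samples are reparameterized, so gradients flow back into the concentration parameters. A short sketch with the released torch.distributions.Beta:

import torch
from torch.distributions import Beta

alpha = torch.tensor([2.0], requires_grad=True)
beta = torch.tensor([3.0], requires_grad=True)
x = Beta(alpha, beta).rsample()  # pathwise-differentiable sample
x.sum().backward()
print(alpha.grad, beta.grad)     # both non-None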
Example #3
def log_joint_pdf(self, samples, log_f):
    """
    Returns (the log of) p(piece | cluster, transposition); assuming a uniform prior over
    clusters and transpositions, this is proportional to the joint p(piece, cluster, transposition)
    :param samples: array of shape (n_pieces, n_samples, n_pitches, n_clusters, n_transpositions)
    :param log_f: array of shape (n_pieces, n_samples, n_pitches, n_clusters, n_transpositions)
    :return: array of shape (...)
    """
    # construct Dirichlet distributions (move the pitch dimension last)
    dirichlet = Dirichlet(torch.einsum('abcde->abdec', log_f.exp()))
    # get point-wise probabilities and multiply up (log-sum) samples for each piece
    probs = dirichlet.log_prob(torch.einsum('abcde->abdec', samples))
    return probs.sum(dim=1)
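
To make the tensor layout concrete, a sketch with made-up dimensions (2 pieces, 3 samples, 4 pitches, 5 clusters, 6 transpositions); the pitch axis must hold points on the simplex, which is why it is moved last before Dirichlet sees it:

import torch
from torch.distributions import Dirichlet

log_f = torch.randn(2, 3, 4, 5, 6)
# draw valid simplex samples with the pitch axis last, then permute back
samples = Dirichlet(torch.ones(2, 3, 5, 6, 4)).sample()
samples = torch.einsum('abdec->abcde', samples)
probs = Dirichlet(torch.einsum('abcde->abdec', log_f.exp())).log_prob(
    torch.einsum('abcde->abdec', samples))
print(probs.shape)             # torch.Size([2, 3, 5, 6])
print(probs.sum(dim=1).shape)  # torch.Size([2, 5, 6]): samples pooled per piece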
Example #4
def loss_function(targets,
                  outputs,
                  mu,
                  logvar,
                  alphas,
                  topics,
                  bow=None,
                  joint=False):
    """

    Inputs:
        targets: target tokens
        outputs: predicted tokens
        mu:      latent mean
        logvar:  log of the latent variance
        alphas:  parameters of the dirichlet prior p(w|z) given latent code
        topics:  actual distribution of topics q(w|x,z) i.e. posterior given x

    Outputs:
        ce_loss: cross entropy loss of the tokens
        kld:     D(q(z|x)||p(z))
        kld_tpc: D(q(w|x,z)||p(w|z))
    
    """
    ce_loss = F.cross_entropy(
        outputs.view(outputs.size(0) * outputs.size(1), outputs.size(2)),
        targets.view(-1),
        reduction='sum',  # was size_average=False (deprecated); 'sum' is equivalent
        ignore_index=PAD_ID)
    if bow is None:
        bow_loss = torch.tensor(0., device=outputs.device)
    else:
        bow = bow.unsqueeze(1).repeat(1, outputs.size(1), 1).contiguous()
        bow_loss = F.cross_entropy(
            bow.view(bow.size(0) * bow.size(1), bow.size(2)),
            targets.view(-1),
            reduction='sum',  # was size_average=False (deprecated); 'sum' is equivalent
            ignore_index=PAD_ID)
    if type(mu) == torch.Tensor:
        kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    else:
        kld = -0.5 * torch.sum(1 + logvar[1] - logvar[0] - (
            (mu[1] - mu[0]).pow(2) + logvar[1].exp()) / logvar[0].exp())
    prior = Dirichlet(alphas)
    if joint:
        loss_tpc = -torch.sum(prior.log_prob(topics))
    else:
        alphas2 = topics * topics.size(1)
        posterior = Dirichlet(alphas2)
        loss_tpc = kl_divergence(posterior, prior).sum()
    return ce_loss, kld, loss_tpc, bow_loss
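
The topic term in isolation, with made-up sizes: each row of `topics` is a point on the simplex, and scaling it by the topic dimension turns it into Dirichlet concentration parameters, exactly as in the `joint=False` branch above:

import torch
from torch.distributions import Dirichlet
from torch.distributions.kl import kl_divergence

alphas = torch.full((8, 20), 0.7)                   # prior concentrations
topics = torch.softmax(torch.randn(8, 20), dim=-1)  # rows sum to 1
posterior = Dirichlet(topics * topics.size(1))
loss_tpc = kl_divergence(posterior, Dirichlet(alphas)).sum()
print(loss_tpc)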
Example #5
def test_dirichlet_logpdf():
    alpha = torch.tensor([0.5, 0.6, 1.2])
    ps = torch.tensor([0.2, 0.3, 0.5])

    log_pdf = dirichlet_logpdf(ps, alpha)

    # pytorch implementation
    dist = Dirichlet(concentration=alpha)
    log_prob = dist.log_prob(ps)

    print(log_pdf)
    print(log_prob)

    assert torch.allclose(log_pdf, log_prob)  # exact == is fragile for floats
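
`dirichlet_logpdf` itself is not shown; a hand-rolled version consistent with this test would evaluate log p(x) = sum_i (alpha_i - 1) * log x_i - log B(alpha) directly:

import torch

def dirichlet_logpdf(ps, alpha):
    # log B(alpha) = sum(lgamma(alpha)) - lgamma(sum(alpha))
    log_beta = torch.lgamma(alpha).sum() - torch.lgamma(alpha.sum())
    return ((alpha - 1) * ps.log()).sum() - log_beta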
Example #6
File: beta.py  Project: lxlhh/pytorch
class Beta(Distribution):
    r"""
    Creates a Beta distribution parameterized by concentration `alpha` and `beta`.

    Example::

        >>> m = Beta(torch.Tensor([0.5]), torch.Tensor([0.5]))
        >>> m.sample()  # Beta distributed with concentration alpha and beta
         0.1046
        [torch.FloatTensor of size 1]

    Args:
        alpha (float or Tensor or Variable): 1st concentration parameter of the distribution
        beta (float or Tensor or Variable): 2nd concentration parameter of the distribution
    """
    params = {'alpha': constraints.positive, 'beta': constraints.positive}
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, alpha, beta):
        if isinstance(alpha, Number) and isinstance(beta, Number):
            alpha_beta = torch.Tensor([alpha, beta])
        else:
            alpha, beta = broadcast_all(alpha, beta)
            alpha_beta = torch.stack([alpha, beta], -1)
        self._dirichlet = Dirichlet(alpha_beta)
        super(Beta, self).__init__(self._dirichlet._batch_shape)

    def rsample(self, sample_shape=()):
        value = self._dirichlet.rsample(sample_shape).select(-1, 0)
        if isinstance(value, Number):
            value = self._dirichlet.alpha.new([value])
        return value

    def log_prob(self, value):
        self._validate_log_prob_arg(value)
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def entropy(self):
        return self._dirichlet.entropy()
Example #7
class Beta(Distribution):
    r"""
    Creates a Beta distribution parameterized by concentration `alpha` and `beta`.

    Example::

        >>> m = Beta(torch.Tensor([0.5]), torch.Tensor([0.5]))
        >>> m.sample()  # Beta distributed with concentration alpha and beta
         0.1046
        [torch.FloatTensor of size 1]

    Args:
        alpha (float or Tensor or Variable): 1st concentration parameter of the distribution
        beta (float or Tensor or Variable): 2nd concentration parameter of the distribution
    """
    has_rsample = True

    def __init__(self, alpha, beta):
        if isinstance(alpha, Number) and isinstance(beta, Number):
            alpha_beta = torch.Tensor([alpha, beta])
        else:
            alpha, beta = broadcast_all(alpha, beta)
            alpha_beta = torch.stack([alpha, beta], -1)
        self._dirichlet = Dirichlet(alpha_beta)
        super(Beta, self).__init__(self._dirichlet._batch_shape)

    def rsample(self, sample_shape=()):
        value = self._dirichlet.rsample(sample_shape).select(-1, 0)
        if isinstance(value, Number):
            value = self._dirichlet.alpha.new([value])
        return value

    def log_prob(self, value):
        self._validate_log_prob_arg(value)
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def entropy(self):
        return self._dirichlet.entropy()
Example #8
class Beta(ExponentialFamily):
    r"""
    Beta distribution parameterized by `concentration1` and `concentration0`.

    Example::

        >>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5]))
        >>> m.sample()  # Beta distributed with concentration concentration1 and concentration0
         0.1046
        [torch.FloatTensor of size 1]

    Args:
        concentration1 (float or Tensor): 1st concentration parameter of the distribution
            (often referred to as alpha)
        concentration0 (float or Tensor): 2nd concentration parameter of the distribution
            (often referred to as beta)
    """
    arg_constraints = {
        'concentration1': constraints.positive,
        'concentration0': constraints.positive
    }
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, concentration1, concentration0, validate_args=None):
        if isinstance(concentration1, Number) and isinstance(
                concentration0, Number):
            concentration1_concentration0 = torch.tensor(
                [float(concentration1),
                 float(concentration0)])
        else:
            concentration1, concentration0 = broadcast_all(
                concentration1, concentration0)
            concentration1_concentration0 = torch.stack(
                [concentration1, concentration0], -1)
        self._dirichlet = Dirichlet(concentration1_concentration0)
        super(Beta, self).__init__(self._dirichlet._batch_shape,
                                   validate_args=validate_args)

    @property
    def mean(self):
        return self.concentration1 / (self.concentration1 +
                                      self.concentration0)

    @property
    def variance(self):
        total = self.concentration1 + self.concentration0
        return (self.concentration1 * self.concentration0 / (total.pow(2) *
                                                             (total + 1)))

    def rsample(self, sample_shape=()):
        value = self._dirichlet.rsample(sample_shape).select(-1, 0)
        if isinstance(value, Number):
            value = self._dirichlet.concentration.new_tensor(value)
        return value

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def entropy(self):
        return self._dirichlet.entropy()

    @property
    def concentration1(self):
        result = self._dirichlet.concentration[..., 0]
        if isinstance(result, Number):
            return torch.Tensor([result])
        else:
            return result

    @property
    def concentration0(self):
        result = self._dirichlet.concentration[..., 1]
        if isinstance(result, Number):
            return torch.Tensor([result])
        else:
            return result

    @property
    def _natural_params(self):
        return (self.concentration1, self.concentration0)

    def _log_normalizer(self, x, y):
        return torch.lgamma(x) + torch.lgamma(y) - torch.lgamma(x + y)
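
A quick Monte Carlo sanity check of the closed-form `mean` and `variance` properties, using the released torch.distributions.Beta:

import torch
from torch.distributions import Beta

m = Beta(torch.tensor([2.0]), torch.tensor([5.0]))
draws = m.sample((100_000,))
print(m.mean, draws.mean(0))     # both near 2 / 7 ~= 0.2857
print(m.variance, draws.var(0))  # both near 10 / (49 * 8) ~= 0.0255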
Example #9
File: beta.py  Project: gtgalone/pytorch
class Beta(ExponentialFamily):
    r"""
    Beta distribution parameterized by `concentration1` and `concentration0`.

    Example::

        >>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5]))
        >>> m.sample()  # Beta distributed with concentration concentration1 and concentration0
        tensor([ 0.1046])

    Args:
        concentration1 (float or Tensor): 1st concentration parameter of the distribution
            (often referred to as alpha)
        concentration0 (float or Tensor): 2nd concentration parameter of the distribution
            (often referred to as beta)
    """
    arg_constraints = {'concentration1': constraints.positive, 'concentration0': constraints.positive}
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, concentration1, concentration0, validate_args=None):
        if isinstance(concentration1, Number) and isinstance(concentration0, Number):
            concentration1_concentration0 = torch.tensor([float(concentration1), float(concentration0)])
        else:
            concentration1, concentration0 = broadcast_all(concentration1, concentration0)
            concentration1_concentration0 = torch.stack([concentration1, concentration0], -1)
        self._dirichlet = Dirichlet(concentration1_concentration0)
        super(Beta, self).__init__(self._dirichlet._batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        return self.concentration1 / (self.concentration1 + self.concentration0)

    @property
    def variance(self):
        total = self.concentration1 + self.concentration0
        return (self.concentration1 * self.concentration0 /
                (total.pow(2) * (total + 1)))

    def rsample(self, sample_shape=()):
        value = self._dirichlet.rsample(sample_shape).select(-1, 0)
        if isinstance(value, Number):
            value = self._dirichlet.concentration.new_tensor(value)
        return value

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def entropy(self):
        return self._dirichlet.entropy()

    @property
    def concentration1(self):
        result = self._dirichlet.concentration[..., 0]
        if isinstance(result, Number):
            return torch.tensor([result])
        else:
            return result

    @property
    def concentration0(self):
        result = self._dirichlet.concentration[..., 1]
        if isinstance(result, Number):
            return torch.tensor([result])
        else:
            return result

    @property
    def _natural_params(self):
        return (self.concentration1, self.concentration0)

    def _log_normalizer(self, x, y):
        return torch.lgamma(x) + torch.lgamma(y) - torch.lgamma(x + y)
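
The `_log_normalizer` above is log B(a, b); a quick check that the density decomposes as the exponential-family bookkeeping says it should:

import torch
from torch.distributions import Beta

a, b, x = torch.tensor(2.0), torch.tensor(5.0), torch.tensor(0.3)
log_B = torch.lgamma(a) + torch.lgamma(b) - torch.lgamma(a + b)
manual = (a - 1) * x.log() + (b - 1) * (1 - x).log() - log_B
print(torch.allclose(Beta(a, b).log_prob(x), manual))  # True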
Example #10
def forward(self, alpha, value):
    dirichlet = TorchDirichlet(alpha)
    return dirichlet.log_prob(value)
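
`TorchDirichlet` presumably aliases torch.distributions.Dirichlet; a standalone version of the same forward pass under that assumption:

import torch
from torch.distributions import Dirichlet as TorchDirichlet

alpha = torch.tensor([0.5, 0.6, 1.2])
value = torch.tensor([0.2, 0.3, 0.5])
print(TorchDirichlet(alpha).log_prob(value))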
Example #11
class ZOIBeta(ExponentialFamily):
    """ Zero one inflated Beta distribution
    
    Args: 
        p (float or Tensor): Pr(y = 0)
        q (float or Tensor): Pr(y = 1 | y != 0)
        concentration1 (float or Tensor): 1st Beta dist. parameter 
            (often referred to as alpha)
        concentration0 (float or Tensor): 2nd Beta dist. parameter
            (often referred to as beta)
    """

    arg_constraints = {
        'p': constraints.unit_interval,
        'q': constraints.unit_interval,
        'concentration1': constraints.positive,
        'concentration0': constraints.positive
    }
    support = constraints.unit_interval  # the closed interval, so 0 and 1 are included
    has_rsample = False

    def __init__(self,
                 p,
                 q,
                 concentration1,
                 concentration0,
                 validate_args=None):
        if isinstance(concentration1, Number) and isinstance(
                concentration0, Number):
            self.concentration1_concentration0 = torch.tensor(
                [float(concentration1),
                 float(concentration0)])
        else:
            concentration1, concentration0 = broadcast_all(
                concentration1, concentration0)
            self.concentration1_concentration0 = torch.stack(
                [concentration1, concentration0], -1)
        self._dirichlet = Dirichlet(self.concentration1_concentration0)
        self.log_p = torch.log(p)
        self.log1m_p = torch.log(1 - p)
        self.log_q = torch.log(q)
        self.log1m_q = torch.log(1 - q)
        super(ZOIBeta, self).__init__(self._dirichlet._batch_shape,
                                      validate_args=validate_args)

    def beta_lp(self, value):
        if self._validate_args:
            self._validate_sample(value)
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def log_prob(self, value):
        lp = torch.zeros_like(value, dtype=torch.float)
        if torch.mul(value > 0., value < 1.).any():
            beta_idx = torch.where(torch.mul(value > 0., value < 1.))
            self._dirichlet = Dirichlet(
                self.concentration1_concentration0[beta_idx])
            lp[beta_idx] = self.log1m_p[beta_idx] + self.log1m_q[
                beta_idx] + self.beta_lp(value[beta_idx])
        lp[torch.where(value == 0.)] = self.log_p[torch.where(value == 0.)]
        lp[torch.where(value == 1.)] = self.log1m_p[torch.where(
            value == 1.)] + self.log_q[torch.where(value == 1.)]
        return lp
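
A hedged usage sketch (all numbers hypothetical; per-observation tensors are used so the fancy indexing in `log_prob` lines up, and validate_args=False because the class stores log_p rather than an attribute named p):

import torch

p = torch.full((5,), 0.1)   # Pr(y = 0)
q = torch.full((5,), 0.2)   # Pr(y = 1 | y != 0)
a = torch.full((5,), 2.0)
b = torch.full((5,), 3.0)
dist = ZOIBeta(p, q, a, b, validate_args=False)
y = torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0])
print(dist.log_prob(y))     # finite at the endpoints and in the interior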
Example #12
class Beta(Distribution):
    r"""
    Beta distribution parameterized by `concentration1` and `concentration0`.

    Example::

        >>> m = Beta(torch.Tensor([0.5]), torch.Tensor([0.5]))
        >>> m.sample()  # Beta distributed with concentration concentration1 and concentration0
         0.1046
        [torch.FloatTensor of size 1]

    Args:
        concentration1 (float or Tensor or Variable): 1st concentration parameter of the distribution
            (often referred to as alpha)
        concentration0 (float or Tensor or Variable): 2nd concentration parameter of the distribution
            (often referred to as beta)
    """
    params = {'concentration1': constraints.positive, 'concentration0': constraints.positive}
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, concentration1, concentration0):
        if isinstance(concentration1, Number) and isinstance(concentration0, Number):
            concentration1_concentration0 = variable([concentration1, concentration0])
        else:
            concentration1, concentration0 = broadcast_all(concentration1, concentration0)
            concentration1_concentration0 = torch.stack([concentration1, concentration0], -1)
        self._dirichlet = Dirichlet(concentration1_concentration0)
        super(Beta, self).__init__(self._dirichlet._batch_shape)

    @property
    def mean(self):
        return self.concentration1 / (self.concentration1 + self.concentration0)

    @property
    def variance(self):
        total = self.concentration1 + self.concentration0
        return (self.concentration1 * self.concentration0 /
                (total.pow(2) * (total + 1)))

    def rsample(self, sample_shape=()):
        value = self._dirichlet.rsample(sample_shape).select(-1, 0)
        if isinstance(value, Number):
            value = self._dirichlet.concentration.new([value])
        return value

    def log_prob(self, value):
        self._validate_log_prob_arg(value)
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def entropy(self):
        return self._dirichlet.entropy()

    @property
    def concentration1(self):
        result = self._dirichlet.concentration[..., 0]
        if isinstance(result, Number):
            return torch.Tensor([result])
        else:
            return result

    @property
    def concentration0(self):
        result = self._dirichlet.concentration[..., 1]
        if isinstance(result, Number):
            return torch.Tensor([result])
        else:
            return result
Example #13
File: beta.py  Project: huaxz1986/pytorch
class Beta(ExponentialFamily):
    r"""
    Beta distribution parameterized by :attr:`concentration1` and :attr:`concentration0`.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5]))
        >>> m.sample()  # Beta distributed with concentration concentration1 and concentration0
        tensor([ 0.1046])

    Args:
        concentration1 (float or Tensor): 1st concentration parameter of the distribution
            (often referred to as alpha)
        concentration0 (float or Tensor): 2nd concentration parameter of the distribution
            (often referred to as beta)
    """
    arg_constraints = {
        'concentration1': constraints.positive,
        'concentration0': constraints.positive
    }
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, concentration1, concentration0, validate_args=None):
        if isinstance(concentration1, Real) and isinstance(
                concentration0, Real):
            concentration1_concentration0 = torch.tensor(
                [float(concentration1),
                 float(concentration0)])
        else:
            concentration1, concentration0 = broadcast_all(
                concentration1, concentration0)
            concentration1_concentration0 = torch.stack(
                [concentration1, concentration0], -1)
        self._dirichlet = Dirichlet(concentration1_concentration0,
                                    validate_args=validate_args)
        super(Beta, self).__init__(self._dirichlet._batch_shape,
                                   validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Beta, _instance)
        batch_shape = torch.Size(batch_shape)
        new._dirichlet = self._dirichlet.expand(batch_shape)
        super(Beta, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    @property
    def mean(self):
        return self.concentration1 / (self.concentration1 +
                                      self.concentration0)

    @property
    def mode(self):
        return self._dirichlet.mode[..., 0]

    @property
    def variance(self):
        total = self.concentration1 + self.concentration0
        return (self.concentration1 * self.concentration0 / (total.pow(2) *
                                                             (total + 1)))

    def rsample(self, sample_shape=()):
        return self._dirichlet.rsample(sample_shape).select(-1, 0)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        heads_tails = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(heads_tails)

    def entropy(self):
        return self._dirichlet.entropy()

    @property
    def concentration1(self):
        result = self._dirichlet.concentration[..., 0]
        if isinstance(result, Number):
            return torch.tensor([result])
        else:
            return result

    @property
    def concentration0(self):
        result = self._dirichlet.concentration[..., 1]
        if isinstance(result, Number):
            return torch.tensor([result])
        else:
            return result

    @property
    def _natural_params(self):
        return (self.concentration1, self.concentration0)

    def _log_normalizer(self, x, y):
        return torch.lgamma(x) + torch.lgamma(y) - torch.lgamma(x + y)
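
The `expand` method added in this version broadcasts a distribution over a new batch shape without copying parameters; a short sketch with the released torch.distributions.Beta:

import torch
from torch.distributions import Beta

base = Beta(torch.tensor(0.5), torch.tensor(0.5))   # batch_shape ()
batched = base.expand(torch.Size([3, 2]))           # batch_shape (3, 2)
print(batched.batch_shape, batched.sample().shape)  # torch.Size([3, 2]) twice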