Example #1
import autograd.numpy as np
from autograd.scipy.special import i0

def vonmises_logpdf(data, mus, kappas, mask=None):
    """
    Compute the log probability density of a von Mises distribution.
    This will broadcast as long as data, mus, and kappas have the same
    (or at least compatible) leading dimensions.

    Parameters
    ----------
    data : array_like (..., D)
        The points at which to evaluate the log density

    mus : array_like (..., D)
        The means of the von Mises distribution(s)

    kappas : array_like (..., D)
        The concentration of the von Mises distribution(s)

    mask : array_like (..., D) bool
        Optional mask indicating which entries in the data are observed

    Returns
    -------
    lps : array_like (...,)
        Log probabilities under the von Mises distribution(s).
    """
    D = data.shape[-1]
    assert mus.shape[-1] == D
    assert kappas.shape[-1] == D

    # Check mask
    mask = mask if mask is not None else np.ones_like(data, dtype=bool)
    assert mask.shape == data.shape

    # log p(x) = sum_d [ kappa_d * cos(x_d - mu_d) - log(2*pi) - log(I0(kappa_d)) ],
    # taken only over the observed (unmasked) dimensions
    ll = kappas * np.cos(data - mus) - np.log(2 * np.pi) - np.log(i0(kappas))
    return np.sum(ll * mask, axis=-1)
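
Continuing the example, a quick check of the broadcasting (a minimal sketch with
made-up angles, scoring five 2-D points against a single distribution):

points = np.array([[0.1, -0.3], [1.2, 0.4], [2.0, -1.0], [-0.5, 0.7], [3.0, 2.5]])
lps = vonmises_logpdf(points, mus=np.zeros(2), kappas=np.ones(2))
print(lps.shape)   # (5,): one log density per point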
Example #2
    def log_likelihoods(self, data, input, mask, tag):
        from autograd.scipy.special import i0
        # Compute the log likelihood of the data under each of the K classes.
        # Returns a T x K array whose (t, k) entry is the log probability of data[t] under class k.
        mus, kappas = self.mus, np.exp(self.log_kappas)   # kappas are stored in log space to stay positive
        mask = np.ones_like(data, dtype=bool) if mask is None else mask

        return np.sum(
            (kappas * (np.cos(data[:, None, :] - mus)) - np.log(2 * np.pi) -
             np.log(i0(kappas))) * mask[:, None, :],
            axis=2)
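
Assuming self.mus and self.log_kappas both have shape (K, D) (an assumption
consistent with the broadcasting above), a minimal standalone sketch of the same
computation shows how the shapes work out:

import autograd.numpy as np
from autograd.scipy.special import i0

T, K, D = 4, 3, 2                                 # hypothetical sizes
data = np.linspace(-np.pi, np.pi, T * D).reshape(T, D)
mus = np.zeros((K, D))                            # assumed shape: one mean per class
kappas = np.ones((K, D))                          # assumed shape: one concentration per class

# data[:, None, :] is (T, 1, D); mus is (K, D), so the difference broadcasts to (T, K, D)
ll = kappas * np.cos(data[:, None, :] - mus) - np.log(2 * np.pi) - np.log(i0(kappas))
print(np.sum(ll, axis=2).shape)                   # (T, K): log likelihood of data[t] under class k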
Example #3
    def m_step(self, expectations, datas, inputs, masks, tags, **kwargs):
        from autograd.scipy.special import i0, i1
        x = np.concatenate(datas)

        weights = np.concatenate([Ez for Ez, _, _ in expectations])

        # Convert angles to unit vectors on the circle; the weighted resultant
        # vector yields closed-form updates for the mean directions.
        x_k = np.stack((np.sin(x), np.cos(x)), axis=1)      # (T, 2, D)

        r_k = np.tensordot(weights.T, x_k, (-1, 0))         # (K, 2, D) weighted resultants

        r_norm = np.sqrt(np.sum(r_k**2, 1))
        mus_k = r_k / r_norm[:, None]                       # unit mean-direction vectors
        r_bar = r_norm / weights.sum(0)[:, None]            # mean resultant length in [0, 1)

        # The concentration MLE solves A(kappa) = i1(kappa) / i0(kappa) = r_bar,
        # which has no closed form: start from the standard approximation and
        # refine with a truncated Newton iteration (two steps).
        kappa_0 = r_bar * (2 - r_bar**2) / (1 - r_bar**2)

        A_0 = i1(kappa_0) / i0(kappa_0)
        kappa_1 = kappa_0 - (A_0 - r_bar) / (1 - A_0**2 - A_0 / kappa_0)
        A_1 = i1(kappa_1) / i0(kappa_1)
        kappa_2 = kappa_1 - (A_1 - r_bar) / (1 - A_1**2 - A_1 / kappa_1)

        for k in range(self.K):
            # arctan2(sin, cos) recovers the angle of each unit mean-direction vector
            self.mus[k] = np.arctan2(*mus_k[k])
            self.log_kappas[k] = np.log(kappa_2[k])
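
The Newton update above targets the root of A(kappa) = i1(kappa) / i0(kappa) = r_bar,
the maximum-likelihood condition for the concentration. A small standalone check
(a sketch using scipy rather than autograd, with a made-up r_bar) confirms that two
refined steps land close to a brute-force root:

import numpy as np
from scipy.special import i0, i1
from scipy.optimize import brentq

r_bar = 0.7                                        # hypothetical mean resultant length
A = lambda k: i1(k) / i0(k)

kappa = r_bar * (2 - r_bar**2) / (1 - r_bar**2)    # same initialization as in m_step
for _ in range(2):                                 # two Newton steps, as in m_step
    a = A(kappa)
    kappa = kappa - (a - r_bar) / (1 - a**2 - a / kappa)

exact = brentq(lambda k: A(k) - r_bar, 1e-6, 1e3)  # brute-force root for comparison
print(kappa, exact)                                # the two should agree closely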
Example #4
import autograd.numpy as np

def vonmises_logpdf(data, mus, kappas, mask=None):
    """
    Compute the log probability density of a von Mises distribution.
    This will broadcast as long as data, mus, and kappas have the same
    (or at least compatible) leading dimensions.

    Parameters
    ----------
    data : array_like (..., D)
        The points at which to evaluate the log density

    mus : array_like (..., D)
        The means of the von Mises distribution(s)

    kappas : array_like (..., D)
        The concentration of the von Mises distribution(s)

    mask : array_like (..., D) bool
        Optional mask indicating which entries in the data are observed

    Returns
    -------
    lps : array_like (...,)
        Log probabilities under the von Mises distribution(s).
    """
    try:
        from autograd.scipy.special import i0
    except ImportError:
        raise Exception(
            "von Mises relies on the function autograd.scipy.special.i0. "
            "This is present in the latest Github code, but not on pypi. "
            "Please use the Github version of autograd instead.")

    D = data.shape[-1]
    assert mus.shape[-1] == D
    assert kappas.shape[-1] == D

    # Check mask
    mask = mask if mask is not None else np.ones_like(data, dtype=bool)
    assert mask.shape == data.shape

    ll = kappas * np.cos(data - mus) - np.log(2 * np.pi) - np.log(i0(kappas))
    return np.sum(ll * mask, axis=-1)
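
Continuing this variant, masked-out entries contribute zero to the sum, so the log
density covers only the observed coordinates (a minimal sketch with made-up values):

points = np.array([[0.2, 1.5], [-0.4, 0.9]])
# Hypothetical mask: the second coordinate of the second point is unobserved.
observed = np.array([[True, True], [True, False]])
lps = vonmises_logpdf(points, mus=np.zeros(2), kappas=np.ones(2), mask=observed)
print(lps)   # the second entry scores only its observed first coordinate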