Example #1
def RC_drop_var(x, S, df):

    # Log-pdf of the full (df-scaled) sample matrix under Wishart(df, S)
    dist = st.wishart(df=df, scale=S)
    s_logpdf = dist.logpdf(x * df)

    ex_logpdf = []

    for j in range(x.shape[1]):
        # Drop variable j from both the sample and the scale matrix
        x_c = np.delete(np.delete(x, j, axis=0), j, axis=1)
        S_c = np.delete(np.delete(S, j, axis=0), j, axis=1)

        dist_c = st.wishart(df=df, scale=S_c)
        p_ex = dist_c.logpdf(x_c * df)

        # Contribution of variable j to the joint log-density
        ex_logpdf.append(s_logpdf - p_ex)

    return np.asarray(ex_logpdf)
Example #2
def RC2_en_drop(x, S, df):

    dist = st.wishart(df=df, scale=S)
    s_logpdf = dist.logpdf(x * df)

    ex_logpdf = np.empty(x.shape)

    size = range(len(x))
    for i, j in it.combinations_with_replacement(size, 2):
        # Drop variables i and j from the sample and the scale matrix
        x2 = np.delete(np.delete(x, [i, j], axis=0), [i, j], axis=1)
        S2 = np.delete(np.delete(S, [i, j], axis=0), [i, j], axis=1)

        dist_c = st.wishart(df=df, scale=S2)
        p_ex = dist_c.logpdf(x2 * df)

        # Fill the upper triangle, then mirror it for symmetry
        ex_logpdf[i, j] = s_logpdf - p_ex
        ex_logpdf[j, i] = ex_logpdf[i, j]

    return ex_logpdf
Example #3
def Gibbs_sample_slow_scipy_version(param_dict, non_inf=False):
    '''Draw a sample from the Normal-Wishart model using the built-in scipy
    distributions (significantly slower). Based on Gelman et al. (2013, 3rd
    ed., chapter 4).

    Parameters
    ----------
    param_dict: python dict
        Dictionary with sufficient statistics and parameters from a
        multivariate data set, obtained through the functions
        'make_param_dict' and 'update_param_dict'.

    non_inf: bool
        This model tends to weight heavily the distance from the prior to the
        empirical mean. If a non-informative prior is used, it is advisable to
        use this option; the parametrization will then correspond to the
        multivariate Jeffreys prior density.

    Output
    ------
    Returns a d-dimensional draw from the Normal-Wishart model. For multiple
    samples, use the function 'Gibbs_sampler' with one of the options:
    ('slow'): non_inf = False
    ('nonInfSlow'): non_inf = True
    '''
    if non_inf:
        Prec_m = sts.wishart(df = param_dict['n']-1., scale=param_dict['invS_m']).rvs()
        Sigma_m = np.linalg.inv(Prec_m)
        mu = sts.multivariate_normal(mean= param_dict['E_mu'], cov=(1./param_dict['n'])*Sigma_m).rvs()
        return sts.multivariate_normal(mean=mu, cov = Sigma_m).rvs()
        
        
    else:
                    
        Prec_m = sts.wishart(df = param_dict['up_v_0'], scale=param_dict['up_Prec_0']).rvs()
        Sigma_m = np.linalg.inv(Prec_m)
        mu = sts.multivariate_normal(mean= param_dict['up_mu_0'], cov=(1./param_dict['up_k_0'])*Sigma_m).rvs()
        return sts.multivariate_normal(mean=mu, cov = Sigma_m).rvs()
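For context, here is a minimal usage sketch of the non-informative branch. The dictionary keys are assumptions that simply mirror what the function body reads; the real keys come from 'make_param_dict', which is not shown in this example.

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(50, 3))
centered = data - data.mean(axis=0)

# Hypothetical stand-in for 'make_param_dict': sample size, empirical mean,
# and inverse of the centered scatter matrix (the keys the non_inf branch reads)
param_dict = {
    'n': data.shape[0],
    'E_mu': data.mean(axis=0),
    'invS_m': np.linalg.inv(centered.T @ centered),
}

draw = Gibbs_sample_slow_scipy_version(param_dict, non_inf=True)  # one 3-dim draw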
Example #4
def RC2_drop_var(x, S, df):

    dist = st.wishart(df=df, scale=S)
    s_logpdf = dist.logpdf(x * df)

    ex_logpdf = np.zeros((x.shape[1], x.shape[0]))
    diag_indices = np.diag_indices(len(x))
    diag = []

    for j in range(x.shape[1]):
        for i in range(x.shape[0]):
            if j == i:
                # Diagonal element: drop the single variable j (building the
                # diagonal in a separate list avoids a good number of ops and
                # matrix copies)
                x_c = np.delete(np.delete(x, j, axis=0), j, axis=1)
                S_c = np.delete(np.delete(S, j, axis=0), j, axis=1)

                dist_c = st.wishart(df=df, scale=S_c)
                diag.append(s_logpdf - dist_c.logpdf(x_c * df))
            else:
                # Off-diagonal: drop variables j and i; after removing j,
                # indices above j shift down by one
                i_shift = i if i < j else i - 1
                x_c = np.delete(np.delete(x, j, axis=0), j, axis=1)
                S_c = np.delete(np.delete(S, j, axis=0), j, axis=1)
                x_c = np.delete(np.delete(x_c, i_shift, axis=0), i_shift, axis=1)
                S_c = np.delete(np.delete(S_c, i_shift, axis=0), i_shift, axis=1)

                dist_c = st.wishart(df=df, scale=S_c)
                ex_logpdf[j, i] = s_logpdf - dist_c.logpdf(x_c * df)

    ex_logpdf[diag_indices] = diag

    return ex_logpdf
Example #5
    def get_sims(self,errdist_perms,recalc=False):
        """ generate simulated wishart samples """
        A=self.A
        
        if 'covs_sim' not in self.__dict__ or recalc:

            covs=A.get_covs()
            dof=A.dof
            # if sparse cov matrix 
            if scipy.sparse.issparse(A.get_covs()):
                
                corrs=A.get_corrs()
                cvs=covs.diagonal()[1:]

                # generate random samples from wishart
                (vars1,vars2,covmat1) = wishart_2(covs[0,0],covs.diagonal()[1:],corrs[0,1:].toarray(),dof,size=errdist_perms)
                covs_sim=[]

                for a in np.arange(vars2.shape[0]):
                    covmat=A.get_covs()*0
                    covmat[0,0]=vars1[a,0]
                    diag_inds=np.diag_indices(covs.shape[-1])
                    covmat[diag_inds[0][1:],diag_inds[1][1:]]=vars2[a,:]
                    covmat[0,1:]=covmat1[a,:]
                    covs_sim.append(covmat)
            else:
                # simulate underlying "true" covariance
                whA=stats.wishart(dof,np.squeeze(covs)[:,:])
                covs_sim=whA.rvs(10*errdist_perms)/(dof)
                covs_sim=list(covs_sim)

                # create  simulated observed covariances from underlying true cov.
                ppA=np.zeros((1,10*errdist_perms))
                whA=[]

                for yb in np.arange(10*errdist_perms):
                    whA.append(stats.wishart(dof,covs_sim[yb]))
                    ppA[0,yb]=whA[-1].pdf(np.squeeze(A.get_covs())*dof)
                   
                # generate sample distribution of covariances
                ppA=ppA/sum(ppA)
                ppA_cul=(np.dot(ppA,np.triu(np.ones(len(ppA.T)))).T)  ## memory issues
                
                rand_els = stats.uniform(0,1).rvs(errdist_perms) 
                els=np.sort(np.searchsorted(ppA_cul.flatten(),rand_els)) 
                covs_sim=[]
                for xb in np.arange(errdist_perms):
                    covs_sim.append(whA[els[xb]].rvs()/dof)
            self.covs_sim=covs_sim

        return(self.covs_sim)
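The dense branch above is an importance-resampling scheme: candidate "true" covariances are drawn from a Wishart around the observed covariance, weighted by the Wishart likelihood of the observed (dof-scaled) covariance under each candidate, and then resampled. A condensed standalone sketch of the same idea (names are assumed; `rng.choice` replaces the manual cumulative-sum search):

import numpy as np
from scipy import stats

def resample_covs(obs_cov, dof, n_out, oversample=10, seed=None):
    rng = np.random.default_rng(seed)
    n_cand = oversample * n_out
    # Candidate underlying covariances drawn around the observed one
    cands = stats.wishart(dof, obs_cov).rvs(n_cand, random_state=rng) / dof
    # Likelihood of the observed (dof-scaled) covariance under each candidate
    w = np.array([stats.wishart(dof, c).pdf(obs_cov * dof) for c in cands])
    w = w / w.sum()
    # Resample candidates in proportion to their likelihood weights
    idx = rng.choice(n_cand, size=n_out, p=w)
    return cands[idx]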
Example #6
    def test_quantile_dimensions(self):
        # Test that we can call the Wishart pdf with various quantile dimensions

        # If dim == 1, consider x.shape = [1,1,1]
        X = [
            1,                      # scalar
            [1],                    # iterable
            np.array(1),            # 0-dim
            np.r_[1],               # 1-dim
            np.array(1, ndmin=2),   # 2-dim
            np.array([1], ndmin=3)  # 3-dim
        ]

        w = wishart(1,1)
        density = w.pdf(np.array(1, ndmin=3))
        for x in X:
            assert_equal(w.pdf(x), density)

        # If dim == 1, consider x.shape = [1,1,*]
        X = [
            [1,2,3],                     # iterable
            np.r_[1,2,3],                # 1-dim
            np.array([1,2,3], ndmin=3)   # 3-dim
        ]

        w = wishart(1,1)
        density = w.pdf(np.array([1,2,3], ndmin=3))
        for x in X:
            assert_equal(w.pdf(x), density)

        # If dim == 2, consider x.shape = [2,2,1]
        # where x[:,:,*] = np.eye(2)*2
        X = [
            2,                    # scalar
            [2,2],                # iterable
            np.array(2),          # 0-dim
            np.r_[2,2],           # 1-dim
            np.array([[2,0],
                      [0,2]]),    # 2-dim
            np.array([[2,0],
                      [0,2]])[:,:,np.newaxis]  # 3-dim
        ]

        w = wishart(2,np.eye(2))
        density = w.pdf(np.array([[2,0],
                                  [0,2]])[:,:,np.newaxis])
        for x in X:
            assert_equal(w.pdf(x), density)
Example #8
def test_cholesky_transform():
    d = 10
    A = stats.wishart(d, np.eye(d)).rvs()

    cho_factor = transform.CholeskyTransform(linalg.cho_factor(A))

    U = np.triu(cho_factor.U)
    iU = np.linalg.inv(U)

    b = np.random.rand(d)
    assert np.allclose(cho_factor * b, U @ b)
    assert np.allclose(b * cho_factor, b @ U)
    assert np.allclose(cho_factor.ldiv(b), iU @ b)
    assert np.allclose(b / cho_factor, b @ iU)

    b = np.random.rand(d, d)
    assert np.allclose(cho_factor * b, U @ b)
    assert np.allclose(b * cho_factor, b @ U)
    assert np.allclose(cho_factor.ldiv(b), iU @ b)
    assert np.allclose(b / cho_factor, b @ iU)

    b = np.random.rand(d, d + 1)
    assert np.allclose(cho_factor * b, U @ b)
    assert np.allclose(cho_factor.ldiv(b), iU @ b)

    b = np.random.rand(d + 1, d)
    assert np.allclose(b * cho_factor, b @ U)
    assert np.allclose(b / cho_factor, b @ iU)
Example #9
    def test_frozen(self):
        # Test that the frozen and non-frozen Wishart give the same answers

        # Construct an arbitrary positive definite scale matrix
        dim = 4
        scale = np.diag(np.arange(dim)+1)
        scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
        scale = np.dot(scale.T, scale)

        # Construct a collection of positive definite matrices to test the PDF
        X = []
        for i in range(5):
            x = np.diag(np.arange(dim)+(i+1)**2)
            x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
            x = np.dot(x.T, x)
            X.append(x)
        X = np.array(X).T

        # Construct a 1D and 2D set of parameters
        parameters = [
            (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
            (10, scale, X)
        ]

        for (df, scale, x) in parameters:
            w = wishart(df, scale)
            assert_equal(w.var(), wishart.var(df, scale))
            assert_equal(w.mean(), wishart.mean(df, scale))
            assert_equal(w.mode(), wishart.mode(df, scale))
            assert_equal(w.entropy(), wishart.entropy(df, scale))
            assert_equal(w.pdf(x), wishart.pdf(x, df, scale))
Example #11
 def rvs(self):
     """Draw one sample. See p.582 of Gelman."""
     # Chi-squared mixing variable: a 1-D Wishart with unit scale is chi2(ν)
     x = stats.wishart(df=self.ν).rvs()
     z = stats.multivariate_normal(mean=np.zeros(self.D),
                                   cov=np.eye(self.D)).rvs()
     A = chol(inv(self.Λ))
     # Multivariate-t draw: location μ, scale chol(Λ^{-1}), ν degrees of freedom
     return self.μ + A @ z * sqrt(self.ν / x)
Example #12
 def _sample_gaussian_parameters(self, X, S, m_list, beta_list, nd_list,
                                 W_list, N, K, D):
     """ ガウス分布のパラメータΛ_k, μ_kをサンプルする
         Parameter
             X   : D*N行列   入力データ
             S   : K*N行列   予測した各クラスの割り当て
             m_list, beta_list, nd_list, W_list
             N   : データ数
             K   : クラスタ数 """
     for k in range(K):
         sum_s = 0.0
         sum_sx = np.zeros((D, 1))
         sum_sxx = np.zeros((D, D))
         for n in range(N):
             sum_s += S[k, n]
             sum_sx += S[k, n] * X[:, n:n + 1]
             sum_sxx += S[k, n] * np.dot(X[:, n:n + 1], X[:, n:n + 1].T)
          # Update the parameters; is this the correct update order?
         beta_list[k] = sum_s + self._beta
         m_list[k] = (sum_sx +
                      np.dot(self._beta, self._m_vector)) / beta_list[k]
         W_list[k] = np.linalg.inv(
             sum_sxx +
             np.dot(self._beta, np.dot(self._m_vector, self._m_vector.T)) -
             np.dot(beta_list[k], np.dot(m_list[k], m_list[k].T)) +
             np.linalg.inv(self._W))
         nd_list[k] = sum_s + self._nd
          # Sample the Gaussian parameters; updating in this order is correct
         wish = stats.wishart(df=nd_list[k], scale=W_list[k])
         self.cov_matrixes[k] = np.linalg.inv(wish.rvs(1))
         self.mu_vectors[k] = np.random.multivariate_normal(
             np.squeeze(m_list[k].T),
             1 / beta_list[k] * self.cov_matrixes[k])
         # pdb.set_trace()
     return
Example #13
 def __init__(self, D, a0, b0, m0, beta0):
     self.D = D
     self.a0 = a0
     self.B0 = np.eye(D) * b0
     self.m0 = m0
     self.beta0 = beta0
     self.precision_prior = stats.wishart(self.a0, self.B0)
Example #14
def rprior(n=1, r=3, M=np.eye(2)):
    """Draw n inverse-Wishart(r, M) samples, as inverses of Wishart(r, M^{-1}) draws."""
    out = np.zeros((n, len(M), len(M)))
    Minv = np.linalg.pinv(M)
    for a in np.arange(n):
        # Invert each Wishart draw via its Cholesky factorization
        out[a, :, :] = scipy.linalg.cho_solve(
            scipy.linalg.cho_factor(stats.wishart(r, Minv).rvs()),
            np.eye(len(Minv)))
    return out
Example #15
    def test_is_scaled_chisquared(self):
        # The 2-dimensional Wishart with an arbitrary scale matrix can be
        # transformed to a scaled chi-squared distribution.
        # For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
        # :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
        np.random.seed(482974)

        sn = 500
        df = 10
        dim = 4
        # Construct an arbitrary positive definite matrix
        scale = np.diag(np.arange(4)+1)
        scale[np.tril_indices(4, k=-1)] = np.arange(6)
        scale = np.dot(scale.T, scale)
        # Use :math:`\lambda = [1, \dots, 1]'`
        lamda = np.ones((dim,1))
        sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
        w = wishart(df, sigma_lamda)
        c = chi2(df, scale=sigma_lamda)

        # Statistics
        assert_allclose(w.var(), c.var())
        assert_allclose(w.mean(), c.mean())
        assert_allclose(w.entropy(), c.entropy())

        # PDF
        X = np.linspace(0.1,10,num=10)
        assert_allclose(w.pdf(X), c.pdf(X))

        # rvs
        rvs = w.rvs(size=sn)
        args = (df,0,sigma_lamda)
        alpha = 0.01
        check_distribution_rvs('chi2', args, alpha, rvs)
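The property the test relies on also lends itself to a quick standalone Monte Carlo check; the sketch below (assumed setup) compares the first two moments of λ'Sλ against the scaled chi-squared law:

import numpy as np
from scipy.stats import wishart, chi2

rng = np.random.default_rng(0)
n, p = 10, 3
V = np.diag([1.0, 2.0, 3.0])
lam = np.ones(p)

draws = wishart(df=n, scale=V).rvs(size=20000, random_state=rng)
projected = np.einsum('i,kij,j->k', lam, draws, lam)  # lambda' S lambda per draw

c = chi2(n, scale=lam @ V @ lam)
print(projected.mean(), c.mean())  # both ~ n * lambda' V lambda = 60
print(projected.var(), c.var())    # both ~ 2n * (lambda' V lambda)^2 = 720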
Example #17
    def __init__(self, df, scale, size=1, preload=1, *args, **kwargs):
        # Initialize parameters
        self._frozen = wishart(df, scale)
        self._rvs = self._frozen._wishart
        self.df = self.prior_df = self._frozen.df
        self.scale = self.prior_scale = self._frozen.scale

        # Calculated quantities
        self._inv_prior_scale = np.linalg.inv(self.prior_scale)

        # Setup holder variables for posterior-related quantities
        self._phi = None     # (M x M)
        self._lagged = None  # (M x T)
        self._endog = None   # (M x T)

        # Setup holder variables for calculated quantities
        self._philagged = None
        self._posterior_df = None
        self._posterior_scale = None
        self._posterior_cholesky = None

        # Set the flag to use the prior
        self._use_posterior = False

        # Initialize the distribution
        super(Wishart, self).__init__(None, size=size, preload=preload,
                                      *args, **kwargs)
Example #18
    def test_1D_is_chisquared(self):
        # The 1-dimensional Wishart with an identity scale matrix is just a
        # chi-squared distribution.
        # Test variance, mean, entropy, pdf
        # Kolmogorov-Smirnov test for rvs
        np.random.seed(482974)

        sn = 500
        dim = 1
        scale = np.eye(dim)

        df_range = np.arange(1, 10, 2, dtype=float)
        X = np.linspace(0.1,10,num=10)
        for df in df_range:
            w = wishart(df, scale)
            c = chi2(df)

            # Statistics
            assert_allclose(w.var(), c.var())
            assert_allclose(w.mean(), c.mean())
            assert_allclose(w.entropy(), c.entropy())

            # PDF
            assert_allclose(w.pdf(X), c.pdf(X))

            # rvs
            rvs = w.rvs(size=sn)
            args = (df,)
            alpha = 0.01
            check_distribution_rvs('chi2', args, alpha, rvs)
Example #21
def Normal_Wishart(mu_0, lamb, W, nu, seed=None):
    """Function extracting a Normal_Wishart random variable"""
    # first draw a Wishart distribution:
    Lambda = wishart(df=nu, scale=W, seed=seed).rvs()  # NB: Lambda is a matrix.
    # then draw a Gaussian multivariate RV with mean mu_0 and(lambda*Lambda)^{-1} as covariance matrix.
    cov = np.linalg.inv(lamb * Lambda)  # this is the bottleneck!!
    mu = multivariate_normal(mu_0, cov)
    return mu, Lambda, cov
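The inverse flagged as the bottleneck can be avoided: since lamb*Lambda is a precision matrix, mu can be drawn by solving against its Cholesky factor instead of inverting it (the same trick Example #24 below uses). A minimal sketch under that substitution, assuming mu_0 is a length-d array:

import numpy as np
from scipy.stats import wishart

def normal_wishart_chol(mu_0, lamb, W, nu, seed=None):
    # Draw the precision matrix Lambda ~ Wishart(nu, W)
    Lambda = wishart(df=nu, scale=W, seed=seed).rvs()
    # mu ~ N(mu_0, (lamb*Lambda)^{-1}) without forming the inverse:
    # if L L' = lamb*Lambda, then mu_0 + solve(L', z) has the right covariance
    L = np.linalg.cholesky(lamb * Lambda)
    z = np.random.normal(size=len(mu_0))
    mu = mu_0 + np.linalg.solve(L.T, z)
    return mu, Lambda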
Example #22
    def test_wishart_invwishart_2D_rvs(self):
        dim = 3
        df = 10

        # Construct a simple non-diagonal positive definite matrix
        scale = np.eye(dim)
        scale[0,1] = 0.5
        scale[1,0] = 0.5

        # Construct frozen Wishart and inverse Wishart random variables
        w = wishart(df, scale)
        iw = invwishart(df, scale)

        # Get the generated random variables from a known seed
        np.random.seed(248042)
        w_rvs = wishart.rvs(df, scale)
        np.random.seed(248042)
        frozen_w_rvs = w.rvs()
        np.random.seed(248042)
        iw_rvs = invwishart.rvs(df, scale)
        np.random.seed(248042)
        frozen_iw_rvs = iw.rvs()

        # Manually calculate what it should be, based on the Bartlett (1933)
        # decomposition of a Wishart into D A A' D', where D is the Cholesky
        # factorization of the scale matrix and A is the lower triangular matrix
        # with the square root of chi^2 variates on the diagonal and N(0,1)
        # variates in the lower triangle.
        np.random.seed(248042)
        covariances = np.random.normal(size=3)
        variances = np.r_[
            np.random.chisquare(df),
            np.random.chisquare(df-1),
            np.random.chisquare(df-2),
        ]**0.5

        # Construct the lower-triangular A matrix
        A = np.diag(variances)
        A[np.tril_indices(dim, k=-1)] = covariances

        # Wishart random variate
        D = np.linalg.cholesky(scale)
        DA = D.dot(A)
        manual_w_rvs = np.dot(DA, DA.T)

        # inverse Wishart random variate
        # Supposing that the inverse wishart has scale matrix `scale`, then the
        # random variate is the inverse of a random variate drawn from a Wishart
        # distribution with scale matrix `inv_scale = np.linalg.inv(scale)`
        iD = np.linalg.cholesky(np.linalg.inv(scale))
        iDA = iD.dot(A)
        manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T))

        # Test for equality
        assert_allclose(w_rvs, manual_w_rvs)
        assert_allclose(frozen_w_rvs, manual_w_rvs)
        assert_allclose(iw_rvs, manual_iw_rvs)
        assert_allclose(frozen_iw_rvs, manual_iw_rvs)
Example #24
 def sample(self):
     self._check_is_valid_density()
     precision = wishart(df=self._get_nu(),
                         scale=np.linalg.inv(self._get_psi())).rvs()
     L = np.linalg.cholesky(precision) * \
             np.sqrt(self.natural_parameters['kappa'])
     mean = self.get_mean() + \
             np.linalg.solve(L.T, np.random.normal(size=self.num_dim))
     return dict(mean=mean, precision=precision)
Example #26
def wishart_for_cov(dim, seed=None):
    # Returns a positive definite matrix of dimension dim, generated from the
    # Wishart distribution with the designated degrees of freedom and identity
    # scale matrix. Recommended: degrees of freedom = dimension of the matrix.
    if seed is not None:
        numpy.random.seed(seed)
    out = wishart.rvs(df=dim, scale=numpy.eye(dim), size=1)
    return out
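A quick sanity check of the comment's claim, as a sketch (same imports assumed as in the example):

import numpy
from scipy.stats import wishart

A = wishart.rvs(df=4, scale=numpy.eye(4))    # the same draw wishart_for_cov makes
assert (numpy.linalg.eigvalsh(A) > 0).all()  # Wishart draws are positive definite (a.s.)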
Example #27
def wishart_pdf(cov, samples, dof):
    """ Wishart pdf of the observed covariance `cov` (scaled by dof) under each
    candidate covariance matrix in `samples`. """
    if scipy.sparse.issparse(cov):
        cov = cov.toarray()
    cov = np.squeeze(cov)

    ppA = np.zeros(len(samples))
    for a in range(len(samples)):
        # Density of the dof-scaled observed covariance under Wishart(dof, samples[a])
        ppA[a] = stats.wishart(dof, samples[a]).pdf(cov * dof)

    return ppA
Example #28
    def compute_elbo(self, phi, nu, kappa, epsilon, m, L, V, N, gamma_1, gamma_2):
        """
        Compute the evidence lower bound, as defined for HRMF-DPCMM, from the variational parameters.
        :param phi: of shape [N,K]
        :param nu: of shape [1, K]
        :param kappa: of shape [1, K]
        :param epsilon: of shape [1, K]
        :param m: of shape [N, d]
        :param L: of shape [N, d, d]
        :param V: of shape [K, K]
        :param N: of shape [1, K]
        :param gamma_1: of shape [1, K]
        :param gamma_2: of shape [1, K]
        :return:
        """
        hmrf_term = 0
        log_likelihood_term = 0

        if self.weight_prior == "Dirichelet distribution":
            val = digamma(epsilon) - digamma(np.sum(epsilon))
        else:
            val = digamma(gamma_1) - digamma(gamma_1 + gamma_2) + cumsum_ex(
                digamma(gamma_2) - digamma(gamma_1 + gamma_2))

        for n in range(self.N):
            for k in range(self.K):
                log_likelihood_term += - 0.5 * phi[n, k] * nu[k] * np.trace(np.matmul(L[k,:,:], np.matmul(self.X[n,:].reshape(self.d, 1) - m[k].reshape(self.d, 1),
                                                                                        self.X[n,:].reshape(1, self.d) - m[k].reshape(1, self.d) )) ) \
                     - 0.5 * self.d * N[k] / kappa[k]
                if self.mask[n] == 0:
                    log_likelihood_term += phi[n,k] * val[k]

        for k in range(self.K):
            log_likelihood_term += 0.5 * (nu[k] - self.d + N[k]) * (multivar_digamma(nu[k], self.d) + np.log(self.eps + LA.det(L[k, :, :]))) - 0.5 * self.d * nu[k]


        for pair in self.tuples_ml:
            hmrf_term += - self.lambda_ * np.sum(np.sum(phi[pair[0], :].reshape(self.K, 1) * phi[pair[1], :].reshape(1, self.K) * V))


        for k in range(self.K):
            log_likelihood_term += wishart(nu[k],L[k,:,:]).entropy() + 0.5*(multivar_digamma(nu[k], self.d) + np.log(self.eps + LA.det(L[k, :, :]))) + 0.5*self.d*np.log(eps + kappa[k]) \
                    - np.sum(phi[:,k] * np.log(phi[:,k] + eps))
            if self.weight_prior != "Dirichelet distribution":
                log_likelihood_term += beta(gamma_1[k], gamma_2[k]).entropy()

        if self.weight_prior == "Dirichelet distribution":
            log_likelihood_term += dirichlet(epsilon).entropy()

        elbo = log_likelihood_term + hmrf_term

        return elbo/self.N, log_likelihood_term, hmrf_term
Example #29
    def test_scale_dimensions(self):
        # Test that we can call the Wishart with various scale dimensions

        # Test case: dim=1, scale=1
        true_scale = np.array(1, ndmin=2)
        scales = [
            1,  # scalar
            [1],  # iterable
            np.array(1),  # 0-dim
            np.r_[1],  # 1-dim
            np.array(1, ndmin=2)  # 2-dim
        ]
        for scale in scales:
            w = wishart(1, scale)
            assert_equal(w.scale, true_scale)
            assert_equal(w.scale.shape, true_scale.shape)

        # Test case: dim=2, scale=[[1,0],
        #                          [0,2]]
        true_scale = np.array([[1, 0], [0, 2]])
        scales = [
            [1, 2],  # iterable
            np.r_[1, 2],  # 1-dim
            np.array([
                [1, 0],  # 2-dim
                [0, 2]
            ])
        ]
        for scale in scales:
            w = wishart(2, scale)
            assert_equal(w.scale, true_scale)
            assert_equal(w.scale.shape, true_scale.shape)

        # We cannot call with a df < dim
        assert_raises(ValueError, wishart, 1, np.eye(2))

        # We cannot call with a 3-dimension array
        scale = np.array(1, ndmin=3)
        assert_raises(ValueError, wishart, 1, scale)
Example #31
    def test_multivariate_normal_density(self):
        for i in range(4):
            with util.NumpySeedContext(seed=i + 8):
                d = i + 2
                cov = stats.wishart(df=10 + d, scale=np.eye(d)).rvs(size=1)
                mean = np.random.randn(d)
                X = np.random.randn(11, d)
                den_estimate = density.GaussianMixture.multivariate_normal_density(
                    mean, cov, X)

                mnorm = stats.multivariate_normal(mean=mean, cov=cov)
                den_truth = mnorm.pdf(X)

                np.testing.assert_almost_equal(den_estimate, den_truth)
Example #32
def test_kl_between_mvn_and_std():
    """
    Check that our custom implementation of KL divergence for MVN against std MVN matches KL divergence from
    PyTorch's distribution module.
    """
    import time
    torch.manual_seed(512)
    rng = np.random.RandomState(51)
    batch_size = 12
    num_points_per_batch = 57

    # Create a distribution to check
    loc = torch.randn(batch_size,
                      num_points_per_batch,
                      dtype=utils.TORCH_FLOAT_TYPE)
    wishart_ = stats.wishart(seed=rng,
                             df=num_points_per_batch,
                             scale=np.eye(num_points_per_batch))
    cov_samples = np.stack([wishart_.rvs() for _ in range(batch_size)])
    cov_samples = torch.tensor(cov_samples, dtype=utils.TORCH_FLOAT_TYPE)
    dist_1 = distributions.MultivariateNormal(loc,
                                              covariance_matrix=cov_samples)

    # Create a std normal
    mn = torch.zeros(batch_size,
                     num_points_per_batch,
                     dtype=utils.TORCH_FLOAT_TYPE)
    cov = torch.stack([
        torch.eye(num_points_per_batch, dtype=utils.TORCH_FLOAT_TYPE)
        for _ in range(batch_size)
    ])
    std_norm = distributions.MultivariateNormal(mn, covariance_matrix=cov)

    # Do the computed/expected
    time_s = time.time()
    computed_kl = custom_distributions.kl_mvn_and_std_norm(
        dist_1).detach().numpy()
    time_mid = time.time()
    expected_kl = distributions.kl_divergence(dist_1,
                                              std_norm).detach().numpy()
    time_end = time.time()

    print(
        f"PyTorch impl: {time_end-time_mid}s;  Custom impl: {time_mid-time_s}s")
    # ^ not very scientific, but a sanity check that it is worth avoiding the
    # PyTorch implementation in the against-std-normal case.

    # Test!
    np.testing.assert_array_almost_equal(computed_kl, expected_kl)
Example #33
 def set_cluster_variance(cls, dimension, tightness=5, standard=False):
     """
     Randomly sample a symmetric matrix from a Wishart distribution.
     :param dimension: An integer that specifies the dimension of the data space.
     :param tightness: A float that controls the amount by which a covariance matrix is scaled.
     :param standard: A boolean that determines whether the covariance matrix is the identity matrix or is random.
     :return: A symmetric positive-definite covariance matrix.
     """
     if standard:
         return np.identity(dimension)
     else:
         w = wishart(df=dimension, scale=np.identity(dimension))
         cluster_variance = w.rvs()
         cluster_variance = np.random.uniform(0.1, tightness) * cluster_variance
         return cluster_variance
Example #34
def global_bge_prior(
        graph,
        total_num_variables=None,
        inverse_scale_matrix=None,
        degrees_freedom=None,
        alpha_mu=None,
        mu0=None,
        size=1,
        progress=False
):
    p = total_num_variables
    variables = graph.nodes
    k = len(variables)
    B = np.zeros((k, k))
    V = list(variables)
    scale_matrix = faster_inverse(inverse_scale_matrix)

    # # Normal distribution vectorized function
    # standard_normal = lambda t: np.random.normal(0, 1)
    # vfunc_standard_normal = np.vectorize(standard_normal)
    # if len(B[indices]) > 0:
    #     B[indices] = vfunc_standard_normal(B[indices])
    #
    # c_squared = np.zeros(k)
    # for i in range(k):
    #     c_squared[i] = stats.chi2.rvs(df=degrees_freedom - p + i + 1)
    #
    # c = np.sqrt(c_squared)
    # inverse_c = 1 / c
    # B = np.multiply(-np.array(B), inverse_c)
    # d = np.multiply(scale_matrix, c_squared)
    #
    # I = np.eye(len(variables))
    # A = I - B.T
    # inverse_sigma = A.T @ d @ A

    # TODO pull directly from wishart, compare to other way
    inverse_sigmas = stats.wishart(df=degrees_freedom, scale=scale_matrix).rvs(size=size)
    sigmas = [faster_inverse(inverse_sigma) for inverse_sigma in inverse_sigmas]
    # ipdb.set_trace()
    mu_covariances = [(1 / alpha_mu) * sigma for sigma in sigmas]
    # mu = stats.multivariate_normal(mean=mu0[V], cov=mu_covariance).rvs()
    mus = [chol_sample(mu0[V], mu_covariance) for mu_covariance in mu_covariances]

    return list(zip(inverse_sigmas, mus))
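`chol_sample` is used above but not shown on this page; a minimal stand-in consistent with how it is called (one multivariate-normal draw via a Cholesky factor) might look like this — an assumption, not the author's implementation:

import numpy as np

def chol_sample(mean, cov):
    # One N(mean, cov) draw: mean + L z, with L the Cholesky factor of cov
    L = np.linalg.cholesky(cov)
    return mean + L @ np.random.standard_normal(len(mean))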
Example #35
    def _fit(self, X):

        self.mean_ = np.mean(X, axis=0)
        data = X - self.mean_
        cov = np.dot(data.T, data)

        w = ss.wishart(df=data.shape[1] + 1,
                       scale=np.matrix(
                           np.eye(data.shape[1]) * 3 * self.max_norm /
                           (2 * data.shape[0] * self.eps)))
        noise = w.rvs(1, random_state=self.random_state)

        cov = cov + noise

        cov = cov / data.shape[0]
        ev, evec = np.linalg.eig(cov)
        evec = evec.T
        self.components_ = evec[:self.n_components]
Example #36
def updateOneComponent(X, mu, precision, muPrior, precisionPrior):
    '''
    X: (n,p) array of data
    mu: (p,1) array of current mean
    precision: (p,p) matrix of current precision
    muPrior: dictionary of prior mean and precision
    precisionPrior: dictionary of prior df and invScale
    '''

    n = X.shape[0]
    An_inv = inv(muPrior['precision'] + n * precision)
    Xsum = np.sum(X, axis=0)
    bn = muPrior['precision'].dot(muPrior['mean']) + precision.dot(Xsum)

    mu = multivariate_normal(An_inv.dot(bn), An_inv).rvs()

    S_mu = np.matmul((X - mu).T, X - mu)

    precision = wishart(precisionPrior['df'] + n,
                        inv(precisionPrior['invScale'] + S_mu)).rvs()

    return mu, precision
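A minimal usage sketch for the update above; the prior dictionary keys simply mirror those the function body reads, the data is made up, and the example's own module-level imports (inv, multivariate_normal, wishart) are assumed to be in scope:

import numpy as np

p = 2
X = np.random.randn(100, p) + np.array([1.0, -1.0])

muPrior = {'mean': np.zeros(p), 'precision': np.eye(p)}
precisionPrior = {'df': p + 2, 'invScale': np.eye(p)}

mu, precision = np.zeros(p), np.eye(p)
for _ in range(100):  # a few Gibbs sweeps
    mu, precision = updateOneComponent(X, mu, precision, muPrior, precisionPrior)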
Example #37
def sampleNewComp(Knew, muPrior, precisionPrior):
    '''
    Knew: number of new components to generate
    muPrior: dictionary of prior mean and precision
    precisionPrior: dictionary of prior df and invScale
    
    return: a list of NEW components
    '''
    comps = []

    if Knew == 0:
        return comps

    muCov = inv(muPrior['precision'])
    precisionScale = inv(precisionPrior['invScale'])

    for k in range(Knew):
        mu = rng.multivariate_normal(muPrior['mean'], muCov)
        precision = wishart(precisionPrior['df'], precisionScale).rvs()

        comps.append((mu, precision))

    return comps
Example #38
    def sample(self, indices, prior_parameter):
        kappa, nu, mu, psi = prior_parameter.get_kappa_nu_mu_psi()

        y_indices = self.y[indices]
        u_indices = self.parameter.u[indices]
        uy_indices = np.outer(u_indices, np.ones(self.num_dim)) * y_indices
        u_sum = np.sum(u_indices)
        uy_sum = np.sum(uy_indices, axis=0)
        uyy_sum = y_indices.T.dot(uy_indices)

        kappa_post = kappa + u_sum
        nu_post = nu + len(indices)
        mu_post = (kappa * mu + uy_sum) / (kappa + u_sum)
        psi_post = psi + uyy_sum + (kappa * np.outer(mu, mu) -
                                    kappa_post * np.outer(mu_post, mu_post))

        precision = wishart(df=nu_post, scale=np.linalg.inv(psi_post)).rvs()
        L = np.linalg.cholesky(precision) * \
                np.sqrt(kappa_post)
        mean = mu_post + np.linalg.solve(L.T,
                                         np.random.normal(size=self.num_dim))

        return dict(mean=mean, precision=precision)
Example #39
def test_diagonal_normal_wishart():
    import numpy as np
    from scipy.stats import wishart
    x = np.linspace(1e-6, 20, 100)

    print("Testing wishart entropy/logprob vs scipy implementation...")
    for k in range(1000):
        df_val = torch.randn(1).exp() + 2
        scale_val = torch.randn(1).exp()

        scipy_dist = wishart(df=df_val.item(), scale=scale_val.item())
        torch_dist = DiagonalWishart(
            scale_val.unsqueeze(-1),
            df_val
        )

        torch_ent = torch_dist.entropy()[0]
        scipy_ent = torch.FloatTensor([scipy_dist.entropy()])
        if (rel_error(torch_ent, scipy_ent) > 1e-3).any():
            raise ValueError(
                "Entropies of torch and scipy versions don't match"
            )

        scipy_w = torch.FloatTensor(scipy_dist.logpdf(x))
        torch_w = torch_dist.log_prob(torch.FloatTensor(x).unsqueeze(-1))

        if (rel_error(torch_w, scipy_w) > 1e-6).any():
            raise ValueError(
                "Log pdf of torch and scipy versions doesn't match"
            )
    print("Passed")

    print("Testing wishart KL divergence...")
    df1, scale1 = torch.randn(32).exp() + 2, torch.randn(32).exp() + 1e-5
    df2, scale2 = torch.randn(32).exp() + 2, torch.randn(32).exp() + 1e-5
    init_df1, init_scale1 = df1[0].clone(), scale1[0].clone()
    dist2 = DiagonalWishart(scale2.unsqueeze(-1), df2)
    df1.requires_grad, scale1.requires_grad = True, True
    gamma = 0.1
    for k in range(10000):
        dist1 = DiagonalWishart(scale1.unsqueeze(-1), df1)
        loss = kl_divergence(dist1, dist2).mean()
        loss.backward()
        with torch.no_grad():
            scale1 = scale1 - gamma * scale1.grad
            df1 = df1 - gamma * df1.grad
        scale1.requires_grad, df1.requires_grad = True, True

    print('Distribution 1 - initial df %.3f and scale %.3f' % (
        init_df1, init_scale1
    ))
    print('Distribution 1 - trained df %.3f and scale %.3f' % (
        df1[0], scale1[0]
    ))
    print('Distribution 2 - target df %.3f and scale %.3f' % (
        df2[0], scale2[0]
    ))
    print("Passed")

    print("Testing Normal Wishart Distribution...")

    torch_dist = NormalDiagonalWishart(
        torch.ones((100, 1, 32, 32, 1)),
        torch.ones((100, 1, 32, 32, 1)),
        3 * torch.ones((100, 1, 32, 32)),
        3 * torch.ones((100, 1, 32, 32)),
    )

    ex_w = torch_dist.log_prob(
        torch.ones(100, 1, 32, 32, 1),
        torch.ones(100, 1, 32, 32, 1),
    )
    assert ex_w.shape == (100, 1, 32, 32)
    print("Passed")
Example #40
covsTeo, covsNum = np.array(covSuperList).transpose((1, 2, 0, 3, 4, 5)).reshape((2, 4, -1, 2, 2))

scaleNum = np.linalg.inv(covsNum)


# take the Frobenius norm of each matrix
covSuperFrob = np.linalg.norm(covSuperList, axis=(4, 5))

# %%
p = 2
matt = np.eye(p)

np.exp(-p/2) / spe.gamma(N/2) / 2**(N*p/2) / np.linalg.det(matt)**(p/2+0.5)


sts.wishart.pdf(matt, df=N-1, scale=matt)



# %%
rv = sts.wishart(df=N-1, scale=matt)
rv.pdf(matt)


frobQuotList = (covSuperFrob[:, 0] / covSuperFrob[:,1]).transpose((1, 0, 2)).reshape((4, -1))


plt.plot(frobQuotList)
plt.hist(frobQuotList[3])
plt.violinplot(frobQuotList.T, showmeans=True, showextrema=False)