Example 1
 def joint_density(df_l, verbose=False):
     df = df_l
     val = -sum((wishart.logpdf(x, df=df, scale=mean_wishart / df)
                 for x in sample_of_matrices))
     if verbose:
         print(df, ':', val)
     return val
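A minimal driver for this excerpt, assuming the module-level names it relies on (sample_of_matrices, mean_wishart) are set up as below; a sketch, not the original project's code:

import numpy as np
from scipy.stats import wishart
from scipy.optimize import minimize_scalar

true_df, dim = 10, 3
mean_wishart = np.eye(dim)
# draw synthetic observations so joint_density has data to score
sample_of_matrices = wishart.rvs(df=true_df, scale=mean_wishart / true_df, size=50)

# recover df by minimizing the negative joint log-density
res = minimize_scalar(joint_density, bounds=(dim, 100), method='bounded')
print('estimated df:', res.x)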
Example 2
 def log_likelihood(self, params_opt, node_name):
     params_node = self.get_node_params(params_opt, node_name)
     m_sigma = self.calculate_sigma(params_node)
     if node_name in self.m_cov:
         m_cov = self.m_cov[node_name]
     else:
         m_cov = self.get_matrices(params_node, 'Cov')
     df = sum(p.shape[0] for _, p in self.m_profiles.items())
     w = wishart.logpdf(m_cov, df=df, scale=m_sigma / df)
     return w
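The scale=m_sigma / df pattern that recurs through these examples makes the Wishart mean equal to m_sigma, since E[W] = df * scale for W ~ Wishart(df, scale). A quick self-contained check (all names local to this sketch):

import numpy as np
from scipy.stats import wishart

df, sigma = 50, np.diag([1.0, 2.0, 3.0])
draws = wishart.rvs(df=df, scale=sigma / df, size=20000)
# the sample mean of the draws should be close to sigma
assert np.allclose(draws.mean(axis=0), sigma, atol=0.05)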
Example 3
    def log_likelihood(self, params):
        """ Likelihood of data """
        # m_sigma = self.calculate_sigma(params)
        # w = 0
        # for p in self.m_profiles:
        #     w += multivariate_normal.logpdf(p, np.zeros(p.shape), m_sigma)

        m_sigma = self.calculate_sigma(params)
        df = self.m_profiles.shape[0]
        w = wishart.logpdf(self.m_cov, df=df, scale=m_sigma / df)
        return w
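The commented-out block and the active Wishart line are equivalent as likelihoods: if S is the sample covariance of n zero-mean observations, then S ~ Wishart(df=n, scale=sigma/n), and the two log-likelihoods differ only by a sigma-independent constant. A self-contained check of that claim:

import numpy as np
from scipy.stats import wishart, multivariate_normal

rng = np.random.default_rng(0)
n, p = 200, 3
X = rng.standard_normal((n, p))
S = X.T @ X / n  # sample covariance of zero-mean data

def wish_ll(sigma):
    return wishart.logpdf(S, df=n, scale=sigma / n)

def mvn_ll(sigma):
    return multivariate_normal.logpdf(X, np.zeros(p), sigma).sum()

s1, s2 = np.eye(p), 2.0 * np.eye(p)
# differences across candidate sigmas agree, so both pick the same argmax
assert np.isclose(wish_ll(s1) - wish_ll(s2), mvn_ll(s1) - mvn_ll(s2))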
Example 4
    def test_log_pdf(self, dtype_dof, dtype, degrees_of_freedom, random_state,
                     scale_is_samples, rv_is_samples, num_data_points, num_samples, broadcast):
        # Create positive semi-definite matrices
        rv = make_spd_matrices_4d(num_samples, num_data_points, degrees_of_freedom, random_state=random_state)
        if broadcast:
            scale = make_spd_matrix(n_dim=degrees_of_freedom, random_state=random_state)
        else:
            scale = make_spd_matrices_4d(num_samples, num_data_points, degrees_of_freedom, random_state=random_state)

        degrees_of_freedom_mx = mx.nd.array([degrees_of_freedom], dtype=dtype_dof)
        degrees_of_freedom = degrees_of_freedom_mx.asnumpy()[0]  # ensures the correct dtype

        scale_mx = mx.nd.array(scale, dtype=dtype)
        if not scale_is_samples:
            scale_mx = add_sample_dimension(mx.nd, scale_mx)
        scale = scale_mx.asnumpy()

        rv_mx = mx.nd.array(rv, dtype=dtype)
        if not rv_is_samples:
            rv_mx = add_sample_dimension(mx.nd, rv_mx)
        rv = rv_mx.asnumpy()

        is_samples_any = scale_is_samples or rv_is_samples

        if broadcast:
            scale_np = np.broadcast_to(scale, rv.shape)
        else:
            n_dim = 1 + len(rv.shape) if is_samples_any and not rv_is_samples else len(rv.shape)
            scale_np = numpy_array_reshape(scale, is_samples_any, n_dim)

        rv_np = numpy_array_reshape(rv, is_samples_any, degrees_of_freedom)

        r = []
        for s in range(num_samples):
            a = []
            for i in range(num_data_points):
                a.append(wishart.logpdf(rv_np[s][i], df=degrees_of_freedom, scale=scale_np[s][i]))
            r.append(a)
        log_pdf_np = np.array(r)

        var = Wishart.define_variable(shape=rv.shape[1:], dtype=dtype, rand_gen=None).factor
        variables = {var.degrees_of_freedom.uuid: degrees_of_freedom_mx, var.scale.uuid: scale_mx,
                     var.random_variable.uuid: rv_mx}
        log_pdf_rt = var.log_pdf(F=mx.nd, variables=variables)

        assert np.issubdtype(log_pdf_rt.dtype, dtype)
        assert is_sampled_array(mx.nd, log_pdf_rt) == is_samples_any
        if is_samples_any:
            assert get_num_samples(mx.nd, log_pdf_rt) == num_samples, (get_num_samples(mx.nd, log_pdf_rt), num_samples)
        assert np.allclose(log_pdf_np, log_pdf_rt.asnumpy())
Example 5
def test_CovUnconstrainedCholeskyWishartReg(seeded_rng):
    cov = CovUnconstrainedCholeskyWishartReg(size=m)

    L = cov.L.numpy()
    cov_np = L @ L.T

    logdet_np, sinv_np, sinvx_np = logdet_sinv_np(X, cov_np)
    assert_allclose(logdet_np, cov.logdet, rtol=rtol)
    assert_allclose(sinv_np, cov.solve(eye), rtol=rtol)
    assert_allclose(sinvx_np, cov.solve(X_tf), rtol=rtol)
    # now compute the regularizer
    reg = wishart.logpdf(cov_np, df=m + 2, scale=1e10 * np.eye(m))
    assert_allclose(reg, cov.logp, rtol=rtol)
Example 6
def loglike_GW(obs1, obs2, scale, df, mean, scaler):
    like_log = 0
    # Wishart log-likelihood
    like_log += wishart.logpdf(obs2, df, scale)
    # MVN log-likelihood: scale the precision matrix first
    precision = []
    for i in range(n_factor):
        precision.append([])
        for j in range(n_factor):
            precision[i].append(obs2[i][j] * scaler)
    like_log += loglike_MVN(obs1, mean, precision)
    return like_log
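The nested loop above just scales obs2 elementwise; with numpy the same precision matrix is a one-liner (equivalent sketch, same argument names as the excerpt):

import numpy as np

def scaled_precision(obs2, scaler):
    # elementwise scaling, equivalent to the double loop above
    return np.asarray(obs2) * scaler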
Example 9
def loglike_GW(obs1, obs2, scale, df, mean, scaler):
    like_log = 0
    # Wishart log-likelihood
    like_log += wishart.logpdf(obs2, df, scale)
    # MVN log-likelihood: scale the precision matrix first
    precision = []
    for i in range(n_factor):
        precision.append([])
        for j in range(n_factor):
            precision[i].append(obs2[i][j] * scaler)
    like_log += loglike_MVN(obs1, mean, precision)
    ## NOTE:
    '''
    cov = inv(precision)
    like_log += multivariate_normal.logpdf(obs1, mean, cov)
    '''
    return like_log
Example 10
    def getLogPosteriorProbability(self, latent_z, n_cluster_samples, params,
                                   normal_insts):
        n_sample = len(latent_z)
        n_i = [None] * len(n_cluster_samples)
        for idx, c in n_cluster_samples.items():
            n_i[idx] = c
        n_i = np.array(n_i)

        tmp = np.log(self.alpha) * self.n_cluster + gammaln(n_i).sum()
        p_s = tmp - logRisingFact(self.alpha, n_sample)
        posterior = p_s
        for k in range(self.n_cluster):
            cov = np.linalg.inv(self.beta * params[k][1])
            p_mean = multivariate_normal(self.u0, cov).logpdf(params[k][0])
            p_covar = wishart.logpdf(params[k][1], self.new, self.covar)
            posterior += p_mean + p_covar
            sample_k = self.getSampleK(k, latent_z)
            posterior += normal_insts[k].logpdf(sample_k).sum()
        return posterior
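logRisingFact is an external helper in this excerpt; a minimal implementation, assuming it means the log rising factorial log[a (a+1) ... (a+n-1)], follows from the identity a^(n) = Gamma(a+n) / Gamma(a):

from scipy.special import gammaln

def logRisingFact(a, n):
    # log of the rising factorial a (a+1) ... (a+n-1)
    return gammaln(a + n) - gammaln(a)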
Example 11
def adjust_treemix_df(wishart_df, starting_tree):
    cov = make_covariance(starting_tree)
    # evaluate at the Wishart mode: for an observed matrix cov, the
    # scale cov / df maximizes the log-density
    lmax = wishart.logpdf(cov, scale=cov / wishart_df, df=wishart_df)
    return lmax
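A quick check that scale = V / df really is the maximizer used above (names local to this sketch):

import numpy as np
from scipy.stats import wishart

V, df = np.eye(4), 20.0
best = wishart.logpdf(V, df=df, scale=V / df)
for c in (0.5, 0.9, 1.1, 2.0):
    # any rescaling of the mode-matching scale lowers the log-density
    assert wishart.logpdf(V, df=df, scale=c * V / df) <= best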
Example 12
def logpdf(x, v, n):
    correction = (n * v.shape[0] / 2.) * np.log(2) + \
                 multigammaln(n / 2, v.shape[0])
    return [np.sum(wishart.logpdf(x, n, v)) + correction]
Example 13
 def propose(self):
     vx = wishart.rvs(self.df, self.cx)
     vy = wishart.rvs(self.df, self.cy)
     # forward-minus-backward proposal log-density for the MH correction
     fb = (wishart.logpdf(vx, self.df, self.cx) + wishart.logpdf(vy, self.df, self.cy)
           - (wishart.logpdf(self.cx, self.df, vx) + wishart.logpdf(self.cy, self.df, vy)))
     return AlignedNormal2D(cx=vx, cy=vy), fb
Example 14
 def compute_prior(self):
     return wishart.logpdf(self.c, self.df, numpy.eye(self.df))
Example 15
def logwishart(covariance, mean, df):
    if isinstance(covariance, (int, float)):
        return chi2.logpdf(covariance, scale=mean / df, df=df)
    return scip_wishart.logpdf(covariance, scale=mean / df, df=df)
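The scalar branch relies on the fact that a 1x1 Wishart is a scaled chi-squared with the same scale convention; a quick consistency check:

import numpy as np
from scipy.stats import chi2, wishart

x, mean, df = 2.3, 1.7, 9
a = chi2.logpdf(x, df=df, scale=mean / df)
b = wishart.logpdf(np.array([[x]]), df=df, scale=np.array([[mean / df]]))
assert np.isclose(a, b)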
Example 16
 def wishart_distance(a, b):
     try:
         # symmetrized cross log-density of the two matrices
         ans = (wishart.logpdf(a, df=df, scale=b / df) + wishart.logpdf(b, df=df, scale=a / df)) / 2
     except np.linalg.LinAlgError:
         ans = np.nan
     return ans
Example 17
def get_average_deviation(scale, df, reps=1000):
    sims = [wishart.rvs(scale=scale / df, df=df) for _ in range(reps)]
    wishs = [wishart.logpdf(cov, df=df, scale=scale / df) for cov in sims]
    dists = [dist(cov, scale) for cov in sims]
    return (min(wishs), max(wishs)), (min(dists), max(dists))
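dist is an external helper here; a plausible stand-in, labeled hypothetical, is the Frobenius distance:

import numpy as np

def dist(a, b):
    # Frobenius norm of the difference between two matrices
    return np.linalg.norm(np.asarray(a) - np.asarray(b))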
Example 18

            # calculate likelihood manually, following https://en.wikipedia.org/wiki/Wishart_distribution
            # and https://math.stackexchange.com/questions/2803164/degrees-of-freedom-in-a-wishart-distribution
            # and /Users/momchil/anaconda3/lib/python3.7/site-packages/semopy/stats.py: obj_mlw()
            # and /Users/momchil/anaconda3/lib/python3.7/site-packages/semopy/stats.py: calc_bic()
            # and /Users/momchil/anaconda3/lib/python3.7/site-packages/semopy/stats.py: calc_likelihood()
            model.update_matrices(model.param_vals)
            S = model.mx_cov  # empirical covariance matrix
            sigma, _ = model.calc_sigma()  # model covariance matrix
            p = sigma.shape[0]  # number of observed variables
            n = model.mx_data.shape[0]  # dof = number of data points (the Wishart
            # generalizes the chi-squared to MVN: a sum of n squared i.i.d. MVN draws)
            k = len(model.param_vals)  # number of free parameters

            loglik = wishart.logpdf(S, df=n, scale=sigma)
            bic = k * np.log(n) - 2 * loglik
            lme = -0.5 * bic

            logliks[i, j] = loglik
            ks[i, j] = k
            ns[i, j] = n
            bics[i, j] = bic
            lmes[i, j] = lme

            print('subj=', i, ' mod=', j, filepath, ' k=', k, ' n=', n,
                  ' loglik=', logliks[i, j])
            print(model.mx_cov.shape)

    print(lmes.shape)
Example 19
 def propose(self):
     val = wishart.rvs(self.df, self.c)
     fb = wishart.logpdf(val, self.df, self.c) - wishart.logpdf(self.c, self.df, val)
     return FreeNormal2D(c=val), fb
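Both propose methods return the proposal together with fb, the forward-minus-backward proposal log-density; in a Metropolis-Hastings step it enters the log acceptance ratio as below (a sketch with hypothetical names, not code from the excerpted project):

import numpy as np

def mh_step(current, log_post, rng):
    proposal, fb = current.propose()
    # log alpha = log p(x') - log p(x) + log q(x|x') - log q(x'|x)
    log_alpha = log_post(proposal) - log_post(current) - fb
    if np.log(rng.uniform()) < log_alpha:
        return proposal
    return current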