Code Example #1
File: test_multivariate.py  Project: wodole/scipy
    def test_frozen_matrix_normal(self):
        for i in range(1, 5):
            for j in range(1, 5):
                M = 0.3 * np.ones((i, j))
                U = 0.5 * np.identity(i) + 0.5 * np.ones((i, i))
                V = 0.7 * np.identity(j) + 0.3 * np.ones((j, j))

                frozen = matrix_normal(mean=M, rowcov=U, colcov=V)

                rvs1 = frozen.rvs(random_state=1234)
                rvs2 = matrix_normal.rvs(mean=M,
                                         rowcov=U,
                                         colcov=V,
                                         random_state=1234)
                assert_equal(rvs1, rvs2)

                X = frozen.rvs(random_state=1234)

                pdf1 = frozen.pdf(X)
                pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
                assert_equal(pdf1, pdf2)

                logpdf1 = frozen.logpdf(X)
                logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V)
                assert_equal(logpdf1, logpdf2)
Code Example #2
File: simulation.py  Project: Vignesh-Sairaj/cm-229
def generate_pheno(kinship, hsquared, N=300, P=15, rho=0.45):
    '''
    Generates phenotype data from a matrix normal (MN) distribution.
    N = n_samples, P = n_traits, and rho is the autocorrelation parameter for B.
    The kinship matrix must be N x N.
    RETURNS an ndarray of shape (N, P).
    '''

    assert kinship.shape == (N, N)

    B = generate_B(P, rho)
    E = generate_E(P)

    chumma = np.linalg.cholesky(kinship)  # Cholesky factor of the kinship matrix (computed but not used below)

    # Genetic component: rows covary via the kinship matrix, columns via the
    # scaled trait correlation derived from B.
    U = matrix_normal.rvs(rowcov=kinship, colcov=hsquared * cov2corr(B))
    # Environmental component: independent rows, residual trait correlation from E.
    epsilon = matrix_normal.rvs(rowcov=np.eye(N),
                                colcov=(1 - hsquared) * cov2corr(E))

    return U + epsilon
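For context, generate_B, generate_E, and cov2corr are project-specific helpers not shown above. Below is a minimal sketch of how this simulator might be exercised, assuming an AR(1)-style generate_B(P, rho), an identity generate_E(P), and cov2corr taken from statsmodels; these stand-ins are assumptions, not the project's actual definitions.

import numpy as np
from scipy.stats import matrix_normal
from statsmodels.stats.moment_helpers import cov2corr  # assumed source of cov2corr

def generate_B(P, rho):
    # Hypothetical stand-in: AR(1) correlation matrix with parameter rho.
    idx = np.arange(P)
    return rho ** np.abs(idx[:, None] - idx[None, :])

def generate_E(P):
    # Hypothetical stand-in: independent residual traits.
    return np.eye(P)

N, P = 50, 5
kinship = 0.9 * np.eye(N) + 0.1 * np.ones((N, N))  # toy positive-definite kinship
Y = generate_pheno(kinship, hsquared=0.6, N=N, P=P)
print(Y.shape)  # (50, 5)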
Code Example #3
def mniw_sample(mniw, return_stat=False):
    M, V, psi, nu = mniw_nat_to_std(*mniw)

    # Standard MNIW draw: sample the covariance from an inverse Wishart,
    # then sample A from a matrix normal conditioned on that covariance.
    sigma = invwishart.rvs(scale=psi, df=int(nu))
    A = matrix_normal.rvs(mean=M, rowcov=sigma, colcov=V)

    if return_stat:
        sigma_inv = np.linalg.inv(sigma)
        x1 = -0.5 * A.T @ sigma_inv @ A
        x2 = A.T @ sigma_inv
        x3 = -0.5 * sigma_inv
        x4 = -0.5 * np.linalg.slogdet(sigma)[1]
        return x1, x2, x3, x4

    return A, sigma
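The sampling order above is the usual matrix normal inverse-Wishart factorization: first draw sigma ~ IW(psi, nu), then A | sigma ~ MN(M, rowcov=sigma, colcov=V). A minimal self-contained sketch with made-up standard-form parameters (bypassing the project's mniw_nat_to_std):

import numpy as np
from scipy.stats import invwishart, matrix_normal

d, k = 3, 2                  # A has shape (d, k)
M = np.zeros((d, k))         # mean
V = np.eye(k)                # column covariance
psi = np.eye(d)              # inverse-Wishart scale
nu = d + 2                   # inverse-Wishart degrees of freedom

sigma = invwishart.rvs(df=nu, scale=psi, random_state=0)
A = matrix_normal.rvs(mean=M, rowcov=sigma, colcov=V, random_state=1)
print(A.shape, sigma.shape)  # (3, 2) (3, 3)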
Code Example #5
    def test_default_inputs(self):
        # Check that default argument handling works
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows))
        V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols))
        Z = np.zeros((num_rows, num_cols))
        Zr = np.zeros((num_rows, 1))
        Zc = np.zeros((1, num_cols))
        Ir = np.identity(num_rows)
        Ic = np.identity(num_cols)
        I1 = np.identity(1)

        assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(mean=M).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(rowcov=U).shape,
                     (num_rows, 1))
        assert_equal(matrix_normal.rvs(colcov=V).shape,
                     (1, num_cols))
        assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape,
                     (num_rows, num_cols))

        assert_equal(matrix_normal(mean=M).rowcov, Ir)
        assert_equal(matrix_normal(mean=M).colcov, Ic)
        assert_equal(matrix_normal(rowcov=U).mean, Zr)
        assert_equal(matrix_normal(rowcov=U).colcov, I1)
        assert_equal(matrix_normal(colcov=V).mean, Zc)
        assert_equal(matrix_normal(colcov=V).rowcov, I1)
        assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic)
        assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir)
        assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z)
Code Example #6
newGP = GP(zeroMean, expCov(1, rho))

U = newGP.covMatrix(locs)

var = 4

# V = np.array([[1]]) * var
V = np.array([[1, -0.9], [-0.9, 1]]) * var
# V = np.array([[1, 0.9], [0.9, 1]]) * var
# V = np.array([[1, 0.5], [0.5, 1]]) * var
# V = np.array([[1, -4.5], [-4.5, 25]]) * var

# mu = -2
mu = np.array([-2, -3])

X = matrix_normal.rvs(rowcov=U, colcov=V) + mu

def multExpit(x):
    # Multinomial logit: map K latent values to K + 1 probabilities,
    # the last entry being the implicit reference category (latent value 0).
    N = np.sum(np.exp(x))
    probs = np.array([np.exp(i) / (1 + N) for i in x])
    return np.append(probs, 1 - np.sum(probs))


probs = np.array([multExpit(x) for x in X])

nch = probs.shape[1]

colours = np.array([np.random.choice(nch,p=p) for p in probs])

locs1 = locs[colours == 0]
locs2 = locs[colours == 1]
Code Example #7
def MCMCadams(size, lam_init, rho_init, T_init, thismtPP, nInsDelMov, kappa,
              delta, L, mu_lam, sigma2, p, a, b, n, V, mu_init, mean_mu,
              var_mu, diagnostics, res, thin, GP_mom_scale, range_mom_scale):

    ### independent type prior mean

    ### initialize GP
    thisGP = GP(zeroMean, expCov(1, rho_init))

    ### location container initialization
    K = thismtPP.K
    totLocInit = np.concatenate(
        (thismtPP.locs, PPP.randomHomog(lam=int(lam_init // (K + 1))).loc), 0)
    nObs = thismtPP.nObs

    locations = bdmatrix(int(10 * lam_init), totLocInit, nObs,
                         "locations")  # initial size is a bit of black magic

    ### cov matrix initialization

    Rmat = dsymatrix(int(10 * lam_init), thisGP.covMatrix(totLocInit), nObs)

    ### GP values container initialization

    # ### try to initiate GP in logical position

    # mean_init = np.zeros(shape=(locations.nThin+nObs,K))

    # mean_init[:nObs,:] = np.transpose(np.linalg.cholesky(np.linalg.inv(T_init))@thismtPP.typeMatrix)

    # ####

    values = bdmatrix(
        int(10 * lam_init),
        matrix_normal.rvs(rowcov=Rmat.sliceMatrix(),
                          colcov=np.linalg.inv(T_init)) + mu_init, nObs,
        "values")

    ### parameters containers
    lams = np.empty(shape=(size))
    rhos = np.empty(shape=(size))

    Ts = np.empty(shape=(size, K, K))
    mus = np.empty(shape=(size, 1, K))
    Nthins = np.empty(shape=(size))

    ### independent type prior mean
    Vm1 = np.linalg.inv(V)

    ###
    lams[0] = lam_init
    rhos[0] = rho_init
    Ts[0] = T_init
    mus[0] = mu_init
    Nthins[0] = locations.nThin

    ### instantiate containers for diagnostics
    if diagnostics:
        danceLocs = np.empty(shape=(int(size // thin), int(10 * lam_init), 2))
        GPvaluesAtObs = np.empty(shape=(int(size // thin), nObs, K))
        fieldsGrid = np.empty(shape=(int(size // thin), res**2, K + 1))

        danceLocs[0, :int(nObs + Nthins[0])] = locations.totLoc()
        GPvaluesAtObs[0] = values.obsLoc()

        gridLoc = makeGrid([0, 1], [0, 1], res)

        s_11 = thisGP.cov(gridLoc, gridLoc)
        S_21 = thisGP.cov(locations.totLoc(), gridLoc)

        S_12S_22m1 = np.dot(np.transpose(S_21), Rmat.inver)

        muGrid = np.dot(S_12S_22m1, values.totLoc() - mus[0]) + mus[0]
        spatSig = s_11 - np.dot(S_12S_22m1, S_21)

        A = np.linalg.cholesky(Ts[0])

        Am = sp.linalg.solve_triangular(A, np.identity(K), lower=True)

        newVal = np.linalg.cholesky(spatSig) @ np.random.normal(
            size=(res**2, K)) @ Am + muGrid

        fieldsGrid[0] = lams[0] * np.array([multExpit(val) for val in newVal])

    i = 1
    diagnb = 1
    while i < size:

        j = 0
        while j < nInsDelMov:
            birthDeathMove(lams[i - 1], kappa, thisGP, locations, values, Rmat,
                           Ts[i - 1], mus[i - 1])
            j += 1

        Nthins[i] = locations.nThin

        # # locTot_prime = np.concatenate((locThin_prime,thisPPP.loc))
        # valTot_prime = np.concatenate((valThin_prime,obsVal[i-1]))

        # nthin = locThin_prime.shape[0]

        # # Sigma = thisGP.covMatrix(locTot_prime)
        # A = np.linalg.cholesky(Sigmas[i])
        # ntot = A.shape[0]

        # whiteVal_prime = sp.linalg.solve_triangular(A,np.identity(ntot),lower=True)@valTot_prime

        # functionSampler(delta,L,values,Sigma)

        rhos[i] = functionRangeSampler(delta, L, values, Rmat, rhos[i - 1],
                                       Ts[i - 1], mus[i - 1],
                                       thismtPP.typeMatrix, a, b, GP_mom_scale,
                                       range_mom_scale)
        Ts[i] = typePrecisionSampler(n, Vm1, values, Rmat, mus[i - 1])
        mus[i] = typeMeanSampler(values, Rmat, Ts[i], mean_mu, var_mu)
        thisGP = GP(zeroMean, expCov(1, rhos[i]))

        # valTot_prime = A @ whiteVal_prime

        # thinLoc[i] = locThin_prime
        # thinVal[i] = valTot_prime[:nthin,:]
        # obsVal[i] = valTot_prime[nthin:,:]

        # ntot = valTot_prime.shape[0]

        lams[i] = intensitySampler(mu_lam, sigma2, values.nThin + values.nObs)

        if diagnostics and i % thin == 0:

            danceLocs[diagnb, :int(nObs + Nthins[i])] = locations.totLoc()
            GPvaluesAtObs[diagnb] = values.obsLoc()

            s_11 = thisGP.cov(gridLoc, gridLoc)
            S_21 = thisGP.cov(locations.totLoc(), gridLoc)

            S_12S_22m1 = np.dot(np.transpose(S_21), Rmat.inver)

            muGrid = np.dot(S_12S_22m1, values.totLoc() - mus[i]) + mus[i]
            spatSig = s_11 - np.dot(S_12S_22m1, S_21)

            A = np.linalg.cholesky(Ts[i])

            Am = sp.linalg.solve_triangular(A, np.identity(K), lower=True)

            newVal = np.linalg.cholesky(spatSig) @ np.random.normal(
                size=(res**2, K)) @ Am + muGrid

            fieldsGrid[diagnb] = lams[i] * np.array(
                [multExpit(val) for val in newVal])

            diagnb += 1

        if p:
            ### next sample
            locations.nextSamp()
            values.nextSamp()

        print(i)
        i += 1

    if diagnostics:

        ### dancing locations plot
        mpdf = PdfPages('0thinLocs.pdf')

        diagnb = 0
        while (diagnb < int(size // thin)):

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.set_aspect('equal')

            plt.plot(danceLocs[diagnb, nObs:int(nObs + Nthins[diagnb * thin]),
                               0],
                     danceLocs[diagnb, nObs:int(nObs + Nthins[diagnb * thin]),
                               1],
                     'o',
                     c=(0.75, 0.75, 0.75))

            for pp in thismtPP.pps:
                plt.plot(pp.loc[:, 0], pp.loc[:, 1], 'o')
            plt.xlim(0, 1)
            plt.ylim(0, 1)

            # plt.show()
            mpdf.savefig(bbox_inches='tight')
            plt.close(fig)

            diagnb += 1

        mpdf.close()

        ### GP traces at observed locations

        fig, axs = plt.subplots(nObs, figsize=(10, 1.5 * nObs))

        obsNB = 0
        colNB = 0
        while (obsNB < nObs):
            colNB = 0
            while (colNB < K):

                if thismtPP.typeMatrix[colNB, obsNB] == 1:

                    axs[obsNB].plot(GPvaluesAtObs[:, obsNB, colNB],
                                    linewidth=2)

                else:
                    axs[obsNB].plot(GPvaluesAtObs[:, obsNB, colNB],
                                    linestyle="dashed")

                colNB += 1

            obsNB += 1

        # plt.show()
        fig.savefig("0GPtraces.pdf", bbox_inches='tight')
        plt.close(fig)

        ### mean intensities

        meanFields = np.mean(fieldsGrid, axis=0, dtype=np.float32)

        maxi = np.max(meanFields)
        mini = np.min(meanFields)

        k = 0
        while k < K + 1:

            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.set_aspect('equal')

            plt.xlim(0, 1)
            plt.ylim(0, 1)

            imGP = np.transpose(meanFields[:, k].reshape(res, res))

            x = np.linspace(0, 1, res + 1)
            y = np.linspace(0, 1, res + 1)
            X, Y = np.meshgrid(x, y)

            # fig = plt.figure()
            # axs[k] = fig.add_subplot(111)
            ax.set_aspect('equal')

            ff = ax.pcolormesh(X, Y, imGP, cmap='gray', vmin=mini, vmax=maxi)

            fig.colorbar(ff)

            for pp in thismtPP.pps:
                ax.plot(pp.loc[:, 0], pp.loc[:, 1], 'o', c="tab:orange")

            # plt.scatter(pointpo.loc[:,0],pointpo.loc[:,1], color= "black", s=1)

            # plt.show()

            fig.savefig("0IntFields" + str(k) + ".pdf", bbox_inches='tight')
            plt.close(fig)

            k += 1

        # fig.savefig("0IntFields.pdf", bbox_inches='tight')
        # plt.close(fig)

    return (lams, rhos, Ts, mus, Nthins)
Code Example #8
def GOE(dim=(100, 100)):  # Samples from a Gaussian orthogonal ensemble; dim must be square
    m = matrix_normal.rvs(mean=np.zeros(dim))
    return (m + m.T) / np.sqrt(2)  # symmetrize: off-diagonal variance 1, diagonal variance 2
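A quick usage check for the sampler above (the output is symmetric by construction, and for large dimensions its eigenvalue histogram approaches Wigner's semicircle law); this assumes GOE as defined in the example:

import numpy as np

m = GOE((200, 200))
assert np.allclose(m, m.T)    # symmetric by construction
eigs = np.linalg.eigvalsh(m)  # real spectrum of the symmetric sample
print(eigs.min(), eigs.max())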
Code Example #9
Wsum0 = Y.copy() * 0
S_np = S * (n - p)
S_bullcomb2 = Ssum0.copy()
Smean22 = Ssum0.copy()

start_time = time.time()

for k in range(0, 100):
    Bsumb = Bsum0.copy()
    Ssumb = Ssum0.copy()
    Wsum = Wsum0.copy()  # reset sums in each simulation
    for j in range(1, M + 1):
        globals()['TildeS%s' % j] = invwishart.rvs(df=n + alpha - p, scale=S_np)
        ## globals()['FlatTildeB%s' % j] = multivariate_normal.rvs(mean=Flat_Beta_hat,
        ##                                                       cov=np.kron(globals()['TildeS%s' % j], inv(X.dot(X.T))))
        globals()['TildeB%s' % j] = pd.DataFrame(
            matrix_normal.rvs(mean=Beta_hat, rowcov=inv(X.dot(X.T)),
                              colcov=globals()['TildeS%s' % j]),
            index=X.index, columns=Y.index)  # matrix normal random variables

        globals()['TilBTX%s' % j] = globals()['TildeB%s' % j].T.dot(X)  # needed for next
        for i in range(0, n):
            globals()['W%s' % j][i] = multivariate_normal.rvs(mean=globals()['TilBTX%s' % j][i],
                                                              cov=globals()[
                                                                  'TildeS%s' % j])  # construct each PPS synthetic dataset
        # stopped checking here
        globals()['B_bull%s' % j] = R_invXXTX.dot(globals()['W%s' % j].T)  # create M B_bull's
        globals()['B_bull%s' % j] = pd.DataFrame(globals()['B_bull%s' % j], index=X.index, columns=Y.index)
        globals()['S_bull%s' % j] = (globals()['W%s' % j] - globals()['B_bull%s' % j].T.dot(X)) \
                                        .dot((globals()['W%s' % j] - globals()['B_bull%s' % j].T.dot(X)).T) / (n - p)
        globals()['S_bull%s' % j] = pd.DataFrame(globals()['S_bull%s' % j], index=Y.index, columns=Y.index)
        Wsum = Wsum + globals()['W%s' % j]  # add all M synthetic versions W's
        Bsumb = Bsumb + globals()['B_bull%s' % j]  # add all M synthetic versions B's
Code Example #10
def Proposal(mean, cov, res):
    # Horizontally stack `res` matrix normal draws to form the proposal.
    rval = matrix_normal.rvs(mean=mean, rowcov=cov)
    for i in range(1, res):
        rval = np.hstack((rval, matrix_normal.rvs(mean=mean, rowcov=cov, random_state=i)))
    return rval
Code Example #11
    def calculate_map_params(self, subject, subject_dir, sub_index):
        """
        Calculate the parameters based on the passed subject
        for generating a new map
        :param subject: numpy array containing the pixel values for the five fingers
        :param subject_dir: folder name containing the subject's noise data
        :return: data_dict, a dictionary consisting of:
            subject_component: subject specific component
            finger_component: array with the finger components
            noise_component: noise component for each finger
            noise_pixel: noise components in the pixel space
        """

        # Load subject specific matrices
        matrix_dir = self.data_dir + "/" + subject_dir + "/"
        mappingFile = matrix_dir + subject_dir + "." + "voxelMappingInfo.pkl"
        with open(mappingFile, 'rb') as mfile:
            mappingFile = pkl.load(mfile)
        vox2pix = np.array(csr_matrix(mappingFile['vox2Pixel']).todense())
        noise_mat = mappingFile['noise_corr']
        avg_Bvar_est = mappingFile['avg_Bvar_est']
        del mappingFile

        # Generate Subject specific component
        print("Subject")

        # Use cholesky decomposition followed by matrix multiplication to
        # generate the component with the required correlation structure

        a = self.chol_mat
        z = np.random.normal(0, 1, size=(16384, ))
        z = z / np.std(z)
        subject_component = np.dot(a, z)
        subject_component = subject_component - subject_component.mean()
        #subject_component = (subject_component/np.nanstd(subject_component)) * (np.sqrt(self.var_s[sub_index]))

        # Generate finger specific components
        print("Finger")
        subject_covariance_matrix = self.cov_mat
        # Row covariance for the finger draw, estimated from the mean-centred subject data.
        finger_covariance_matrix = \
            np.ma.cov(np.ma.masked_invalid(subject - subject.mean(axis=0))) + 0.0000001
        finger_component = mnn.rvs(rowcov=finger_covariance_matrix,
                                   colcov=subject_covariance_matrix)
        #z = np.random.normal(size = (5, 16384))
        #finger_component = np.dot(np.dot(finger_covariance_matrix, z), subject_covariance_matrix)

        # Generate noise
        print("Noise")
        noise_list = []
        pixel_noise_list = []
        noise_cov = corr2cov(noise_mat, avg_Bvar_est)
        #noise_cov = noise_mat * avg_Bvar_est
        try:
            for _iter in range(0, 5):
                #noise = mnn.rvs(rowcov=noise_cov)
                z = np.random.normal(size=(noise_cov.shape[0], 1))
                noise = noise_cov @ z
                noise = noise - noise.mean()
                noise_list.append(noise)
                pixel_noise = np.dot(vox2pix.T, noise)
                pixel_noise_list.append(pixel_noise)
        except Exception as e:
            print(e)
            return -1
        data_dict = {
            "subject_component": subject_component,
            "finger_component": finger_component,
            "noise_component": noise_list,
            "noise_pixel": pixel_noise_list
        }
        del noise_list, finger_component, pixel_noise_list, noise_mat, subject_covariance_matrix
        return data_dict