Example #1
File: ga.py  Project: RNAer/correlations
def coerce_gene(X, X_star):
    """Coerces vector X to have same summary statistics as X_star.
    Using the process outlined in 'Generating Data with Identical Statistics but
    Dissimilar Graphics' by Sangit Chatterjee and Aykut Firat, this function
    takes a randomly drawn vector X and a vector X_star and produces a new 
    vector from X that has the same mean, std, correlation and possibly other 
    statistics as X_star. The reference is:
    Generating Data with Identical Statistics but Dissimilar Graphics. Sangit 
    Chatterjee and Aykut Firat 2007. The American Statistician 61:3, 248-254.
    Inputs:
     X, X_star - nX2 arrays.
     Reshaping is required because numpy only performs a true matrix product 
     when it recognizes the array as 2D; otherwise the operation is elementwise. 
    """
    # step ii: set the mean of each column of X to 0 using X = X - e_nx1 * X_mean
    n = X.shape[0]
    X_new = X - dot(ones((n, 1)), X.mean(0).reshape(1, 2))  # subtract the column means
    # step iii: orthonormalize cols of X_new with the Gram-Schmidt process
    x, y = X_new[:, 0], X_new[:, 1]
    u1 = x
    u2 = y - (dot(x, y) / dot(x, x)) * x
    e1, e2 = u1 / norm(u1), u2 / norm(u2)
    X_on = array([e1, e2]).T  # orthonormalized X_new, .T to keep nX2
    # step iv: transform X_on to ensure summary stat agreement
    tmp_a = ((n - 1.) ** .5) * X_on
    tmp_b = sqrtm(cov(X_star.T)).astype(float)
    tmp_c = dot(ones((n, 1)), X_star.mean(0).reshape(1, 2))
    return dot(tmp_a, tmp_b) + tmp_c
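A minimal usage sketch (not from the original project; the imports are assumptions that also supply the names coerce_gene itself expects, such as dot, ones, array, norm, cov and sqrtm):

# usage sketch: the imports cover this snippet and the names coerce_gene relies on
from numpy import dot, ones, array, cov
from numpy.linalg import norm
from numpy.random import randn
from scipy.linalg import sqrtm

X = randn(100, 2)                    # randomly drawn starting data, n x 2
X_star = 3.0 * randn(100, 2) + 5.0   # target data whose statistics we want to match
X_c = coerce_gene(X, X_star)
print(X_c.mean(0), X_star.mean(0))   # column means agree
print(cov(X_c.T) - cov(X_star.T))    # covariance difference is ~0 (floating-point error)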
Example #2
File: misc.py  Project: MMaus/mutils
def fBM_nd(dims, H, return_mat=False, use_eig_ev=True):
    """
    Creates fractional Brownian motion.
    Parameters: dims is a tuple giving the shape of the sample path (n x d);
                H: Hurst exponent
    This is the slow version of fBM. It may, however, be more precise than
    fBM: sometimes the matrix square root computation has numerical problems,
    which can introduce inaccuracy.
    use_eig_ev: use an eigenvalue decomposition for the matrix square root
    computation (faster)
    """
    n = dims[0]
    d = dims[1]
    Gamma = zeros((n, n))
    print('building ...\n')
    for t in arange(n):
        for s in arange(n):
            Gamma[t, s] = .5 * ((s + 1)**(2. * H) +
                                (t + 1)**(2. * H) - abs(t - s)**(2. * H))
    print('rooting ...\n')
    if use_eig_ev:
        ev, ew = eig(Gamma.real)
        Sigma = dot(ew, dot(diag(sqrt(ev)), ew.T))
    else:
        Sigma = sqrtm(Gamma)
    if return_mat:
        return Sigma
    v = randn(n, d)
    return dot(Sigma, v)
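A minimal usage sketch (not part of misc.py; the imports are assumptions that also supply the names fBM_nd relies on):

# usage sketch: the imports cover this snippet and the names fBM_nd relies on
from numpy import zeros, arange, diag, sqrt, dot
from numpy.linalg import eig
from numpy.random import randn
from scipy.linalg import sqrtm

path = fBM_nd((250, 1), H=0.7)   # one 250-step path with Hurst exponent 0.7
print(path.shape)                # -> (250, 1)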
Example #3
def gp_gen(num_point, num_dim, domain, noise_level, mix_list=[[0, 1], [2]]):
    """Generate matrix variate normally distributed data"""
    reg_param = 1e-8
    num_class = len(flatten_list(mix_list))
    X = domain * rand(num_dim, num_point)
    Kx = GaussKernel(1.0)
    Kx.compute(X)
    Ky = list2matrix(mix_list, neg_corr=True)
    K = JointKernel(Kx, Ky)

    L = real(sqrtm(0.5 * (K.kmat) + reg_param * eye(num_point * num_class)).T)
    mu = zeros((num_class, num_point))

    Y = L * matrix(randn(num_point * num_class, 1))
    Y.shape = (num_point, num_class)
    Y = real(Y.T)
    Y += mu + noise_level * randn(num_class, num_point)
    Y = array(Y)
    return (X, Y)
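gp_gen depends on project-specific helpers (flatten_list, list2matrix, GaussKernel, JointKernel), so it cannot run on its own. Its core step, multiplying the matrix square root of a regularized covariance by standard normal noise, can be sketched standalone; the RBF covariance below is a hypothetical stand-in for the project's joint kernel:

# standalone sketch of the sampling step: draw y ~ N(0, K) via a matrix square root
from numpy import exp, eye, linspace, real, dot
from numpy.random import randn
from scipy.linalg import sqrtm

n = 100
x = linspace(0.0, 5.0, n)
K = exp(-0.5 * (x[:, None] - x[None, :]) ** 2)   # RBF covariance (stand-in kernel)
L = real(sqrtm(K + 1e-8 * eye(n)))               # regularize, then take the matrix sqrt
y = dot(L, randn(n))                             # one sample with covariance approximately K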
Example #4
def mydeftensor(v):
    # reshape the flattened input vector into an N x N matrix J
    j = v.reshape([N, N])
    # right stretch tensor sqrt(J^T J), using an explicit matrix product
    s = sqrtm(dot(j.transpose(), j))
    return s.reshape([1, N * N])
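A minimal usage sketch (N is the module-level matrix size the function assumes; the 2x2 deformation gradient is just an illustration):

# usage sketch: the imports cover this snippet and the names mydeftensor relies on
from numpy import array, dot
from scipy.linalg import sqrtm

N = 2
F = array([[2.0, 0.5],
           [0.0, 1.0]])                 # a 2x2 deformation gradient
U = mydeftensor(F.reshape([1, N * N]))  # flattened right stretch tensor sqrt(F^T F)
print(U.reshape([N, N]))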
Example #5
#http://en.wikipedia.org/wiki/Square_root_of_a_matrix
#Dirac oscillator http://iopscience.iop.org/0305-4470/22/17/002/pdf/0305-4470_22_17_002.pdf
from pylab import *
from scipy.linalg.matfuncs import sqrtm

N = 400
s = 1 # 1/sqrt(m\omega_0)

n = arange(1,N)
m = sqrt(n)

x = matrix(s / sqrt(2) * (diag(m, -1) + diag(m, 1)))
p = matrix(1j / (s * sqrt(2)) * (diag(m, -1) - diag(m, 1)))
#H = (p*p + x*x) /2.
#H = (p*p + x**4) /2.

#Klein-Gordon Hamiltonian
mass = 1000
# mass**2 belongs on the diagonal (mass**2 * eye(N)); adding the bare scalar would
# broadcast it over every matrix element. This form matches eigval_KG_analytic below.
H_KG = sqrtm(mass * (p * p + x * x) + mass ** 2 * eye(N)) - mass * eye(N)
H = H_KG
def eigval_KG_analytic(n): return sqrt(mass**2 + 2 * (n + .5) * mass) - mass
def eigval_classical(n): return (n+.5)

value, vector = eig(H)
value = sort(value)[1:]
plot(value)
plot(*array([(i,eigval_KG_analytic(i)) for i in range(len(value))]).T)
plot(*array([(i,eigval_classical(i)) for i in range(len(value))]).T)
legend(['numerical','analytical','nonrelativistic'])
show()