Example #1
print "loop done"

# get the average of the accumulated Fisher z values
SUM = ne.evaluate('SUM / N')
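# (numexpr evaluates 'SUM / N' elementwise in a single pass over the buffer;
# N at this point is presumably the count of arrays accumulated into SUM)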

# Fisher z-to-r transform on the average; this is now back to a correlation array
SUM = fisher_z2r(SUM)

# transform correlation array into similarity array
SUM += 1.0
SUM /= 2.0
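# (the affine rescaling above maps correlations in [-1, 1] to similarities
# in [0, 1], s = (r + 1) / 2, presumably so the embedding step below gets a
# non-negative affinity matrix)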

# get full similarity matrix of correlations
N = hcp_util.N_original(SUM)
SUM.resize([N,N])
hcp_util.upper_to_down(SUM)
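# N_original and upper_to_down are project-local helpers; presumably
# N_original recovers N from the length of the condensed upper-triangular
# vector (M = N*(N+1)/2 if the diagonal is stored, so N = (sqrt(8*M+1)-1)/2)
# and upper_to_down mirrors the upper triangle into the lower half to make
# the matrix symmetric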

print "SUM.shape", SUM.shape

print "do embed for correlation matrix:", SUM.shape

# Satra's embedding algorithm 
embedding, result = embed.compute_diffusion_map(SUM, alpha=0, n_components=20,
    diffusion_time=0, skip_checks=True, overwrite=True)
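# embedding is expected to be an (N, n_components) array of diffusion-map
# coordinates and result a dict of diagnostics (eigenvalues etc.); this is
# an assumption based on mapalign-style compute_diffusion_map interfaces,
# which the parameter names here suggest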

# output prefix (cliarg_out_prfx is presumably parsed from the command line,
# not shown in this snippet)
out_prfx = cliarg_out_prfx
# output precision for savetxt
out_prec = "%g"

np.savetxt(out_prfx + "embedding2.csv", embedding, fmt=out_prec, delimiter='\t', newline='\n')
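
The fisher_z2r helper used above is not shown; a minimal sketch, assuming it is the standard inverse Fisher transform r = tanh(z):

def fisher_z2r(z):
    # inverse Fisher transform: map z values back to correlations in (-1, 1)
    return np.tanh(z)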
Example #2
        # (snippet is truncated: back_sum, x and bins come from an
        # np.histogram pass above, and ten_percent is presumably 0.10)
        for idx in range(x.shape[0] - 1, -1, -1):
            back_sum += x[idx] / float(x.sum())
            if back_sum >= ten_percent:
                thr = bins[idx]
                print("top-10-percent threshold:", thr)
                break
        # binarize K via thresholding
        K[np.where(K >= thr)] = 1.0
        K[np.where(K < thr)] = 0.0
    elif args.histogram == "node":
        # find a threshold value for each row of corr matrix

        # convert upper-triangular to full matrix
        N_orig = hcp_util.N_original(K)
        K.resize([N_orig, N_orig])
        hcp_util.upper_to_down(K)

        dbins = 0.1
        bins = np.arange(-1, 1+dbins, dbins)
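        # fixed-width bins of 0.1 covering the full correlation range [-1, 1]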
        for j in range(0, N_orig):
            x, bins = np.histogram(K[j,:], bins)
            back_sum = 0
            for idx in range(x.shape[0] - 1, -1, -1):
                back_sum += x[idx] / float(x.sum())
                if back_sum >= ten_percent:
                    thr = bins[idx]
                    # print("top-10-percent node threshold:", thr)
                    break
            # binarize this row of the correlation matrix via thresholding
            K[j, :][np.where(K[j, :] >= thr)] = 1.0
            K[j, :][np.where(K[j, :] < thr)] = 0.0
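
The per-row histogram scan above approximates each row's 90th-percentile cutoff at 0.1-bin resolution. A minimal vectorized sketch of the same intent (an alternative formulation, not the original code's method), using np.percentile:

import numpy as np

def binarize_top10(K):
    # per-row 90th percentile keeps roughly the top 10% of entries exactly,
    # instead of at the 0.1-wide bin resolution used above
    thr = np.percentile(K, 90, axis=1, keepdims=True)
    return (K >= thr).astype(K.dtype)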