def test_2vs0_loop():
    """Check that the two-loop and vectorized (zero-loop) EDM implementations agree.

    Compares the two distance matrices for frame 34 of the compressed
    trajectory, times both implementations over 5 runs, and reports
    pass/fail based on the Frobenius norm of their difference.

    Returns:
        bool: True when the matrices agree to within 1e-8, else False.
    """
    L, com_lipids, com_chol = trajIO.decompress("comTraj.npz")
    dist1 = edm_two_loop(L[34], com_lipids[34])
    dist2 = edm(L[34], com_lipids[34])
    # Spot-check one entry of each matrix.
    print(dist1[80][45], dist2[80][45])
    two_loop = 0.0
    for _ in range(5):
        two_loop += time_function(edm_two_loop, L[34], com_lipids[34])
    print('two loop version took %f seconds' % two_loop)
    one_loop = 0.0
    for _ in range(5):
        one_loop += time_function(edm, L[34], com_lipids[34])
    print('one loop version took %f seconds' % one_loop)
    # Original code accumulated into a variable named `sum` (shadowing the
    # builtin) inside a `for t in range(1)` loop that ran exactly once;
    # both are removed — the comparison is a single Frobenius norm.
    total_diff = np.linalg.norm(dist1 - dist2, ord='fro')
    if total_diff < 1e-8:
        print(str(total_diff) + " 2loop vs 0 loop passed")
        return True
    else:
        print("2loop vs 0 loop failed")
        return False
def main():
    """Cluster frame 28 two ways and return both label sets.

    Builds displacement-augmented lipid and cholesterol coordinates,
    extracts one layer of each, clusters the combined set with equal
    quartile weights via two code paths, then labels the first cluster
    group with DBSCAN and with the iterative labeller.

    Returns:
        tuple: (labels1, labels2) — DBSCAN labels and iterative labels.
    """
    npz_name = "comTraj.npz"
    L, com_lipids, com_chol = trajIO.decompress(npz_name)
    com_lipids, com_chol = trajIO.translateZ(com_lipids, com_chol)
    com_lipids = displacement.block_displacement(L, com_lipids)
    com_chol = displacement.block_displacement(L, com_chol)

    frame = 28
    lipids, _unused = trajIO.layering(com_lipids[frame])
    chol, _unused = trajIO.layering(com_chol[frame])

    total = np.concatenate((lipids, chol), axis=0)
    total1 = iter.combine(lipids, chol)
    cluster = percentages.cluster(total, [0.25, 0.25, 0.25, 0.25])
    cluster1 = percentages.cluster(total1, [0.25, 0.25, 0.25, 0.25])

    cutoff = 1.15
    labels1 = dc.dbscan_wrapper(cluster[0], L[frame], cutoff)
    labels2 = iter.cluster_labels('upper', L[frame], cluster1[0])
    return labels1, labels2
def test_randomCluster():
    """Verify randomCluster returns exactly `size` members.

    Loads frame 0 of the compressed trajectory and asserts the random
    cluster drawn from it has the requested cardinality.
    """
    L, com_lipids, com_chol = trajIO.decompress("comTraj.npz")
    com_lipids, com_chol = trajIO.translateZ(com_lipids, com_chol)
    arr = com_lipids[0]
    size = 43
    cluster = iter_cluster.randomCluster(size, arr)
    # Assert against `size` itself rather than the duplicated literal 43,
    # so changing the requested size cannot desynchronize the check.
    assert len(cluster) == size
def test_clusters():
    """Build Jenks clusters for frame 40 and return them.

    Not a complete test (no assertions yet), but the helper call itself
    should nonetheless be accurate.

    Returns:
        The cluster structure produced by jenks_clusters.clusters.
    """
    file = "comTraj.npz"
    L, com_lipids, com_chol = trajIO.decompress(file)
    com_lipids = displacement.block_displacement(L, com_lipids)
    # Removed unused local `Nlipids = com_lipids.shape[1]`.
    clusters = jenks_clusters.clusters(com_lipids[40], 4)
    return clusters
def vs1_2loop():
    """Print per-frame equality of the one-loop vs two-loop displacement columns.

    Compares column 3 of both implementations for 100 frames; prints
    True/False per frame.
    """
    L, com_lipids, com_chol = trajIO.decompress("comTraj.npz")
    noloop = displacement.block_displacement_one(L, com_lipids)[:, :, 3]
    twoloop = displacement.block_displacement_loop(L, com_lipids)[:, :, 3]
    for frame in range(100):
        match = noloop[frame] == twoloop[frame]
        print(match.all())
def timings():
    """Benchmark the one-loop and no-loop displacement implementations.

    Accumulates wall-clock time over 10 runs of each and prints the totals.
    The two-loop benchmark had been commented out, yet its (always-zero)
    timing was still printed — that misleading line is removed.
    """
    file = "comTraj.npz"
    L, com_lipids, com_chol = trajIO.decompress(file)
    one_loop_time = no_loop_time = 0
    for t in range(10):
        one_loop_time += time_function(displacement.block_displacement_one, L, com_lipids)
        no_loop_time += time_function(displacement.block_displacement_no, L, com_lipids)
    print('One loop version took %f seconds' % one_loop_time)
    print('No loop version took %f seconds' % no_loop_time)
def s_test():
    """Compare piecewise displacement (displacement.s) against the block version.

    Applies displacement.s over three index segments (starts 0, 46, 92),
    then prints per-frame equality of column 3 against the block
    displacement result for 100 frames.
    """
    L, com_lipids, com_chol = trajIO.decompress("comTraj.npz")
    control = displacement.block_displacement(L, com_lipids)[:, :, 3]

    # Build the test result segment by segment.
    test = com_lipids
    test = displacement.s(L, test, 0, list(range(1, 46)))
    test = displacement.s(L, test, 46, np.asarray(list(range(1, 46))) + 46)
    test = displacement.s(L, test, 92, [93, 94, 95, 96, 97, 98, 99])
    test = test[:, :, 3]

    for frame in range(100):
        same = test[frame] == control[frame]
        print(same.all())
def benchmark():
    """Time DBSCAN vs hierarchy clustering wrappers over 10 runs each.

    Loads a pickled cluster dictionary, extracts one lipid cluster as the
    input point set, and prints cumulative timings for both wrappers.
    """
    L, com_lipids, com_chol = trajIO.decompress("comTraj.npz")
    import pickle
    # Use a context manager so the file is closed (original leaked the handle).
    # NOTE(review): pickle.load on an external file is unsafe on untrusted
    # data; assumed to be a locally produced cache here.
    with open("clusters.dict", "rb") as pickle_off:
        clusters = pickle.load(pickle_off)
    X = clusters[0][28]['lipid'][0][0]
    db_time = 0
    for i in range(10):
        db_time += time_function(dc.dbscan_wrapper, X, L[0], 1.15)
    print('DBSCAN version took %f seconds' % db_time)
    hierarchy_time = 0
    for i in range(10):
        hierarchy_time += time_function(dc.hierarchy_wrapper, X, L[0], 1.15)
    print('Hierarchy version took %f seconds' % hierarchy_time)
def test_sort():
    """Verify that sorting each frame by column 3 yields a non-decreasing column.

    Raises:
        ValueError: if any frame's sorted column 3 is out of order.

    Returns:
        bool: True when every frame passes.
    """
    file = "comTraj.npz"
    L, com_lipids, com_chol = trajIO.decompress(file)
    com_lipids = displacement.block_displacement(L, com_lipids)
    Nconf = com_lipids.shape[0]
    for t in range(Nconf):
        lipids = com_lipids[t][com_lipids[t][:, 3].argsort()]
        # Start below any possible value: the original initialized curr = 0,
        # which would falsely report "sorting messed up" for a correctly
        # sorted column whose first entries are negative.
        curr = float('-inf')
        for val in lipids[:, 3]:
            if curr > val:
                print("sorting messed up")
                raise ValueError
            curr = val
    return True
def linear_test():
    """Check linear_displacement against block_displacement on column 3.

    Prints equality per frame for frames 0-45 (expected equal) and
    inequality for the segment-boundary frames 46 and 92 (expected to
    differ element-wise).
    """
    file = "comTraj.npz"
    L, com_lipids, com_chol = trajIO.decompress(file)
    control = displacement.block_displacement(L, com_lipids)
    control1 = control[:, :, 3]
    test1 = displacement.linear_displacement(L, control, 0, 100)[:, :, 3]
    print(test1.shape)

    for frame in range(46):
        print((test1[frame] == control1[frame]).all())
    print("hi")
    for frame in (46, 92):
        print((test1[frame] != control1[frame]).all())
def test_buildCluster():
    """Smoke-test Cluster construction and its self-check (& singletonMerge).

    Builds an iter_cluster.Cluster from frame 0 with cutoff 0.3 and runs
    its internal consistency check. The singletonMerge verification that
    used to live here as commented-out code was removed as dead code;
    reinstate from version control if that path is revived.
    """
    L, com_lipids, com_chol = trajIO.decompress("comTraj.npz")
    com_lipids, com_chol = trajIO.translateZ(com_lipids, com_chol)
    # Removed unused local `Nlipids = len(com_lipids[0])`.
    c = iter_cluster.Cluster(com_lipids[0], L[0], 0.3)
    c.test()
# Top-level driver: compute normalized cluster sizes per block and write
# them to "norm<name>.txt".
# NOTE(review): this is a fragment of a larger script — `name`, `topology`,
# and `c` are referenced before any visible definition; presumably they are
# bound earlier in the file. Confirm before refactoring.
trajFileName = sys.argv[1]
Nconf = int(sys.argv[2])   # total number of configurations (frames)
nlog = int(sys.argv[3])    # frames per block
Nblock = Nconf // nlog
cutoff = 1.3  #anything above 20chol
#maybe 1.3 for everything below?
percentage = c.percentages['lipids']['lower'][name]
# Either process a raw trajectory into COM coordinates, or load a
# pre-compressed .npz, depending on the input file type.
if trajIO.rawOrCOM(trajFileName):
    Nchol = trajIO.cholConc(topology)
    N, L, com_lipids, com_chol = trajIO.processTrajCOM(trajFileName, Nchol, c.NDIM, Nconf)
    com_lipids, com_chol = trajIO.translateZ(com_lipids, com_chol)
    Nlipids = com_lipids.shape[0]
else:
    L, com_lipids, com_chol = trajIO.decompress(trajFileName)
    com_lipids, com_chol = trajIO.translateZ(com_lipids, com_chol)
#parameters
del com_chol  # cholesterol not needed below; free the memory
cluster_sizes = [name]
name = str(cluster_sizes[0])
# Redirect all subsequent prints into the per-name output file.
# NOTE(review): the file handle is never explicitly closed in this view.
sys.stdout = open("norm" + name + ".txt", "w")
times = list(range(1, 46))
com_lipids = displacement.block_displacement(L, com_lipids)
#initialize output dict
normSizes = {}
for block in range(Nblock):
    normSizes[block] = {}
from bilayer_clusters import trajIO
from analysis import compress
import numpy

if __name__ == "__main__":
    # Round-trip test: compress a trajectory to .npz, reload it, and
    # compare against the directly processed COM arrays.
    trajFileName = "../data/temp20"
    chol = "../data/20chol.top"
    Nchol = trajIO.cholConc(chol)
    outfile = "../test/comTraj.npz"

    compress.spitnumpy(trajFileName, chol, 100)
    N, L, com_lipids, com_chol = trajIO.processTrajCOM(trajFileName, Nchol, 3, 100)
    L_new, lipids_new, chol_new = trajIO.decompress(outfile)

    print(lipids_new.shape)
    round_trip_ok = (numpy.array_equal(L, L_new)
                     and numpy.array_equal(com_lipids, lipids_new)
                     and numpy.array_equal(com_chol, chol_new))
    if round_trip_ok:
        print("compressLoad test passed")
    else:
        print("compressLoad test failed")
        # NOTE(review): the two lines below appear to be the tail of a
        # benchmark function whose definition starts before this chunk
        # (they mirror the end of `benchmark` elsewhere in the project);
        # indentation reconstructed on that assumption — confirm against
        # the full file.
        hierarchy_time += time_function(dc.hierarchy_wrapper, X, L[0], 1.15)
    print('Hierarchy version took %f seconds' % hierarchy_time)

def size():
    """Return dc.means of the cluster sizes of a 100k-element list.

    Exercises dc.cluster_sizes / dc.means on a large synthetic input.
    (The previous docstring held an abandoned alternative list
    construction and documented nothing.)
    """
    lst = list(range(100000))
    return dc.means(dc.cluster_sizes(lst))

if __name__ == '__main__':
    #benchmark()
    L, com_lipids, com_chol = trajIO.decompress("comTraj.npz")
    import pickle
    # NOTE(review): file handle is never closed; pickle.load is unsafe on
    # untrusted input — assumed to be a locally produced cache.
    pickle_off = open("clusters.dict", "rb")
    clusters = pickle.load(pickle_off)
    X = clusters[0][28]['lipid'][0][0]
    # Both wrappers must agree on the mean cluster size for this input.
    assert dc.mean_cluster_size(X, L[0], 1.15, dc.dbscan_wrapper) == dc.mean_cluster_size(X, L[0], 1.15, dc.hierarchy_wrapper)
    #print(dc.mean_cluster_size(X,L[0],1.15,dc.dbscan_wrapper))
    #print(dc.meanRandom(com_lipids[28],L[0],1.15,319,))
    print(dc.normSize(X, L[0], 1.15, com_lipids[0]))
    #print(size())
def test_randomIterCluster():
    # NOTE(review): incomplete stub — it loads and translates the
    # trajectory but exercises nothing and asserts nothing; `size` is
    # assigned and never used. Presumably intended to mirror
    # test_randomCluster for the iterative clusterer — finish or remove.
    L, com_lipids, com_chol = trajIO.decompress("comTraj.npz")
    com_lipids, com_chol = trajIO.translateZ(com_lipids, com_chol)
    size = 45
    return