def test_l_CalculateProjectSASA(self):
    """Run the SASA script over the project and compare to the reference."""
    out_file = os.path.join(WorkingDir, "SASA.h5")
    os.system('CalculateProjectSASA.py -o %s -p %s' % (out_file, ProjectFn))

    expected = io.loadh(os.path.join(ReferenceDir, "SASA.h5"), 'Data')
    computed = io.loadh(os.path.join(WorkingDir, "SASA.h5"), 'arr_0')
    npt.assert_array_almost_equal(
        expected, computed, err_msg="Error: Project SASAs disagree!")
def test_j_PCCA(self):
    """Lump microstates with PCCA and compare against the reference lumping."""
    trans = scipy.io.mmread(os.path.join(WorkingDir, "Data", "tProb.mtx"))
    assigns = io.loadh(
        os.path.join(WorkingDir, "Data", "Assignments.Fixed.h5"), 'arr_0')
    PCCA.run_pcca(NumMacroStates, assigns, trans,
                  os.path.join(WorkingDir, 'Data'))

    macro_map = np.loadtxt(
        os.path.join(WorkingDir, "Data", "MacroMapping.dat"), 'int')
    macro_map_ref = np.loadtxt(
        os.path.join(ReferenceDir, "Data", "MacroMapping.dat"), 'int')
    macro_assign = io.loadh(
        os.path.join(WorkingDir, "Data", "MacroAssignments.h5"), 'arr_0')
    macro_assign_ref = io.loadh(
        os.path.join(ReferenceDir, "Data", "MacroAssignments.h5"), 'Data')

    # Macrostate labels are arbitrary, so the new lumping may be a
    # permutation of the reference.  Align them before comparing.
    n_macro = NumMacroStates
    perm = np.zeros(n_macro, 'int')
    for state in range(n_macro):
        first_micro = np.where(macro_map == state)[0][0]
        perm[state] = macro_map_ref[first_micro]
    remapped = perm[macro_map]
    MSMLib.ApplyMappingToAssignments(macro_assign, perm)

    npt.assert_array_almost_equal(remapped, macro_map_ref)
    npt.assert_array_almost_equal(macro_assign, macro_assign_ref)
def test_j_PCCA(self):
    """PCCA lumping must agree with the reference up to macrostate relabeling."""
    work_data = os.path.join(WorkingDir, "Data")
    ref_data = os.path.join(ReferenceDir, "Data")

    tprob = scipy.io.mmread(os.path.join(work_data, "tProb.mtx"))
    fixed_assignments = io.loadh(
        os.path.join(work_data, "Assignments.Fixed.h5"), 'arr_0')
    PCCA.run_pcca(NumMacroStates, fixed_assignments, tprob, work_data)

    mapping_new = np.loadtxt(os.path.join(work_data, "MacroMapping.dat"), 'int')
    mapping_ref = np.loadtxt(os.path.join(ref_data, "MacroMapping.dat"), 'int')
    assign_new = io.loadh(os.path.join(work_data, "MacroAssignments.h5"), 'arr_0')
    assign_ref = io.loadh(os.path.join(ref_data, "MacroAssignments.h5"), 'Data')

    # The order of macrostates may differ between the reference and the new
    # lumping, so find a permutation matching each new label to its
    # reference label.
    permutation = np.zeros(NumMacroStates, 'int')
    for label in range(NumMacroStates):
        micro = np.where(mapping_new == label)[0][0]
        permutation[label] = mapping_ref[micro]

    permuted_mapping = permutation[mapping_new]
    MSMLib.ApplyMappingToAssignments(assign_new, permutation)

    npt.assert_array_almost_equal(permuted_mapping, mapping_ref)
    npt.assert_array_almost_equal(assign_new, assign_ref)
def test_flux(self):
    """Gross and net TPT fluxes must match the stored reference arrays."""
    ref_dir = self.tpt_ref_dir

    computed = tpt.calculate_fluxes(self.sources, self.sinks, self.tprob)
    expected = io.loadh(os.path.join(ref_dir, "flux.h5"), 'Data')
    npt.assert_array_almost_equal(computed.toarray(), expected)

    computed_net = tpt.calculate_net_fluxes(
        self.sources, self.sinks, self.tprob)
    expected_net = io.loadh(os.path.join(ref_dir, "net_flux.h5"), 'Data')
    npt.assert_array_almost_equal(computed_net.toarray(), expected_net)
def test_l_CalculateProjectSASA(self):
    """SASA values computed by the script must match the stored reference."""
    sasa_out = os.path.join(WorkingDir, "SASA.h5")
    command = 'CalculateProjectSASA.py -o %s -p %s' % (sasa_out, ProjectFn)
    os.system(command)

    ref_sasa = io.loadh(os.path.join(ReferenceDir, "SASA.h5"), 'Data')
    new_sasa = io.loadh(os.path.join(WorkingDir, "SASA.h5"), 'arr_0')
    npt.assert_array_almost_equal(ref_sasa, new_sasa,
                                  err_msg="Error: Project SASAs disagree!")
def test_flux(self):
    """Gross and net fluxes must match their stored reference files."""
    checks = (("flux.h5", tpt.calculate_fluxes),
              ("net_flux.h5", tpt.calculate_net_fluxes))
    for ref_name, calculate in checks:
        result = calculate(self.sources, self.sinks, self.tprob)
        reference = io.loadh(tpt_get(ref_name), 'Data')
        npt.assert_array_almost_equal(result.toarray(), reference)
def test_m_DoTPT(self):
    """DoTPT committors and net fluxes must match the stored references."""
    tprob = scipy.io.mmread(os.path.join(ReferenceDir, "Data", "tProb.mtx"))
    result = DoTPT.run(tprob, [0], [70])

    ref_base = os.path.join(ReferenceDir, "transition_path_theory_reference")
    committors_ref = io.loadh(os.path.join(ref_base, "committors.h5"), 'Data')
    net_flux_ref = io.loadh(os.path.join(ref_base, "net_flux.h5"), 'Data')

    npt.assert_array_almost_equal(result[0], committors_ref)
    # The net flux comes back sparse; densify before comparing.
    npt.assert_array_almost_equal(result[1].toarray(), net_flux_ref)
def test_h_CalculateClusterRadii(self):
    """Cluster radii returned by the script must match the reference file."""
    # Note: CalculateClusterRadii.main RETURNS the radii rather than
    # saving them to disk.
    radii = CalculateClusterRadii.main(
        io.loadh("Data/Assignments.h5", 'arr_0'),
        io.loadh("Data/Assignments.h5.distances", 'arr_0'))
    radii_ref = np.loadtxt(ReferenceDir + "/ClusterRadii.dat")
    npt.assert_array_almost_equal(radii, radii_ref)
def test_path_calculations(self):
    """Top-path bottlenecks and fluxes must match the Dijkstra references."""
    top_paths = tpt.find_top_paths(self.sources, self.sinks, self.tprob)

    paths_ref = io.loadh(tpt_get("dijkstra_paths.h5"), 'Data')
    fluxes_ref = io.loadh(tpt_get("dijkstra_fluxes.h5"), 'Data')
    bottlenecks_ref = io.loadh(tpt_get("dijkstra_bottlenecks.h5"), 'Data')

    # The path arrays themselves are intentionally not compared here --
    # only bottlenecks and fluxes are checked.
    npt.assert_array_almost_equal(top_paths[1], bottlenecks_ref)
    npt.assert_array_almost_equal(top_paths[2], fluxes_ref)
def test_n_FindPaths(self):
    """FindPaths bottlenecks/fluxes must match the Dijkstra references."""
    tprob = scipy.io.mmread(os.path.join(ReferenceDir, "Data", "tProb.mtx"))
    paths, bottlenecks, fluxes = FindPaths.run(tprob, [0], [70], 10)

    ref_base = os.path.join(ReferenceDir, "transition_path_theory_reference")
    # Paths are hard to test due to type issues, adding later --TJL
    bottlenecks_ref = io.loadh(
        os.path.join(ref_base, "dijkstra_bottlenecks.h5"), 'Data')
    fluxes_ref = io.loadh(
        os.path.join(ref_base, "dijkstra_fluxes.h5"), 'Data')

    npt.assert_array_almost_equal(bottlenecks, bottlenecks_ref)
    npt.assert_array_almost_equal(fluxes, fluxes_ref)
def test_path_calculations(self):
    """Bottlenecks and fluxes from find_top_paths must match the references."""
    output = tpt.find_top_paths(self.sources, self.sinks, self.tprob)

    ref_dir = self.tpt_ref_dir
    paths_ref = io.loadh(os.path.join(ref_dir, "dijkstra_paths.h5"), 'Data')
    fluxes_ref = io.loadh(os.path.join(ref_dir, "dijkstra_fluxes.h5"), 'Data')
    bottlenecks_ref = io.loadh(
        os.path.join(ref_dir, "dijkstra_bottlenecks.h5"), 'Data')

    # Path arrays themselves are deliberately left uncompared here; only
    # the bottleneck and flux outputs are asserted.
    npt.assert_array_almost_equal(output[1], bottlenecks_ref)
    npt.assert_array_almost_equal(output[2], fluxes_ref)
def entry_point():
    """CLI entry point: lump an MSM's microstates into macrostates.

    Loads the assignments and transition matrix named in the parsed
    arguments, runs PCCA or PCCA+, and writes the macrostate mapping and
    assignments (plus Chi/A matrices for PCCA+) into ``args.output_dir``.

    Raises:
        Exception: if ``args.algorithm`` is neither 'PCCA' nor 'PCCA+'.
    """
    args = parser.parse_args()

    # load args -- assignments may live under 'arr_0' or the legacy 'Data' key
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')
    tProb = scipy.io.mmread(args.tProb)

    # workaround for arglib funniness? -- booleans can arrive as strings
    args.do_minimization = args.do_minimization not in ["False", "0"]

    if args.algorithm == 'PCCA':
        MacroAssignmentsFn = os.path.join(
            args.output_dir, "MacroAssignments.h5")
        MacroMapFn = os.path.join(args.output_dir, "MacroMapping.dat")
        arglib.die_if_path_exists([MacroAssignmentsFn, MacroMapFn])

        MAP, assignments = run_pcca(args.num_macrostates, assignments, tProb)
        np.savetxt(MacroMapFn, MAP, "%d")
        io.saveh(MacroAssignmentsFn, assignments)
        logger.info("Saved output to: %s, %s", MacroAssignmentsFn, MacroMapFn)

    elif args.algorithm == 'PCCA+':
        MacroAssignmentsFn = os.path.join(
            args.output_dir, "MacroAssignments.h5")
        MacroMapFn = os.path.join(args.output_dir, "MacroMapping.dat")
        ChiFn = os.path.join(args.output_dir, 'Chi.dat')
        AFn = os.path.join(args.output_dir, 'A.dat')
        arglib.die_if_path_exists([MacroAssignmentsFn, MacroMapFn, ChiFn, AFn])

        chi, A, MAP, assignments = run_pcca_plus(
            args.num_macrostates, assignments, tProb, args.flux_cutoff,
            objective_function=args.objective_function,
            do_minimization=args.do_minimization)

        np.savetxt(ChiFn, chi)
        np.savetxt(AFn, A)
        np.savetxt(MacroMapFn, MAP, "%d")
        io.saveh(MacroAssignmentsFn, assignments)
        logger.info('Saved output to: %s, %s, %s, %s',
                    ChiFn, AFn, MacroMapFn, MacroAssignmentsFn)
    else:
        # Fix: previously raised a bare, message-less Exception, which
        # gave the user no hint about what went wrong.
        raise Exception("Unknown lumping algorithm: %s" % args.algorithm)
def test_h_CalculateClusterRadii(self):
    """Radii from CalculateClusterRadii.main must match the reference data."""
    assignments = io.loadh("Data/Assignments.h5", 'arr_0')
    distances = io.loadh("Data/Assignments.h5.distances", 'arr_0')
    # This entry point RETURNS its result instead of writing it to disk.
    computed_radii = CalculateClusterRadii.main(assignments, distances)

    expected_radii = np.loadtxt(ReferenceDir + "/ClusterRadii.dat")
    npt.assert_array_almost_equal(computed_radii, expected_radii)
def test_path_calculations(self):
    """Paths, bottlenecks and fluxes must all match the Dijkstra references."""
    top_paths = tpt.find_top_paths(self.sources, self.sinks, self.tprob)

    paths_ref = io.loadh(tpt_get("dijkstra_paths.h5"), 'Data')
    fluxes_ref = io.loadh(tpt_get("dijkstra_fluxes.h5"), 'Data')
    bottlenecks_ref = io.loadh(tpt_get("dijkstra_bottlenecks.h5"), 'Data')

    # Paths are compared one-by-one since they may have different lengths.
    for idx, ref_path in enumerate(paths_ref):
        npt.assert_array_almost_equal(top_paths[0][idx], ref_path)
    npt.assert_array_almost_equal(top_paths[1], bottlenecks_ref)
    npt.assert_array_almost_equal(top_paths[2], fluxes_ref)
def test_k_CalculateProjectRMSD(self):
    """Project RMSDs computed by the script must match the reference file."""
    out_file = os.path.join(WorkingDir, "RMSD.h5")
    command = 'CalculateProjectDistance.py -s %s -o %s -p %s rmsd -a %s' % (
        PDBFn, out_file, ProjectFn, "AtomIndices.dat")
    os.system(command)

    ref_rmsd = io.loadh(ReferenceDir + "/RMSD.h5", 'Data')
    new_rmsd = io.loadh(WorkingDir + "/RMSD.h5", 'arr_0')
    npt.assert_array_almost_equal(ref_rmsd, new_rmsd,
                                  err_msg="Error: Project RMSDs disagree!")
def test_d_Assign(self):
    """Assign.py output must match the reference assignments and distances."""
    os.system("Assign.py -p %s -g %s -o %s rmsd -a %s" % (
        ProjectFn, GensPath, "./Data", "AtomIndices.dat"))

    new_assignments = io.loadh("./Data/Assignments.h5", 'arr_0')
    new_distances = io.loadh("./Data/Assignments.h5.distances", 'arr_0')
    ref_assignments = io.loadh(ReferenceDir + "/Data/Assignments.h5", 'Data')
    ref_distances = io.loadh(
        ReferenceDir + "/Data/Assignments.h5.RMSD", 'Data')

    npt.assert_array_equal(new_assignments, ref_assignments)
    npt.assert_array_equal(new_distances, ref_distances)
def entry_point():
    """Parse CLI arguments and build the MSM from the given assignments."""
    args = parser.parse_args()

    # Assignments may be stored under 'arr_0' or the legacy 'Data' key.
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')

    # arglib passes the literal string "None" when no mapping is supplied.
    if args.mapping != "None":
        args.mapping = np.array(np.loadtxt(args.mapping), dtype=int)

    run(args.lagtime, assignments, args.symmetrize, args.mapping,
        args.trim, args.output_dir)
def test_mfpt(self):
    """All three MFPT flavors must match their stored references."""
    def load_ref(name):
        return io.loadh(os.path.join(self.tpt_ref_dir, name), 'Data')

    single = tpt.calculate_mfpt(self.sinks, self.tprob,
                                lag_time=self.lag_time)
    npt.assert_array_almost_equal(single, load_ref("mfpt.h5"))

    ensemble = tpt.calculate_ensemble_mfpt(
        self.sources, self.sinks, self.tprob, self.lag_time)
    npt.assert_array_almost_equal(ensemble, load_ref("ensemble_mfpt.h5"))

    all_pairs = tpt.calculate_all_to_all_mfpt(self.tprob)
    npt.assert_array_almost_equal(all_pairs, load_ref("all_to_all_mfpt.h5"))
def test_mfpt(self):
    """Single, ensemble, and all-to-all MFPTs must match the references."""
    mfpt_single = tpt.calculate_mfpt(self.sinks, self.tprob,
                                     lag_time=self.lag_time)
    expected_single = io.loadh(tpt_get("mfpt.h5"), 'Data')
    npt.assert_array_almost_equal(mfpt_single, expected_single)

    mfpt_ensemble = tpt.calculate_ensemble_mfpt(
        self.sources, self.sinks, self.tprob, self.lag_time)
    expected_ensemble = io.loadh(tpt_get("ensemble_mfpt.h5"), 'Data')
    npt.assert_array_almost_equal(mfpt_ensemble, expected_ensemble)

    mfpt_all = tpt.calculate_all_to_all_mfpt(self.tprob)
    expected_all = io.loadh(tpt_get("all_to_all_mfpt.h5"), 'Data')
    npt.assert_array_almost_equal(mfpt_all, expected_all)
def test_m_DoTPT(self):
    """Committors and net fluxes from DoTPT.run must match the references."""
    transition_matrix = scipy.io.mmread(
        os.path.join(ReferenceDir, "Data", "tProb.mtx"))
    source_states = [0]
    sink_states = [70]
    output = DoTPT.run(transition_matrix, source_states, sink_states)

    tpt_ref = os.path.join(ReferenceDir, "transition_path_theory_reference")
    expected_committors = io.loadh(
        os.path.join(tpt_ref, "committors.h5"), 'Data')
    expected_net_flux = io.loadh(
        os.path.join(tpt_ref, "net_flux.h5"), 'Data')

    npt.assert_array_almost_equal(output[0], expected_committors)
    npt.assert_array_almost_equal(output[1].toarray(), expected_net_flux)
def test_k_CalculateProjectRMSD(self):
    """The RMSD script's output must agree with the reference RMSDs."""
    rmsd_out = os.path.join(WorkingDir, "RMSD.h5")
    os.system('CalculateProjectDistance.py -s %s -o %s -p %s rmsd -a %s'
              % (PDBFn, rmsd_out, ProjectFn, "AtomIndices.dat"))

    expected = io.loadh(ReferenceDir + "/RMSD.h5", 'Data')
    actual = io.loadh(WorkingDir + "/RMSD.h5", 'arr_0')
    npt.assert_array_almost_equal(
        expected, actual, err_msg="Error: Project RMSDs disagree!")
def test_d_Assign(self):
    """Assignments and distances from Assign.py must equal the references."""
    command = "Assign.py -p %s -g %s -o %s rmsd -a %s" % (
        ProjectFn, GensPath, "./Data", "AtomIndices.dat")
    os.system(command)

    assignments = io.loadh("./Data/Assignments.h5", 'arr_0')
    distances = io.loadh("./Data/Assignments.h5.distances", 'arr_0')
    expected_assignments = io.loadh(
        ReferenceDir + "/Data/Assignments.h5", 'Data')
    expected_distances = io.loadh(
        ReferenceDir + "/Data/Assignments.h5.RMSD", 'Data')

    npt.assert_array_equal(assignments, expected_assignments)
    npt.assert_array_equal(distances, expected_distances)
def entry_point():
    """CLI entry point for macrostate lumping (PCCA / PCCA+).

    Reads assignments and the transition probability matrix from the
    parsed arguments, performs the requested lumping, and saves the
    macrostate mapping/assignments (and Chi/A for PCCA+) to the output
    directory.

    Raises:
        Exception: if ``args.algorithm`` is not 'PCCA' or 'PCCA+'.
    """
    args = parser.parse_args()

    # load args -- the assignments key differs between file generations
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')
    tProb = scipy.io.mmread(args.tProb)

    # workaround for arglib funniness? -- normalize string booleans
    args.do_minimization = args.do_minimization not in ["False", "0"]

    if args.algorithm == 'PCCA':
        MacroAssignmentsFn = os.path.join(
            args.output_dir, "MacroAssignments.h5")
        MacroMapFn = os.path.join(args.output_dir, "MacroMapping.dat")
        arglib.die_if_path_exists([MacroAssignmentsFn, MacroMapFn])

        MAP, assignments = run_pcca(args.num_macrostates, assignments, tProb)
        np.savetxt(MacroMapFn, MAP, "%d")
        io.saveh(MacroAssignmentsFn, assignments)
        logger.info("Saved output to: %s, %s", MacroAssignmentsFn, MacroMapFn)

    elif args.algorithm == 'PCCA+':
        MacroAssignmentsFn = os.path.join(
            args.output_dir, "MacroAssignments.h5")
        MacroMapFn = os.path.join(args.output_dir, "MacroMapping.dat")
        ChiFn = os.path.join(args.output_dir, 'Chi.dat')
        AFn = os.path.join(args.output_dir, 'A.dat')
        arglib.die_if_path_exists([MacroAssignmentsFn, MacroMapFn, ChiFn, AFn])

        chi, A, MAP, assignments = run_pcca_plus(
            args.num_macrostates, assignments, tProb, args.flux_cutoff,
            objective_function=args.objective_function,
            do_minimization=args.do_minimization)

        np.savetxt(ChiFn, chi)
        np.savetxt(AFn, A)
        np.savetxt(MacroMapFn, MAP, "%d")
        io.saveh(MacroAssignmentsFn, assignments)
        logger.info('Saved output to: %s, %s, %s, %s',
                    ChiFn, AFn, MacroMapFn, MacroAssignmentsFn)
    else:
        # Fix: was a bare `raise Exception()` with no diagnostic message.
        raise Exception("Unknown lumping algorithm: %s" % args.algorithm)
def test_n_FindPaths(self):
    """FindPaths.run bottlenecks and fluxes must match the references."""
    transition_matrix = scipy.io.mmread(
        os.path.join(ReferenceDir, "Data", "tProb.mtx"))
    source_states = [0]
    sink_states = [70]
    paths, bottlenecks, fluxes = FindPaths.run(
        transition_matrix, source_states, sink_states, 10)

    # paths are hard to test due to type issues, adding later --TJL
    tpt_ref = os.path.join(ReferenceDir, "transition_path_theory_reference")
    expected_bottlenecks = io.loadh(
        os.path.join(tpt_ref, "dijkstra_bottlenecks.h5"), 'Data')
    expected_fluxes = io.loadh(
        os.path.join(tpt_ref, "dijkstra_fluxes.h5"), 'Data')

    npt.assert_array_almost_equal(bottlenecks, expected_bottlenecks)
    npt.assert_array_almost_equal(fluxes, expected_fluxes)
def entry_point():
    """Estimate a rate matrix from assignments and save it plus exp(K)."""
    # NOTE(review): matplotlib is imported but not referenced here --
    # presumably kept for backend side effects used downstream; confirm
    # before removing.
    import matplotlib

    args = parser.parse_args()
    # The assignments file may use either the 'arr_0' or 'Data' key.
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')

    rate_matrix = run(assignments)
    tprob = scipy.linalg.matfuncs.expm(rate_matrix)

    np.savetxt(os.path.join(args.output_dir, "Rate.dat"), rate_matrix)
    scipy.io.mmwrite(os.path.join(args.output_dir, "tProb.mtx.tl"), tprob)
def test_e_BuildMSM(self):
    """BuildMSM outputs (mapping, populations, counts, tProb) must match."""
    assignments = io.loadh("Data/Assignments.h5", 'arr_0')
    BuildMSM.run(Lagtime, assignments, Symmetrize="MLE")

    # Mapping
    npt.assert_array_almost_equal(
        np.loadtxt("Data/Mapping.dat"),
        np.loadtxt(ReferenceDir + "/Data/Mapping.dat"),
        err_msg="Mapping.dat incorrect")

    # Populations
    npt.assert_array_almost_equal(
        np.loadtxt("Data/Populations.dat"),
        np.loadtxt(ReferenceDir + "/Data/Populations.dat"),
        err_msg="Populations.dat incorrect")

    # Counts matrix: compare NORMALIZED counts so the test does not depend
    # on an arbitrary scaling factor (the total number of counts); the
    # relative counts between the models still matter. (KAB 4-5-2012)
    counts = scipy.io.mmread("Data/tCounts.mtx")
    counts_ref = scipy.io.mmread(ReferenceDir + "/Data/tCounts.mtx")
    delta = (counts - counts_ref).data
    zeros = 0. * delta
    delta /= counts_ref.sum()
    npt.assert_array_almost_equal(delta, zeros,
                                  err_msg="tCounts.mtx incorrect")

    # Transition matrix
    tprob = scipy.io.mmread("Data/tProb.mtx")
    tprob_ref = scipy.io.mmread(ReferenceDir + "/Data/tProb.mtx")
    delta = (tprob - tprob_ref).data
    zeros = 0. * delta
    npt.assert_array_almost_equal(delta, zeros,
                                  err_msg="tProb.mtx incorrect")
def test_e_BuildMSM(self):
    """All four BuildMSM artifacts must agree with the reference model."""
    BuildMSM.run(Lagtime, io.loadh("Data/Assignments.h5", 'arr_0'),
                 Symmetrize="MLE")

    # Test mapping
    mapping = np.loadtxt("Data/Mapping.dat")
    mapping_ref = np.loadtxt(ReferenceDir + "/Data/Mapping.dat")
    npt.assert_array_almost_equal(mapping, mapping_ref,
                                  err_msg="Mapping.dat incorrect")

    # Test populations
    populations = np.loadtxt("Data/Populations.dat")
    populations_ref = np.loadtxt(ReferenceDir + "/Data/Populations.dat")
    npt.assert_array_almost_equal(populations, populations_ref,
                                  err_msg="Populations.dat incorrect")

    # Test counts matrix.  The raw counts carry an arbitrary scaling
    # factor (total count number), so normalize the difference before
    # asserting agreement at 7 decimals; the relative counts between the
    # current and reference model DO matter.
    c_new = scipy.io.mmread("Data/tCounts.mtx")
    c_ref = scipy.io.mmread(ReferenceDir + "/Data/tCounts.mtx")
    diff = (c_new - c_ref).data
    target = 0. * diff
    diff /= c_ref.sum()
    npt.assert_array_almost_equal(diff, target,
                                  err_msg="tCounts.mtx incorrect")

    # Test transition matrix
    t_new = scipy.io.mmread("Data/tProb.mtx")
    t_ref = scipy.io.mmread(ReferenceDir + "/Data/tProb.mtx")
    diff = (t_new - t_ref).data
    target = 0. * diff
    npt.assert_array_almost_equal(diff, target,
                                  err_msg="tProb.mtx incorrect")
def test_g_GetRandomConfs(self):
    """Randomly drawn conformations must match the seeded reference draw."""
    project = Project.load_from(ProjectFn)
    assignments = io.loadh("Data/Assignments.Fixed.h5", 'arr_0')

    # Seed the RNG with 42 so the stream of random numbers is predictable.
    rng = np.random.RandomState(42)
    sampled = GetRandomConfs.run(project, assignments,
                                 NumRandomConformations, rng)

    expected = Trajectory.load_trajectory_file(
        os.path.join(ReferenceDir, "2RandomConfs.lh5"))
    self.assert_trajectories_equal(expected, sampled)
def load(filename):
    """Heuristically load *filename* based on its name and extension.

    Dispatch rules (in order): trajectories (.lh5/.pdb), AtomIndices.dat
    (integer text), generic .dat (float text), ProjectInfo files, HDF5
    serializer files (.h5/.hdf/.h5.distances), and MatrixMarket (.mtx).

    Raises:
        TypeError: if no loading rule matches the filename.
    """
    # delay these imports, since this module is loaded in a bunch
    # of places but not necessarily used
    import scipy.io
    from msmbuilder import Trajectory, io, Project

    # the filename extension
    ext = os.path.splitext(filename)[1]

    # load trajectories
    if ext in ['.lh5', '.pdb']:
        val = Trajectory.load_trajectory_file(filename)

    # load flat text files
    elif 'AtomIndices.dat' in filename:
        # try loading AtomIndices first, because the default for loadtxt
        # is to use floats.  Fix: use builtin `int` -- the `np.int` alias
        # was deprecated in NumPy 1.20 and removed in 1.24.
        val = np.loadtxt(filename, dtype=int)
    elif ext in ['.dat']:
        # try loading general .dats with floats
        val = np.loadtxt(filename)

    # short circuit opening ProjectInfo
    elif ('ProjectInfo.yaml' in filename) or ('ProjectInfo.h5' in filename):
        val = Project.load_from(filename)

    # load serializer files ending with .h5, .hdf or .h5.distances
    # (merged the two previously-duplicated branches)
    elif ext in ['.h5', '.hdf'] or filename.endswith('.h5.distances'):
        val = io.loadh(filename, deferred=False)

    # load matricies
    elif ext in ['.mtx']:
        val = scipy.io.mmread(filename)

    else:
        raise TypeError("I could not infer how to load this file. You "
            "can either request load=False, or perhaps add more logic to "
            "the load heuristics in this class: %s" % filename)

    return val
def test_i_CalculateRMSD(self):
    """RMSDs to the generators must match the stored reference values."""
    out_file = os.path.join(WorkingDir, "RMSD_Gens.h5")
    os.system('CalculateProjectDistance.py -s %s -t %s -o %s rmsd -a %s' % (
        PDBFn, "Data/Gens.lh5", out_file, "AtomIndices.dat"))

    computed = io.loadh(out_file, 'arr_0')
    expected = np.loadtxt(os.path.join(ReferenceDir, "RMSD.dat"))
    npt.assert_array_almost_equal(computed, expected)
def test_g_GetRandomConfs(self):
    """GetRandomConfs with a fixed seed must reproduce the reference draw."""
    proj = Project.load_from(ProjectFn)
    fixed_assignments = io.loadh("Data/Assignments.Fixed.h5", 'arr_0')

    # A RandomState seeded with 42 gives a predictable stream of randoms,
    # so the selected conformations are reproducible.
    random_source = np.random.RandomState(42)
    random_confs = GetRandomConfs.run(
        proj, fixed_assignments, NumRandomConformations, random_source)

    reference_traj = Trajectory.load_trajectory_file(
        os.path.join(ReferenceDir, "2RandomConfs.lh5"))
    self.assert_trajectories_equal(reference_traj, random_confs)
def test_multi_state_path_calculations(self):
    """Multi-source/multi-sink FindPaths output must match the reference."""
    output = FindPaths.run(self.tprob, self.multi_sources,
                           self.multi_sinks, self.num_paths)

    reference = io.loadh(tpt_get("many_state/Paths.h5"))
    # NOTE: the reference file's key capitalization is inconsistent
    # ('Paths', 'Bottlenecks', 'fluxes') -- these must match the file
    # exactly.
    npt.assert_array_almost_equal(output[0], reference['Paths'])
    npt.assert_array_almost_equal(output[1], reference['Bottlenecks'])
    npt.assert_array_almost_equal(output[2], reference['fluxes'])
def test_i_CalculateRMSD(self):
    """Generator RMSDs from the distance script must match the reference."""
    rmsd_out = os.path.join(WorkingDir, "RMSD_Gens.h5")
    command = 'CalculateProjectDistance.py -s %s -t %s -o %s rmsd -a %s' % (
        PDBFn, "Data/Gens.lh5", rmsd_out, "AtomIndices.dat")
    os.system(command)

    actual = io.loadh(rmsd_out, 'arr_0')
    reference = np.loadtxt(os.path.join(ReferenceDir, "RMSD.dat"))
    npt.assert_array_almost_equal(actual, reference)
def test_TP_time(self):
    """The average transition-path time must match the stored reference."""
    computed = tpt.calculate_avg_TP_time(self.sources, self.sinks,
                                         self.tprob, self.lag_time)
    expected = io.loadh(tpt_get("tp_time.h5"), 'Data')
    npt.assert_array_almost_equal(computed, expected)
def test_committors(self):
    """Committor probabilities must match the stored reference array."""
    committors = tpt.calculate_committors(self.sources, self.sinks,
                                          self.tprob)
    committors_ref = io.loadh(tpt_get("committors.h5"), 'Data')
    npt.assert_array_almost_equal(committors, committors_ref)
def test_committors(self):
    """Computed committors must agree with the reference committors.h5."""
    computed = tpt.calculate_committors(self.sources, self.sinks, self.tprob)
    reference = io.loadh(
        os.path.join(self.tpt_ref_dir, "committors.h5"), 'Data')
    npt.assert_array_almost_equal(computed, reference)
K=K0.copy() * float(LagTime) C0=MSMLib.get_count_matrix_from_assignments(assignments, lag_time=LagTime).toarray() Counts=C0.sum(1) Counts/=LagTime X2=SCRE.MaximizeRateLikelihood(X,M,populations,C0,K) K=SCRE.ConstructRateFromParams(X2,M,populations,K) K/=(LagTime) KList.append(K) counts_list.append(Counts) KList=np.array(KList) SCRE.PlotRates(KList,lagtime_list,counts_list) return KList run = interactive_scre if __name__ == "__main__": args = parser.parse_args() try: assignments = io.loadh(args.assignments, 'arr_0') except KeyError: assignments = io.loadh(args.assignments, 'Data') K = run(assignments) T = scipy.linalg.matfuncs.expm(K) np.savetxt(os.path.join(args.output_dir, "Rate.dat"), K) scipy.io.mmwrite(os.path.join(args.output_dir, "tProb.mtx.tl"), T)
def test_TP_time(self):
    """Average TP time from the TPT module must match the reference file."""
    avg_tp_time = tpt.calculate_avg_TP_time(self.sources, self.sinks,
                                            self.tprob, self.lag_time)
    avg_tp_time_ref = io.loadh(
        os.path.join(self.tpt_ref_dir, "tp_time.h5"), 'Data')
    npt.assert_array_almost_equal(avg_tp_time, avg_tp_time_ref)