Exemplo n.º 1
0
    def test_l_CalculateProjectSASA(self):
        """Run the CalculateProjectSASA.py script over the project and
        compare its output against the stored reference SASA values."""
        outpath = os.path.join(WorkingDir, "SASA.h5")
        # NOTE(review): shells out rather than calling the library directly;
        # assumes CalculateProjectSASA.py is on PATH -- confirm.
        os.system('CalculateProjectSASA.py -o %s -p %s' % (outpath, ProjectFn) )

        # the reference file stores its array under 'Data', new output under 'arr_0'
        r0 = io.loadh(os.path.join( ReferenceDir, "SASA.h5" ), 'Data')
        r1 = io.loadh(os.path.join( WorkingDir, "SASA.h5" ), 'arr_0')
        npt.assert_array_almost_equal(r0,r1, err_msg="Error: Project SASAs disagree!")
Exemplo n.º 2
0
    def test_j_PCCA(self):
        """Run PCCA lumping and compare the macrostate mapping and
        assignments to the reference, up to a permutation of labels."""
        TC = scipy.io.mmread(os.path.join(WorkingDir,"Data", "tProb.mtx"))
        A  = io.loadh(os.path.join(WorkingDir,"Data", "Assignments.Fixed.h5"), 'arr_0')
        PCCA.run_pcca(NumMacroStates, A, TC, os.path.join(WorkingDir, 'Data'))

        mm   = np.loadtxt(os.path.join(WorkingDir, "Data", "MacroMapping.dat"),'int')
        mm_r = np.loadtxt(os.path.join(ReferenceDir, "Data", "MacroMapping.dat"),'int')

        ma   = io.loadh(os.path.join(WorkingDir, "Data", "MacroAssignments.h5"), 'arr_0')
        ma_r = io.loadh(os.path.join(ReferenceDir, "Data", "MacroAssignments.h5"), 'Data')

        num_macro = NumMacroStates
        permutation_mapping = np.zeros(num_macro,'int')
        #The order of macrostates might be different between the reference and new lumping.
        #We therefore find a permutation to match them.
        for i in range(num_macro):
            # j: first microstate assigned to macrostate i in the new lumping
            j = np.where(mm==i)[0][0]
            permutation_mapping[i] = mm_r[j]

        mm_permuted = permutation_mapping[mm]
        MSMLib.ApplyMappingToAssignments(ma,permutation_mapping)

        npt.assert_array_almost_equal(mm_permuted, mm_r)
        npt.assert_array_almost_equal(ma, ma_r)
Exemplo n.º 3
0
    def test_j_PCCA(self):
        """Lump microstates with PCCA and check the result against the
        stored reference, modulo a permutation of macrostate labels."""
        data_dir = os.path.join(WorkingDir, "Data")
        TC = scipy.io.mmread(os.path.join(data_dir, "tProb.mtx"))
        A = io.loadh(os.path.join(data_dir, "Assignments.Fixed.h5"), 'arr_0')
        PCCA.run_pcca(NumMacroStates, A, TC, data_dir)

        mm = np.loadtxt(os.path.join(data_dir, "MacroMapping.dat"), 'int')
        mm_r = np.loadtxt(
            os.path.join(ReferenceDir, "Data", "MacroMapping.dat"), 'int')

        ma = io.loadh(os.path.join(data_dir, "MacroAssignments.h5"), 'arr_0')
        ma_r = io.loadh(
            os.path.join(ReferenceDir, "Data", "MacroAssignments.h5"), 'Data')

        # Macrostate labels are arbitrary: build a permutation that carries
        # each new label onto the reference label of the same microstate.
        num_macro = NumMacroStates
        permutation_mapping = np.zeros(num_macro, 'int')
        for i in range(num_macro):
            first_member = np.where(mm == i)[0][0]
            permutation_mapping[i] = mm_r[first_member]

        mm_permuted = permutation_mapping[mm]
        MSMLib.ApplyMappingToAssignments(ma, permutation_mapping)

        npt.assert_array_almost_equal(mm_permuted, mm_r)
        npt.assert_array_almost_equal(ma, ma_r)
Exemplo n.º 4
0
 def test_flux(self):
     """Check raw and net fluxes against the stored reference arrays."""
     for compute, ref_name in ((tpt.calculate_fluxes, "flux.h5"),
                               (tpt.calculate_net_fluxes, "net_flux.h5")):
         result = compute(self.sources, self.sinks, self.tprob)
         expected = io.loadh(os.path.join(self.tpt_ref_dir, ref_name), 'Data')
         npt.assert_array_almost_equal(result.toarray(), expected)
Exemplo n.º 5
0
    def test_l_CalculateProjectSASA(self):
        """Shell out to CalculateProjectSASA.py and compare the computed
        SASAs with the reference values."""
        outpath = os.path.join(WorkingDir, "SASA.h5")
        os.system('CalculateProjectSASA.py -o %s -p %s' % (outpath, ProjectFn))

        reference = io.loadh(os.path.join(ReferenceDir, "SASA.h5"), 'Data')
        computed = io.loadh(os.path.join(WorkingDir, "SASA.h5"), 'arr_0')
        npt.assert_array_almost_equal(reference, computed,
                                      err_msg="Error: Project SASAs disagree!")
Exemplo n.º 6
0
 def test_flux(self):
     """Compare computed fluxes and net fluxes to the reference data."""
     for compute, ref_file in ((tpt.calculate_fluxes, "flux.h5"),
                               (tpt.calculate_net_fluxes, "net_flux.h5")):
         result = compute(self.sources, self.sinks, self.tprob)
         npt.assert_array_almost_equal(result.toarray(),
                                       io.loadh(tpt_get(ref_file), 'Data'))
Exemplo n.º 7
0
 def test_m_DoTPT(self):
     """Run the DoTPT driver and check committors and net fluxes."""
     T = scipy.io.mmread(os.path.join(ReferenceDir, "Data", "tProb.mtx"))
     # source state 0, sink state 70 (matches the reference data)
     script_out = DoTPT.run(T, [0], [70])
     ref_dir = os.path.join(ReferenceDir, "transition_path_theory_reference")
     committors_ref = io.loadh(os.path.join(ref_dir, "committors.h5"), 'Data')
     net_flux_ref = io.loadh(os.path.join(ref_dir, "net_flux.h5"), 'Data')
     npt.assert_array_almost_equal(script_out[0], committors_ref)
     npt.assert_array_almost_equal(script_out[1].toarray(), net_flux_ref)
Exemplo n.º 8
0
    def test_h_CalculateClusterRadii(self):
        """Compute cluster radii from the assignments and compare them
        to the reference ClusterRadii.dat."""
        #args = ("Data/Assignments.h5", "Data/Assignments.h5.distances", MinState,MaxState)
        #Note this one RETURNS a value, not saves it to disk.
        cr = CalculateClusterRadii.main(io.loadh("Data/Assignments.h5", 'arr_0'),
                                        io.loadh("Data/Assignments.h5.distances", 'arr_0'))
        #recall that this one bundles stuff
        #time.sleep(10) # we have to wait a little to get results
        cr_r = np.loadtxt(ReferenceDir +"/ClusterRadii.dat")
        npt.assert_array_almost_equal(cr, cr_r)
Exemplo n.º 9
0
    def test_path_calculations(self):
        """Find top TPT paths and compare bottlenecks and fluxes to refs."""
        path_output = tpt.find_top_paths(self.sources, self.sinks, self.tprob)

        paths_ref = io.loadh( tpt_get("dijkstra_paths.h5"), 'Data')
        fluxes_ref = io.loadh( tpt_get("dijkstra_fluxes.h5"), 'Data')
        bottlenecks_ref = io.loadh( tpt_get("dijkstra_bottlenecks.h5"), 'Data')

        # path comparison is disabled; paths_ref is loaded but unused
        #npt.assert_array_almost_equal(path_output[0], paths_ref)
        npt.assert_array_almost_equal(path_output[1], bottlenecks_ref)
        npt.assert_array_almost_equal(path_output[2], fluxes_ref)
Exemplo n.º 10
0
 def test_n_FindPaths(self):
     """FindPaths bottlenecks and fluxes must match the reference data.

     The path lists themselves are not compared (type issues; see the
     original TJL note).
     """
     tprob = scipy.io.mmread(os.path.join(ReferenceDir, "Data", "tProb.mtx"))
     paths, bottlenecks, fluxes = FindPaths.run(tprob, [0], [70], 10)

     ref_dir = os.path.join(ReferenceDir, "transition_path_theory_reference")
     npt.assert_array_almost_equal(
         bottlenecks,
         io.loadh(os.path.join(ref_dir, "dijkstra_bottlenecks.h5"), 'Data'))
     npt.assert_array_almost_equal(
         fluxes,
         io.loadh(os.path.join(ref_dir, "dijkstra_fluxes.h5"), 'Data'))
Exemplo n.º 11
0
    def test_path_calculations(self):
        """Find top TPT paths; compare bottlenecks and fluxes to references."""
        path_output = tpt.find_top_paths(self.sources, self.sinks, self.tprob)

        paths_ref = io.loadh(os.path.join(self.tpt_ref_dir,"dijkstra_paths.h5"), 'Data')
        fluxes_ref = io.loadh(os.path.join(self.tpt_ref_dir,"dijkstra_fluxes.h5"), 'Data')
        bottlenecks_ref = io.loadh(os.path.join(self.tpt_ref_dir,"dijkstra_bottlenecks.h5"), 'Data')

        # path comparison is disabled; paths_ref is loaded but unused
        #npt.assert_array_almost_equal(path_output[0], paths_ref)
        npt.assert_array_almost_equal(path_output[1], bottlenecks_ref)
        npt.assert_array_almost_equal(path_output[2], fluxes_ref)
Exemplo n.º 12
0
def entry_point():
    """Command-line driver: lump microstates into macrostates.

    Parses the script arguments, loads the assignments and transition
    matrix, runs PCCA or PCCA+ as requested, and writes the resulting
    macrostate mapping/assignments (plus Chi and A for PCCA+) into
    ``args.output_dir``. Refuses to overwrite existing output files.

    Raises:
        ValueError: if ``args.algorithm`` is neither 'PCCA' nor 'PCCA+'.
    """
    args = parser.parse_args()

    # load args: new files store the array under 'arr_0', older
    # reference files under 'Data'
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')

    tProb = scipy.io.mmread(args.tProb)

    # workaround for arglib funniness? the flag arrives as a string, so
    # only the literal strings "False"/"0" disable minimization
    if args.do_minimization in ["False", "0"]:
        args.do_minimization = False
    else:
        args.do_minimization = True

    if args.algorithm == 'PCCA':
        MacroAssignmentsFn = os.path.join(args.output_dir,
                                          "MacroAssignments.h5")
        MacroMapFn = os.path.join(args.output_dir, "MacroMapping.dat")
        arglib.die_if_path_exists([MacroAssignmentsFn, MacroMapFn])

        MAP, assignments = run_pcca(args.num_macrostates, assignments, tProb)

        np.savetxt(MacroMapFn, MAP, "%d")
        io.saveh(MacroAssignmentsFn, assignments)
        logger.info("Saved output to: %s, %s", MacroAssignmentsFn, MacroMapFn)

    elif args.algorithm == 'PCCA+':
        MacroAssignmentsFn = os.path.join(args.output_dir,
                                          "MacroAssignments.h5")
        MacroMapFn = os.path.join(args.output_dir, "MacroMapping.dat")
        ChiFn = os.path.join(args.output_dir, 'Chi.dat')
        AFn = os.path.join(args.output_dir, 'A.dat')

        arglib.die_if_path_exists([MacroAssignmentsFn, MacroMapFn, ChiFn, AFn])

        chi, A, MAP, assignments = run_pcca_plus(
            args.num_macrostates,
            assignments,
            tProb,
            args.flux_cutoff,
            objective_function=args.objective_function,
            do_minimization=args.do_minimization)

        np.savetxt(ChiFn, chi)
        np.savetxt(AFn, A)
        np.savetxt(MacroMapFn, MAP, "%d")
        io.saveh(MacroAssignmentsFn, assignments)
        logger.info('Saved output to: %s, %s, %s, %s', ChiFn, AFn, MacroMapFn,
                    MacroAssignmentsFn)
    else:
        # was a bare `raise Exception()` -- give an actionable message
        raise ValueError("Unrecognized algorithm: %r (expected 'PCCA' or "
                         "'PCCA+')" % args.algorithm)
Exemplo n.º 13
0
    def test_h_CalculateClusterRadii(self):
        """Compute cluster radii and check them against ClusterRadii.dat.

        Note: CalculateClusterRadii.main returns its result rather than
        writing it to disk.
        """
        assignments = io.loadh("Data/Assignments.h5", 'arr_0')
        distances = io.loadh("Data/Assignments.h5.distances", 'arr_0')
        radii = CalculateClusterRadii.main(assignments, distances)

        expected = np.loadtxt(ReferenceDir + "/ClusterRadii.dat")
        npt.assert_array_almost_equal(radii, expected)
Exemplo n.º 14
0
    def test_path_calculations(self):
        """Find top TPT paths and compare paths (element-wise), bottlenecks
        and fluxes to the reference data."""
        path_output = tpt.find_top_paths(self.sources, self.sinks, self.tprob)

        paths_ref = io.loadh( tpt_get("dijkstra_paths.h5"), 'Data')
        fluxes_ref = io.loadh( tpt_get("dijkstra_fluxes.h5"), 'Data')
        bottlenecks_ref = io.loadh( tpt_get("dijkstra_bottlenecks.h5"), 'Data')

        # paths are compared one at a time -- presumably because the path
        # arrays have varying lengths; confirm against find_top_paths
        for i in range(len(paths_ref)):
            npt.assert_array_almost_equal(path_output[0][i], paths_ref[i])
        npt.assert_array_almost_equal(path_output[1], bottlenecks_ref)
        npt.assert_array_almost_equal(path_output[2], fluxes_ref)
Exemplo n.º 15
0
 def test_k_CalculateProjectRMSD(self):
     """Shell out to CalculateProjectDistance.py (rmsd metric) and compare
     the project RMSDs with the reference."""
     outpath = os.path.join(WorkingDir, "RMSD.h5")
     cmd = 'CalculateProjectDistance.py -s %s -o %s -p %s rmsd -a %s' % (
         PDBFn, outpath, ProjectFn, "AtomIndices.dat")
     os.system(cmd)

     expected = io.loadh(ReferenceDir+"/RMSD.h5", 'Data')
     computed = io.loadh(WorkingDir+"/RMSD.h5", 'arr_0')
     npt.assert_array_almost_equal(expected, computed,
                                   err_msg="Error: Project RMSDs disagree!")
Exemplo n.º 16
0
 def test_d_Assign(self):
     """Run Assign.py against the project and compare assignments and
     assignment distances with the reference data."""
     os.system("Assign.py -p %s -g %s -o %s rmsd -a %s" % (
         ProjectFn, GensPath, "./Data", "AtomIndices.dat"))

     assignments = io.loadh("./Data/Assignments.h5", 'arr_0')
     assignment_rmsds = io.loadh("./Data/Assignments.h5.distances", 'arr_0')

     expected_assignments = io.loadh(ReferenceDir + "/Data/Assignments.h5", 'Data')
     expected_rmsds = io.loadh(ReferenceDir + "/Data/Assignments.h5.RMSD", 'Data')

     npt.assert_array_equal(assignments, expected_assignments)
     npt.assert_array_equal(assignment_rmsds, expected_rmsds)
Exemplo n.º 17
0
def entry_point():
    """Command-line driver: build an MSM from an assignments file."""
    args = parser.parse_args()

    # new files store the array under 'arr_0'; older ones under 'Data'
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')

    # arglib passes mapping through as a string; "None" means "no mapping"
    mapping = args.mapping
    if mapping != "None":
        mapping = np.array(np.loadtxt(mapping), dtype=int)

    run(args.lagtime, assignments, args.symmetrize, mapping, args.trim,
        args.output_dir)
Exemplo n.º 18
0
 def test_mfpt(self):
     """Check single-sink, ensemble, and all-to-all MFPTs against refs."""
     def ref(name):
         return io.loadh(os.path.join(self.tpt_ref_dir, name), 'Data')

     npt.assert_array_almost_equal(
         tpt.calculate_mfpt(self.sinks, self.tprob, lag_time=self.lag_time),
         ref("mfpt.h5"))
     npt.assert_array_almost_equal(
         tpt.calculate_ensemble_mfpt(self.sources, self.sinks, self.tprob,
                                     self.lag_time),
         ref("ensemble_mfpt.h5"))
     npt.assert_array_almost_equal(
         tpt.calculate_all_to_all_mfpt(self.tprob),
         ref("all_to_all_mfpt.h5"))
Exemplo n.º 19
0
def entry_point():
    """Command-line driver: build an MSM from an assignments file."""
    args = parser.parse_args()

    # new files store the array under 'arr_0'; older ones under 'Data'
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')

    # arglib passes mapping through as a string; "None" means no mapping
    if args.mapping != "None":
        args.mapping = np.array(np.loadtxt(args.mapping), dtype=int)

    run(args.lagtime, assignments, args.symmetrize, args.mapping, args.trim,
        args.output_dir)
Exemplo n.º 20
0
 def test_mfpt(self):
     """Check MFPT calculations (single, ensemble, all-to-all) against refs."""
     mfpt = tpt.calculate_mfpt(self.sinks, self.tprob, lag_time=self.lag_time)
     npt.assert_array_almost_equal(mfpt, io.loadh(tpt_get("mfpt.h5"), 'Data'))

     ensemble = tpt.calculate_ensemble_mfpt(
         self.sources, self.sinks, self.tprob, self.lag_time)
     npt.assert_array_almost_equal(
         ensemble, io.loadh(tpt_get("ensemble_mfpt.h5"), 'Data'))

     all_to_all = tpt.calculate_all_to_all_mfpt(self.tprob)
     npt.assert_array_almost_equal(
         all_to_all, io.loadh(tpt_get("all_to_all_mfpt.h5"), 'Data'))
Exemplo n.º 21
0
 def test_m_DoTPT(self):
     """Exercise DoTPT.run and validate committors and net fluxes."""
     T = scipy.io.mmread(os.path.join(ReferenceDir, "Data", "tProb.mtx"))
     # source state 0, sink state 70 (matches the reference data)
     out = DoTPT.run(T, [0], [70])

     tpt_ref = os.path.join(ReferenceDir, "transition_path_theory_reference")
     npt.assert_array_almost_equal(
         out[0], io.loadh(os.path.join(tpt_ref, "committors.h5"), 'Data'))
     npt.assert_array_almost_equal(
         out[1].toarray(),
         io.loadh(os.path.join(tpt_ref, "net_flux.h5"), 'Data'))
Exemplo n.º 22
0
    def test_k_CalculateProjectRMSD(self):
        """Run CalculateProjectDistance.py (rmsd) on the project and compare
        the per-frame RMSDs with the reference."""
        #C1 = Conformation.load_from_pdb(PDBFn)
        #P1 = Project.load_from_hdf(ProjectFn)
        #AInd=np.loadtxt("AtomIndices.dat", int)
        #CalculateProjectRMSD.run(C1,P1,AInd,"RMSD.h5")
        outpath = os.path.join(WorkingDir, "RMSD.h5")
        os.system('CalculateProjectDistance.py -s %s -o %s -p %s rmsd -a %s' %
                  (PDBFn, outpath, ProjectFn, "AtomIndices.dat"))

        # reference stores its array under 'Data', new output under 'arr_0'
        r0 = io.loadh(ReferenceDir + "/RMSD.h5", 'Data')
        r1 = io.loadh(WorkingDir + "/RMSD.h5", 'arr_0')
        npt.assert_array_almost_equal(r0,
                                      r1,
                                      err_msg="Error: Project RMSDs disagree!")
Exemplo n.º 23
0
    def test_d_Assign(self):
        """Run Assign.py and compare its outputs with the reference data."""
        os.system("Assign.py -p %s -g %s -o %s rmsd -a %s" %
                  (ProjectFn, GensPath, "./Data", "AtomIndices.dat"))

        new_assignments = io.loadh("./Data/Assignments.h5", 'arr_0')
        new_distances = io.loadh("./Data/Assignments.h5.distances", 'arr_0')

        ref_assignments = io.loadh(ReferenceDir + "/Data/Assignments.h5",
                                   'Data')
        ref_distances = io.loadh(ReferenceDir + "/Data/Assignments.h5.RMSD",
                                 'Data')

        npt.assert_array_equal(new_assignments, ref_assignments)
        npt.assert_array_equal(new_distances, ref_distances)
Exemplo n.º 24
0
def entry_point():
    """Command-line driver: lump microstates into macrostates.

    Parses the script arguments, loads the assignments and transition
    matrix, runs PCCA or PCCA+ as requested, and writes the resulting
    macrostate mapping/assignments (plus Chi and A for PCCA+) into
    ``args.output_dir``. Refuses to overwrite existing output files.

    Raises:
        ValueError: if ``args.algorithm`` is neither 'PCCA' nor 'PCCA+'.
    """
    args = parser.parse_args()

    # load args: new files store the array under 'arr_0', older
    # reference files under 'Data'
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')

    tProb = scipy.io.mmread(args.tProb)

    # workaround for arglib funniness? the flag arrives as a string, so
    # only the literal strings "False"/"0" disable minimization
    if args.do_minimization in ["False", "0"]:
        args.do_minimization = False
    else:
        args.do_minimization = True

    if args.algorithm == 'PCCA':
        MacroAssignmentsFn = os.path.join(
            args.output_dir, "MacroAssignments.h5")
        MacroMapFn = os.path.join(args.output_dir, "MacroMapping.dat")
        arglib.die_if_path_exists([MacroAssignmentsFn, MacroMapFn])

        MAP, assignments = run_pcca(args.num_macrostates, assignments, tProb)

        np.savetxt(MacroMapFn, MAP, "%d")
        io.saveh(MacroAssignmentsFn, assignments)
        logger.info("Saved output to: %s, %s", MacroAssignmentsFn, MacroMapFn)

    elif args.algorithm == 'PCCA+':
        MacroAssignmentsFn = os.path.join(
            args.output_dir, "MacroAssignments.h5")
        MacroMapFn = os.path.join(args.output_dir, "MacroMapping.dat")
        ChiFn = os.path.join(args.output_dir, 'Chi.dat')
        AFn = os.path.join(args.output_dir, 'A.dat')

        arglib.die_if_path_exists([MacroAssignmentsFn, MacroMapFn, ChiFn, AFn])

        chi, A, MAP, assignments = run_pcca_plus(
            args.num_macrostates, assignments, tProb, args.flux_cutoff,
            objective_function=args.objective_function,
            do_minimization=args.do_minimization)

        np.savetxt(ChiFn, chi)
        np.savetxt(AFn, A)
        np.savetxt(MacroMapFn, MAP, "%d")
        io.saveh(MacroAssignmentsFn, assignments)
        logger.info('Saved output to: %s, %s, %s, %s',
                    ChiFn, AFn, MacroMapFn, MacroAssignmentsFn)
    else:
        # was a bare `raise Exception()` -- give an actionable message
        raise ValueError("Unrecognized algorithm: %r (expected 'PCCA' or "
                         "'PCCA+')" % args.algorithm)
Exemplo n.º 25
0
 def test_n_FindPaths(self):
     """Run FindPaths and compare bottlenecks and fluxes to the reference.

     Path lists themselves are not compared (type issues; see the original
     TJL note).
     """
     tprob = scipy.io.mmread(os.path.join(ReferenceDir, "Data", "tProb.mtx"))
     paths, bottlenecks, fluxes = FindPaths.run(tprob, [0], [70], 10)

     tpt_ref = os.path.join(ReferenceDir, "transition_path_theory_reference")
     bottlenecks_expected = io.loadh(
         os.path.join(tpt_ref, "dijkstra_bottlenecks.h5"), 'Data')
     fluxes_expected = io.loadh(
         os.path.join(tpt_ref, "dijkstra_fluxes.h5"), 'Data')
     npt.assert_array_almost_equal(bottlenecks, bottlenecks_expected)
     npt.assert_array_almost_equal(fluxes, fluxes_expected)
def entry_point():
    """Command-line driver: fit a rate matrix and derive a transition matrix.

    Loads assignments, runs the fit via run(), exponentiates the rate
    matrix K into a transition matrix T, and writes both to args.output_dir.
    """
    # NOTE(review): matplotlib is imported but not referenced here;
    # presumably needed for plotting done inside run() -- confirm.
    import matplotlib
    args = parser.parse_args()

    # new files store the array under 'arr_0'; older ones under 'Data'
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')

    K = run(assignments)

    # transition matrix from the rate matrix: T = exp(K)
    T = scipy.linalg.matfuncs.expm(K)

    np.savetxt(os.path.join(args.output_dir, "Rate.dat"), K)
    scipy.io.mmwrite(os.path.join(args.output_dir, "tProb.mtx.tl"), T)
Exemplo n.º 27
0
    def test_e_BuildMSM(self):
        """Build an MSM and compare mapping, populations, counts and
        transition matrices against the reference model."""
        assignments = io.loadh("Data/Assignments.h5", 'arr_0')
        BuildMSM.run(Lagtime, assignments, Symmetrize="MLE")

        # Mapping
        npt.assert_array_almost_equal(
            np.loadtxt("Data/Mapping.dat"),
            np.loadtxt(ReferenceDir + "/Data/Mapping.dat"),
            err_msg="Mapping.dat incorrect")

        # Populations
        npt.assert_array_almost_equal(
            np.loadtxt("Data/Populations.dat"),
            np.loadtxt(ReferenceDir + "/Data/Populations.dat"),
            err_msg="Populations.dat incorrect")

        # Counts matrix: compare normalized count differences so the test
        # does not depend on the arbitrary total number of counts
        # (KAB 4-5-2012); the relative number of counts in the current and
        # reference models DOES matter, however.
        C = scipy.io.mmread("Data/tCounts.mtx")
        C_ref = scipy.io.mmread(ReferenceDir + "/Data/tCounts.mtx")
        count_diff = (C - C_ref).data / C_ref.sum()
        npt.assert_array_almost_equal(count_diff, 0. * count_diff,
                                      err_msg="tCounts.mtx incorrect")

        # Transition matrix
        T = scipy.io.mmread("Data/tProb.mtx")
        T_ref = scipy.io.mmread(ReferenceDir + "/Data/tProb.mtx")
        prob_diff = (T - T_ref).data
        npt.assert_array_almost_equal(prob_diff, 0. * prob_diff,
                                      err_msg="tProb.mtx incorrect")
Exemplo n.º 28
0
    def test_e_BuildMSM(self):
        """Build an MSM at Lagtime with MLE symmetrization and compare
        mapping, populations, counts and transition matrices to refs."""
        Assignments = io.loadh("Data/Assignments.h5", 'arr_0')
        BuildMSM.run(Lagtime, Assignments, Symmetrize="MLE")
        # Test mapping
        m = np.loadtxt("Data/Mapping.dat")
        r_m = np.loadtxt(ReferenceDir + "/Data/Mapping.dat")
        npt.assert_array_almost_equal(m, r_m, err_msg="Mapping.dat incorrect")

        # Test populations
        p = np.loadtxt("Data/Populations.dat")
        r_p = np.loadtxt(ReferenceDir + "/Data/Populations.dat")
        npt.assert_array_almost_equal(p,
                                      r_p,
                                      err_msg="Populations.dat incorrect")

        # Test counts matrix
        C = scipy.io.mmread("Data/tCounts.mtx")
        r_C = scipy.io.mmread(ReferenceDir + "/Data/tCounts.mtx")
        # D holds the sparse difference entries; Z is the all-zero target
        D = (C - r_C).data
        Z = 0. * D

        D /= r_C.sum(
        )  #KAB 4-5-2012.  We want the normalized counts to agree at 7 decimals
        #normalizing makes this test no longer depend on an arbitrary scaling factor (the total number of counts)
        #the relative number of counts in the current and reference models DOES matter, however.

        npt.assert_array_almost_equal(D, Z, err_msg="tCounts.mtx incorrect")

        # Test transition matrix
        T = scipy.io.mmread("Data/tProb.mtx")
        r_T = scipy.io.mmread(ReferenceDir + "/Data/tProb.mtx")
        D = (T - r_T).data
        Z = 0. * D
        npt.assert_array_almost_equal(D, Z, err_msg="tProb.mtx incorrect")
Exemplo n.º 29
0
 def test_g_GetRandomConfs(self):
     """Draw random conformations with a fixed RNG seed and compare them
     with the stored reference trajectory."""
     project = Project.load_from(ProjectFn)
     assignments = io.loadh("Data/Assignments.Fixed.h5", 'arr_0')

     # seed the RNG with 42 so the draw is reproducible
     rng = np.random.RandomState(42)
     randomconfs = GetRandomConfs.run(project, assignments,
                                      NumRandomConformations, rng)

     reference = Trajectory.load_trajectory_file(
         os.path.join(ReferenceDir, "2RandomConfs.lh5"))
     self.assert_trajectories_equal(reference, randomconfs)
Exemplo n.º 30
0
def load(filename):
    """Load *filename* into an appropriate in-memory object.

    The loader is chosen heuristically from the file name/extension:
    trajectories (.lh5/.pdb), flat text (.dat; AtomIndices.dat as ints),
    project files (ProjectInfo.yaml/ProjectInfo.h5), serializer files
    (.h5/.hdf/.h5.distances), and matrix-market matrices (.mtx).

    Raises:
        TypeError: if no loader matches the filename.
    """
    # delay these imports, since this module is loaded in a bunch
    # of places but not necessarily used
    import scipy.io
    from msmbuilder import Trajectory, io, Project

    # the filename extension
    ext = os.path.splitext(filename)[1]

    # load trajectories
    if ext in ['.lh5', '.pdb']:
        val = Trajectory.load_trajectory_file(filename)

    # load flat text files
    elif 'AtomIndices.dat' in filename:
        # try loading AtomIndices first, because the default for loadtxt
        # is to use floats. (The old `np.int` alias was deprecated in
        # NumPy 1.20 and removed in 1.24 -- use the builtin int.)
        val = np.loadtxt(filename, dtype=int)
    elif ext in ['.dat']:
        # try loading general .dats with floats
        val = np.loadtxt(filename)

    # short circuit opening ProjectInfo
    elif ('ProjectInfo.yaml' in filename) or ('ProjectInfo.h5' in filename):
        val = Project.load_from(filename)

    # load serializer files that end with .h5, .hdf or .h5.distances
    # (for 'x.h5.distances' the splitext extension is '.distances')
    elif ext in ['.h5', '.hdf'] or filename.endswith('.h5.distances'):
        val = io.loadh(filename, deferred=False)

    # load matricies
    elif ext in ['.mtx']:
        val = scipy.io.mmread(filename)

    else:
        raise TypeError("I could not infer how to load this file. You "
            "can either request load=False, or perhaps add more logic to "
            "the load heuristics in this class: %s" % filename)

    return val
Exemplo n.º 31
0
 def test_i_CalculateRMSD(self):
     """Compute RMSDs of the generators against the PDB and compare with
     the reference RMSD.dat."""
     outpath = os.path.join(WorkingDir, "RMSD_Gens.h5")
     cmd = 'CalculateProjectDistance.py -s %s -t %s -o %s rmsd -a %s' % (
         PDBFn, "Data/Gens.lh5", outpath, "AtomIndices.dat")
     os.system(cmd)

     computed = io.loadh(outpath, 'arr_0')
     expected = np.loadtxt(os.path.join(ReferenceDir, "RMSD.dat"))
     npt.assert_array_almost_equal(computed, expected)
Exemplo n.º 32
0
    def test_g_GetRandomConfs(self):
        """Draw random conformations (seeded RNG) and compare them with the
        reference trajectory."""
        P1 = Project.load_from(ProjectFn)
        Assignments = io.loadh("Data/Assignments.Fixed.h5", 'arr_0')

        # make a predictable stream of random numbers by seeding the RNG with 42
        random_source = np.random.RandomState(42)
        randomconfs = GetRandomConfs.run(P1, Assignments,
                                         NumRandomConformations, random_source)

        reference = Trajectory.load_trajectory_file(
            os.path.join(ReferenceDir, "2RandomConfs.lh5"))
        self.assert_trajectories_equal(reference, randomconfs)
Exemplo n.º 33
0
    def test_multi_state_path_calculations(self):
        """Find top paths with multiple sources/sinks and compare paths,
        bottlenecks and fluxes to the bundled reference file."""
        path_output = FindPaths.run(self.tprob, self.multi_sources, self.multi_sinks, self.num_paths)

        # the reference bundle stores all three arrays in one .h5 file
        # (note the lowercase 'fluxes' key, unlike 'Paths'/'Bottlenecks')
        path_result_ref = io.loadh(tpt_get("many_state/Paths.h5"))

        paths_ref = path_result_ref['Paths']
        bottlenecks_ref = path_result_ref['Bottlenecks']
        fluxes_ref = path_result_ref['fluxes']

        npt.assert_array_almost_equal(path_output[0], paths_ref)
        npt.assert_array_almost_equal(path_output[1], bottlenecks_ref)
        npt.assert_array_almost_equal(path_output[2], fluxes_ref)
Exemplo n.º 34
0
    def test_i_CalculateRMSD(self):
        """Compare generator RMSDs computed by the script to RMSD.dat."""
        outpath = os.path.join(WorkingDir, "RMSD_Gens.h5")
        os.system('CalculateProjectDistance.py -s %s -t %s -o %s rmsd -a %s' %
                  (PDBFn, "Data/Gens.lh5", outpath, "AtomIndices.dat"))

        actual = io.loadh(outpath, 'arr_0')
        desired = np.loadtxt(os.path.join(ReferenceDir, "RMSD.dat"))
        npt.assert_array_almost_equal(actual, desired)
Exemplo n.º 35
0
    def test_multi_state_path_calculations(self):
        """Multi-source/multi-sink FindPaths output must match the bundled
        reference arrays (paths, bottlenecks, fluxes)."""
        path_output = FindPaths.run(self.tprob, self.multi_sources, self.multi_sinks, self.num_paths)

        # single .h5 bundle; key casing is inconsistent ('fluxes' lowercase)
        path_result_ref = io.loadh(tpt_get("many_state/Paths.h5"))

        paths_ref = path_result_ref['Paths']
        bottlenecks_ref = path_result_ref['Bottlenecks']
        fluxes_ref = path_result_ref['fluxes']

        npt.assert_array_almost_equal(path_output[0], paths_ref)
        npt.assert_array_almost_equal(path_output[1], bottlenecks_ref)
        npt.assert_array_almost_equal(path_output[2], fluxes_ref)
Exemplo n.º 36
0
 def test_TP_time(self):
     """Check the average transition-path time against the reference."""
     computed = tpt.calculate_avg_TP_time(self.sources, self.sinks,
                                          self.tprob, self.lag_time)
     npt.assert_array_almost_equal(computed,
                                   io.loadh(tpt_get("tp_time.h5"), 'Data'))
Exemplo n.º 37
0
 def test_committors(self):
     """Check committor probabilities against the stored reference."""
     committors = tpt.calculate_committors(self.sources, self.sinks,
                                           self.tprob)
     expected = io.loadh(tpt_get("committors.h5"), 'Data')
     npt.assert_array_almost_equal(committors, expected)
Exemplo n.º 38
0
 def test_committors(self):
     """Committor probabilities must match the reference values."""
     computed = tpt.calculate_committors(self.sources, self.sinks, self.tprob)
     ref_path = os.path.join(self.tpt_ref_dir, "committors.h5")
     npt.assert_array_almost_equal(computed, io.loadh(ref_path, 'Data'))
Exemplo n.º 39
0
        K=K0.copy() * float(LagTime)
        C0=MSMLib.get_count_matrix_from_assignments(assignments, lag_time=LagTime).toarray()
        Counts=C0.sum(1)
        Counts/=LagTime     
        X2=SCRE.MaximizeRateLikelihood(X,M,populations,C0,K)
        K=SCRE.ConstructRateFromParams(X2,M,populations,K)
        K/=(LagTime)
        KList.append(K)
        counts_list.append(Counts)

    KList=np.array(KList)
    SCRE.PlotRates(KList,lagtime_list,counts_list)
    return KList

run = interactive_scre  # public alias for the interactive SCRE driver

if __name__ == "__main__":
    args = parser.parse_args()

    # new files store the array under 'arr_0'; older ones under 'Data'
    try:
        assignments = io.loadh(args.assignments, 'arr_0')
    except KeyError:
        assignments = io.loadh(args.assignments, 'Data')

    K = run(assignments)

    # transition matrix from the fitted rate matrix: T = exp(K)
    T = scipy.linalg.matfuncs.expm(K)

    np.savetxt(os.path.join(args.output_dir, "Rate.dat"), K)
    scipy.io.mmwrite(os.path.join(args.output_dir, "tProb.mtx.tl"), T)
Exemplo n.º 40
0
 def test_TP_time(self):
     """Average transition-path time must match the stored reference."""
     result = tpt.calculate_avg_TP_time(self.sources, self.sinks, self.tprob,
                                        self.lag_time)
     ref_file = os.path.join(self.tpt_ref_dir, "tp_time.h5")
     npt.assert_array_almost_equal(result, io.loadh(ref_file, 'Data'))