def test_generating_clusters(self):
    """Check that clustering reproduces the solutions obtained during
    the benchmark for the paper.
    """
    emagefit_cluster = \
        self.import_python_application('emagefit_cluster')
    fn_config = self.get_input_file_name("config.py")
    fn_database = self.get_input_file_name("domino_solutions.db")
    fn_db_clusters = "clusters.db"
    # Rewrite the PDB file names so they point at the test input directory.
    exp = utility.get_experiment_params(fn_config)
    for idx, fn_pdb in enumerate(exp.fn_pdbs):
        exp.fn_pdbs[idx] = self.get_input_file_name(fn_pdb)
    n_solutions = 30
    orderby = "em2d"
    max_rmsd = 10
    clustering = emagefit_cluster.AlignmentClustering(exp)
    clustering.cluster(fn_database, n_solutions, orderby, max_rmsd)
    clustering.store_clusters(fn_db_clusters, "clusters")
    # The largest cluster must contain exactly the benchmark solution ids.
    benchmark_ids = '9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29'
    expected = [int(tok) for tok in benchmark_ids.split("|")]
    db_clusters = solutions_io.ResultsDB()
    db_clusters.connect(fn_db_clusters)
    record = db_clusters.get_nth_largest_cluster(1)
    found = [int(tok) for tok in record.elements.split("|")]
    for expected_id, found_id in zip(expected, found):
        self.assertEqual(expected_id, found_id)
    db_clusters.close()
    os.remove(fn_db_clusters)
def test_generating_clusters(self):
    """Check that clustering reproduces the solutions obtained during
    the benchmark for the paper.
    """
    em2d_cluster_solutions = \
        self.import_python_application('em2d_cluster_solutions.py')
    fn_config = self.get_input_file_name("config.py")
    fn_database = self.get_input_file_name("domino_solutions.db")
    # Write the clusters database to the working directory rather than the
    # test input directory: the original wrote (and later deleted) a file
    # among the test inputs, mutating shared test data.
    fn_db_clusters = "clusters.db"
    # modify the names of the PDB files to include the proper path for testing
    exp = utility.get_experiment_params(fn_config)
    for i in range(len(exp.fn_pdbs)):
        exp.fn_pdbs[i] = self.get_input_file_name(exp.fn_pdbs[i])
    n_solutions = 30
    orderby = "em2d"
    max_rmsd = 10
    tc = em2d_cluster_solutions.AlignmentClustering(exp)
    tc.cluster(fn_database, n_solutions, orderby, max_rmsd)
    tc.store_clusters(fn_db_clusters, "clusters")
    # retrieve the largest cluster and compare with the benchmark solution ids
    solutions_stored = '9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29'
    expected = [int(x) for x in solutions_stored.split("|")]
    db_clusters = solutions_io.ResultsDB()
    db_clusters.connect(fn_db_clusters)
    cl_record = db_clusters.get_nth_largest_cluster(1)
    elements = [int(x) for x in cl_record.elements.split("|")]
    for i, j in zip(expected, elements):
        self.assertEqual(i, j)
    db_clusters.close()
    os.remove(fn_db_clusters)
def test_generate_model(self):
    """Smoke-test that Monte Carlo modeling runs and writes its outputs."""
    # Skip the test when the optional dependencies are not installed.
    try:
        import networkx
        import subprocess
    except ImportError as detail:
        self.skipTest(str(detail))
    domino_model = self.import_python_application('emagefit')
    fn_config = self.get_input_file_name("config.py")
    exp = utility.get_experiment_params(fn_config)
    fn_database = "monte_carlo_output_database.db"
    domino_model.generate_monte_carlo_model(exp, fn_database, seed=-1,
                                            write_solution=True)
    # Both the database and the solution PDB must exist and be non-empty.
    fn_pdb = fn_database + ".pdb"
    for fn in (fn_database, fn_pdb):
        self.assertTrue(os.path.exists(fn))
        self.assertGreater(os.path.getsize(fn), 0)
    # Exactly one solution must have been stored in the database.
    db = solutions_io.ResultsDB()
    db.connect(fn_database)
    self.assertEqual(len(db.get_solutions()), 1)
    os.remove(fn_database)
    os.remove(fn_pdb)
def test_generate_model(self):
    """Smoke-test that DOMINO modeling runs and stores scored solutions."""
    # Skip the test when the optional dependencies are not installed.
    try:
        import networkx
        import subprocess
    except ImportError as detail:
        self.skipTest(str(detail))
    domino_model = self.import_python_application('emagefit')
    IMP.base.set_log_level(IMP.base.SILENT)
    fn_config = self.get_input_file_name("config.py")
    exp = utility.get_experiment_params(fn_config)
    fn_output_db = "domino_solutions_temp.db"
    domino_model.generate_domino_model(exp, fn_output_db)
    # A non-empty database of results must have been created.
    self.assertTrue(os.path.exists(fn_output_db))
    self.assertGreater(os.path.getsize(fn_output_db), 0)
    # There must be at least one solution, and the results table must
    # record the em2d score.
    db = solutions_io.ResultsDB()
    db.connect(fn_output_db)
    self.assertGreater(len(db.get_solutions()), 0)
    self.assertTrue("em2d" in db.get_table_column_names("results"))
    os.remove(fn_output_db)
max_number=max_number_of_results, orderby=args.orderby) log.info("Merging done. Time %s", time.time() - t0) sys.exit() if args.gather: t0 = time.time() solutions_io.gather_solution_results(args.gather, args.fn_database) log.info("Gathering done. Time %s", time.time() - t0) sys.exit() if args.write: if not args.fn_database or not args.orderby: raise ValueError("Writting solutions requires the database of " \ "results and the name of the restraint to order by") if args.orderby == "False": args.orderby = False exp = utility.get_experiment_params(arg.fn_params) write_pdbs_for_solutions(exp, args.fn_database, args.write, args.orderby, ) sys.exit() if args.write_cluster: if not args.fn_database: raise ValueError("Writting clusters requires the database file") fn_db_clusters = args.write_cluster[0] position = int(args.write_cluster[1]) exp = utility.get_experiment_params(arg.fn_params) write_nth_largest_cluster(exp, args.fn_database, fn_db_clusters, position ) sys.exit() if args.cdrms:
"clustering") parser.add_option("--log", dest="log", default=None, help="File for logging") parser.add_option("--rmsd", type=float, dest="max_rmsd", default=10, help="Maximum rmsd centroids to define clusters") args = parser.parse_args() args = args[0] if(len(sys.argv) == 1): parser.print_help() sys.exit() if(args.log): logging.basicConfig(filename=args.log, filemode="w") else: logging.basicConfig(stream=sys.stdout) logging.root.setLevel(logging.DEBUG) if(args.fn_database): if(not args.n_solutions or not args.orderby): raise ValueError("parameters --n and --orderby required") exp = utility.get_experiment_params(args.experiment) tc = AlignmentClustering(exp) tc.cluster(args.fn_database, args.n_solutions, args.orderby, args.max_rmsd) tc.store_clusters(args.fn_output_db, "clusters")
max_number=max_number_of_results, orderby=args.orderby) log.info("Merging done. Time %s", time.time() - t0) quit() if args.gather: t0 = time.time() solutions_io.gather_solution_results(args.gather, args.fn_database) log.info("Gathering done. Time %s", time.time() - t0) quit() if args.write: if not args.fn_database or not args.orderby: raise ValueError("Writing solutions requires the database of " \ "results and the name of the restraint to order by") if args.orderby == "False": args.orderby = False params = utility.get_experiment_params(args.fn_params) write_pdbs_for_solutions(params, args.fn_database, args.write, args.orderby, ) quit() if args.write_cluster: if not args.fn_database: raise ValueError("Writing clusters requires the database file") fn_db_clusters = args.write_cluster[0] position = int(args.write_cluster[1]) params = utility.get_experiment_params(args.fn_params) write_nth_largest_cluster(params, args.fn_database, fn_db_clusters, position ) quit() if args.cdrms: