def run_filter(self, f):
    """Run one Rosetta filter on self.pose and attach its score to the pose.

    The score is stored as an extra pose score named
    'EXTRA_METRIC_<user defined name>' so it is written into the output PDB.

    Two filters get special handling:
      * FragmentScoreFilter — sparks-x sometimes fails when driven from
        C++; on failure we delete its stale profile file, regenerate it
        from Python via buildinp_query.sh, and retry once.
      * BuriedUnsatHbondFilter — commonly fails when DAlphaBall is not
        installed; we print setup guidance, then re-raise.
    """
    if f.name() == 'FragmentScoreFilter':
        # FragmentScoreFilter can be finicky and sometimes requires you
        # to run sparks-x outside of C++. I have no idea why, just roll
        # with it.
        try:
            # First try to run FSF because it needs to generate the
            # other sequence profile files
            score = f.report_sm(self.pose)
        except Exception:  # narrowed from a bare except; retry is best-effort
            import subprocess
            # Now get rid of the empty .fasta.phipsi file it creates
            # and try again in Python
            rempath = os.path.join(self.workspace.seqprof_dir,
                                   '{}.fasta.phipsi'.format(self.task_id))
            print('REMOVING {}'.format(rempath))
            os.remove(rempath)
            # Debug breadcrumbs for cluster log files.
            print('CWD: {}'.format(os.getcwd()))
            print('TASK: {}'.format(self.task_id))
            print('FILES IN CWD:')
            print(os.listdir(os.getcwd()))
            cmd = [
                '/wynton/home/kortemme/krivacic/software/fragments/sparks-x/bin/buildinp_query.sh',
                os.path.join(self.workspace.seqprof_dir,
                             '{}.fasta'.format(self.task_id)),
            ]
            # Argument-list invocation (no shell); sparks-x needs
            # SPARKSXDIR in its environment on top of the current one.
            subprocess.run(
                cmd,
                env=dict(
                    SPARKSXDIR='/wynton/home/kortemme/krivacic/software/fragments/sparks-x',
                    **os.environ))
            score = f.report_sm(self.pose)
    elif f.name() == 'BuriedUnsatHbondFilter':
        try:
            score = f.report_sm(self.pose)
        except Exception:  # narrowed from a bare except
            print('Buried Unsats FAILED; make sure DAlphaBall is '
                  'installed and working. You may need to install '
                  'gfortran \n(conda install -c anaconda '
                  'gfortran_<OS>-64 gmp)')
            print('Once installed, add the library path to your '
                  'environment.\n'
                  'Ex. export '
                  'LD_LIBRARY_PATH=anaconda/lib:LD_LIBRARY_PATH')
            # Bug fix: the original fell through with `score` unbound and
            # crashed below with a NameError; re-raise so the actual
            # failure is visible after the guidance is printed.
            raise
    else:
        score = f.report_sm(self.pose)
    fname = 'EXTRA_METRIC_' + f.get_user_defined_name()
    setPoseExtraScore(self.pose, fname, score)
def __setitem__(self, key, value):
    """Store *value* under *key* as an extra score on the wrapped pose.

    Raises ValueError if *key* collides with a reserved energy name.
    """
    if key in self._reserved:
        raise ValueError(
            "Can not set score key with reserved energy name: %r" % key
        )
    # The underlying score maps can hold duplicate keys across map types,
    # so setting is done in two steps.  First check whether any entry for
    # this key already exists, then *attempt* the set — this triggers the
    # type-conversion checks before anything is deleted.
    #
    # Only once the set has succeeded do we clear the cached entries and
    # set once more, which guarantees no duplicate keys remain.
    already_present = key in self
    setPoseExtraScore(self.pose, key, value)
    if already_present:
        del self[key]
        setPoseExtraScore(self.pose, key, value)
# Post-design scoring: apply the configured mover, compare the result to
# the input structure, run the workspace filters, and record metrics that
# will be written at the end of the output PDB.
# (This chunk starts mid-function; the enclosing def is not visible here.)
print(fd.movemap)
print(fd.task_factory)
fd.apply()
# Create new pose from input file for comparison
input_pose = pose_from_file(pdbpath)
#input_pose = pose_from_file(workspace.input_pdb_path)
# Calculate several different types of RMSD
ca_rmsd = CA_rmsd(fd.pose, input_pose)
# NOTE(review): rebinding `all_atom_rmsd` shadows the function of the same
# name; inside a function this makes the name local and the call raises
# UnboundLocalError.  Rename the local (e.g. aa_rmsd) — confirm against
# the enclosing scope.
all_atom_rmsd = all_atom_rmsd(fd.pose, input_pose)
filters = workspace.get_filters(
    fd.pose,
    task_id=job_info['task_id'],
    score_fragments=False,
    test_run=test_run)
filters.run_filters()
# Add RMSDs as extra metrics, which will be printed at the end of
# the PDB file.
setPoseExtraScore(fd.pose, 'EXTRA_METRIC_CA_RMSD', ca_rmsd)
setPoseExtraScore(fd.pose, 'EXTRA_METRIC_AllAtom_RMSD', all_atom_rmsd)
total_time = time.time() - start_time
setPoseExtraScore(fd.pose, 'EXTRA_METRIC_Run time', total_time)
# Save final pose as a pdb file.
input_name = os.path.basename(pdbpath).split(".")[0]
out = workspace.output_prefix(
    job_info) + input_name + workspace.output_suffix(job_info) + '.pdb.gz'
# NOTE(review): `pose` is not defined anywhere in this chunk — `fd.pose`
# (the pose all metrics were attached to) was likely intended; confirm.
pose.dump_pdb(out)
    # (continuation of a call begun on an earlier line; its opening is not
    # visible in this chunk)
    # NOTE(review): list.extend() mutates in place and returns None, so
    # `chi` is always None here — presumably `designable + repackable` was
    # intended; confirm against the callee's expectations.
    chi=designable.extend(repackable))
# Shorten the run for quick smoke tests.
if test_run:
    fd.rounds = 1
print(fd.movemap)
print(fd.task_factory)
fd.apply()
# Create new pose from input file for comparison
input_pose = pose_from_file(pdbpath)
#input_pose = pose_from_file(workspace.input_pdb_path)
# Calculate several different types of RMSD
ca_rmsd = CA_rmsd(fd.pose, input_pose)
# NOTE(review): rebinding `all_atom_rmsd` shadows the function of the same
# name; inside a function this makes the name local and the call raises
# UnboundLocalError.  Rename the local (e.g. aa_rmsd) — confirm against
# the enclosing scope.
all_atom_rmsd = all_atom_rmsd(fd.pose, input_pose)
filters = workspace.get_filters(
    fd.pose,
    task_id=job_info['task_id'],
    score_fragments=False,
    test_run=test_run)
filters.run_filters()
# Add RMSDs as extra metrics, which will be printed at the end of
# the PDB file.
setPoseExtraScore(fd.pose, 'EXTRA_METRIC_CA_RMSD', ca_rmsd)
setPoseExtraScore(fd.pose, 'EXTRA_METRIC_AllAtom_RMSD', all_atom_rmsd)
# Save final pose as a pdb file.
input_name = os.path.basename(pdbpath).split(".")[0]
out = workspace.output_prefix(job_info) + input_name + \
    workspace.output_suffix(job_info) + '.pdb.gz'
# NOTE(review): `pose` is not defined in this chunk — `fd.pose` (the pose
# all metrics were attached to) was likely intended; confirm.
pose.dump_pdb(out)
def main():
    """Run AbinitioRelax on this task's input, then score and filter the result.

    Reads the job description from big_jobs.initiate(), shells out to the
    Rosetta AbinitioRelax binary, loads the resulting structure, attaches
    RMSD and run-time metrics as extra pose scores, runs the workspace
    filters, and dumps the final pose into a 'filtered' subfolder.
    """
    args = docopt.docopt(__doc__)
    print(args)
    cluster.require_qsub()
    start_time = time.time()
    workspace, job_info = big_jobs.initiate()
    pdbpath = workspace.input_path(job_info)
    # makedirs(exist_ok=True) instead of exists()+mkdir: avoids the
    # check-then-create race when many cluster tasks start simultaneously.
    os.makedirs(workspace.output_prefix(job_info), exist_ok=True)
    outpath = workspace.output_path(job_info)
    test_run = job_info.get('test_run', False)

    # append location of Rosetta binaries to path
    sys.path.append('/wynton/home/kortemme/krivacic/source/bin')

    # Find the fragment / secondary-structure files for this input.
    # Bug fix: glob.glob returns a *list*; passing a list as a single argv
    # element makes subprocess.call raise TypeError.  AbinitioRelax takes
    # one file per flag, so use the first match.
    tmers = glob.glob(
        os.path.join(workspace.fragments_dir,
                     workspace.fragments_tag(pdbpath) + '?',
                     '*3mers.gz'))[0]
    nmers = glob.glob(
        os.path.join(workspace.fragments_dir,
                     workspace.fragments_tag(pdbpath) + '?',
                     '*9mers.gz'))[0]
    ss2 = glob.glob(
        os.path.join(workspace.fragments_dir,
                     workspace.fragments_tag(pdbpath) + '?',
                     '*psipred_ss2'))[0]

    # Run the ab initio relaxation script.
    relax_abinitio = [
        'AbinitioRelax.linuxgccrelease',
        '-abinitio:relax',
        '-use_filters', 'true',
        '-abinitio::increase_cycles', '10',
        '-abinitio::rg_reweight', '0.5',
        '-abinitio::rsd_wt_helix', '0.5',
        '-abinitio::rsd_wt_loop', '0.5',
        '-relax::fast',
        '-in:file:fasta', workspace.fasta_path,
        '-in:file:frag3', tmers,
        '-in:file:frag9', nmers,
        '-in:file:psipred_ss2', ss2,
        # Bug fix: docopt returns a dict, so the original `args.nstruct`
        # attribute access raised AttributeError.
        # NOTE(review): assumes the usage string names this option
        # '--nstruct' — confirm against __doc__.
        '-nstruct', str(args['--nstruct']),
        '-out:pdb_gz',
        '-out:prefix', workspace.output_prefix(job_info),
        '-out:suffix', workspace.output_suffix(job_info),
        # '--outdir', workspace.fragments_dir,
        # '--memfree', args['--mem-free']
    ]
    if args['--dry-run']:
        print(' '.join(relax_abinitio))
    else:
        subprocess.call(relax_abinitio)

    # Score the relaxed structure against the original input.
    init()
    pose = pose_from_file(outpath)
    input_pose = pose_from_file(pdbpath)
    ca_rmsd = CA_rmsd(pose, input_pose)
    # Bug fix: the original assigned to `all_atom_rmsd`, making the name
    # function-local and raising UnboundLocalError at the call; use a
    # distinct local name instead.
    aa_rmsd = all_atom_rmsd(pose, input_pose)
    setPoseExtraScore(pose, 'EXTRA_METRIC_CA_RMSD [[-]]', ca_rmsd)
    setPoseExtraScore(pose, 'EXTRA_METRIC_AllAtom_RMSD [[-]]', aa_rmsd)
    filters = workspace.get_filters(
        pose,
        task_id=job_info['task_id'],
        score_fragments=not test_run,
        test_run=test_run)
    filters.run_filters()
    total_time = time.time() - start_time
    setPoseExtraScore(pose, 'EXTRA_METRIC_Run time', total_time)

    outname = os.path.basename(outpath)
    outfolder = os.path.join(workspace.output_prefix(job_info), 'filtered')
    # Make sure the destination folder exists before dumping.
    os.makedirs(outfolder, exist_ok=True)
    # Bug fix: the original joined the full (absolute) outpath onto
    # outfolder, which discards outfolder entirely; use the basename
    # computed above (which was previously unused — clearly the intent).
    pose.dump_pdb(os.path.join(outfolder, outname))
    # (continuation of a call begun on an earlier line — appears to pass the
    # DAlphaBall and input-PDB paths; the opening is not visible in this chunk)
    dalphaball_path, pdbpath))
# Keep Rosetta single-threaded on the cluster node.
relax.add_init_arg('-total_threads 1')
# Shorten the run for quick smoke tests.
if test_run:
    relax.rounds = 1
relax.pose = pose
# Warning: This is an all-atom movemap. Constrain to input coords if
# you don't want things to move around a lot, or set a different
# movemap.
relax.setup_default_movemap()
# Constrain relax to start coords.
relax.mover.constrain_relax_to_start_coords(True)
relax.apply()
# Compare the relaxed pose to the workspace's original input structure.
input_pose = pose_from_file(workspace.input_pdb_path)
ca_rmsd = CA_rmsd(relax.pose, input_pose)
# Only score fragments when a loops file exists for this workspace.
score_fragments = os.path.exists(workspace.loops_path)
filters = workspace.get_filters(
    relax.pose,
    task_id=job_info['task_id'],
    score_fragments=score_fragments,
    test_run=test_run)
filters.run_filters()
input_name = os.path.basename(pdbpath).split(".")[0]
out = workspace.output_prefix(
    job_info) + input_name + workspace.output_suffix(job_info) + '.pdb.gz'
setPoseExtraScore(relax.pose, 'EXTRA_METRIC_CA_RMSD', ca_rmsd)
# NOTE(review): the extra score is set on relax.pose but `pose` is dumped.
# This is only correct if relax.pose is the same object assigned above —
# confirm, or dump relax.pose explicitly.
pose.dump_pdb(out)
# (tail of a loop from earlier in the file; `i` and `posedict` are
# populated above this chunk)
i += 1
sfxn = create_score_function('ref2015')
# Create new pose from input file for comparison
input_pose = pose_from_file(pdbpath)
# NOTE(review): `i` is reused as the loop variable here, clobbering the
# counter incremented above — confirm the counter is not needed later.
for i in posedict:
    pose = posedict[i]
    # Score both poses so energies are populated before RMSD/filtering.
    # NOTE(review): sfxn(input_pose) is loop-invariant and could be
    # hoisted out of the loop.
    sfxn(input_pose)
    sfxn(pose)
    ca_rmsd = CA_rmsd(pose, input_pose)
    aa_rmsd = all_atom_rmsd(pose, input_pose)
    # Add RMSDs to pose for parsing
    setPoseExtraScore(pose, 'EXTRA_METRIC_CA_RMSD', ca_rmsd)
    setPoseExtraScore(pose, 'EXTRA_METRIC_AllAtom_RMSD', aa_rmsd)
    #input_pose = pose_from_file(workspace.input_pdb_path)
    # Calculate several different types of RMSD
    filters = workspace.get_filters(
        pose,
        task_id=str(job_info['task_id']) + '_' + str(i),
        score_fragments=False,
        test_run=test_run,
        fragment_full_chain=1)
    filters.run_filters()
    # Save final pose as a pdb file.
    input_name = os.path.basename(pdbpath).split('.')[0]
    out = workspace.output_prefix(job_info) + \
        input_name + '_{}'.format(i) + '.pdb.gz'
    # (chunk ends here; the dump presumably follows beyond this view)