Example 1
0
def run(nativePdb, nativeMtz=None, nativeMap=None, mrPdbs=None, outDir=None):
    """Wrap MR-placed PDB models onto the native structure.

    For each PDB in *mrPdbs*, determine the origin shift relative to the
    native (via phenix or shelxe), translate the model onto that origin,
    then run csymmatch to map it onto the native symmetry. Matched PDBs
    are written to *outDir*.

    Args:
        nativePdb: path to the native PDB file.
        nativeMtz: path to the native MTZ file (required for the shelxe route).
        nativeMap: precomputed native map; generated from nativePdb/nativeMtz
            if absent (phenix route only).
        mrPdbs: iterable of MR PDB file paths to wrap onto the native.
        outDir: output directory for the matched PDBs; defaults to the
            current working directory.

    Raises:
        RuntimeError: if outDir does not exist or nativeMap cannot be found.
    """
    logging.info("Using native PDB file: %s", os.path.abspath(nativePdb))
    logging.info("Using native MTZ file: %s", os.path.abspath(nativeMtz))
    # NOTE: the files joined here are the MR PDBs, not MTZ files.
    logging.info("Wrapping the following PDB files to the native: %s",
                 " ".join(mrPdbs))

    # Origin-search backend selector; currently hard-wired to the shelxe route.
    phenix = False

    if outDir is not None:
        if not os.path.isdir(outDir):
            raise RuntimeError("Cannot find output directory: {}".format(outDir))
        outDir = os.path.abspath(outDir)
    else:
        outDir = os.getcwd()

    if phenix:
        if nativeMap is None:
            nativeMap = generateMap(nativePdb, nativeMtz)
        if not os.path.isfile(nativeMap):
            raise RuntimeError("Cannot find nativeMap: {}".format(nativeMap))
    else:
        shelxeExe = ample_util.find_exe('shelxe')

    removables = []
    for mrPdb in mrPdbs:
        if phenix:
            logging.debug("Searching for origin shift using: %s %s", nativeMap,
                          mrPdb)
            origin = phenixer.ccmtzOrigin(nativeMap, mrPdb)
            # offset.pdb is the mrPdb moved onto the new origin
            offsetPdb = "offset.pdb"
            logging.debug("Found origin: %s\nOffset pdb is: %s", origin,
                          offsetPdb)
        else:
            mrinfo = shelxe.MRinfo(shelxeExe, nativePdb, nativeMtz)
            mrinfo.analyse(mrPdb)
            originShift = mrinfo.originShift
            logging.debug("Found origin: {0}".format(originShift))
            offsetPdb = ample_util.filename_append(mrPdb,
                                                   astr='offset',
                                                   directory=os.getcwd())
            pdb_edit.translate(mrPdb, offsetPdb, originShift)

        csymmPdb = ample_util.filename_append(filename=mrPdb,
                                              astr="csymmatch",
                                              directory=outDir)
        logging.debug("Running csymmatch to wrap %s onto native %s", offsetPdb,
                      nativePdb)
        csymmatch.Csymmatch().run(refPdb=nativePdb,
                                  inPdb=offsetPdb,
                                  outPdb=csymmPdb,
                                  originHand=False)
        removables += [offsetPdb]
        logging.info("Matched PDB is: %s", csymmPdb)

    # BUGFIX: in Python 3 map() is lazy, so the previous
    # `map(os.remove, ...)` never actually deleted anything.
    # Use an explicit loop and tolerate already-missing files.
    for fpath in removables + ["shelxe-input.hkl", "shelxe-input.ent"]:
        try:
            os.remove(fpath)
        except OSError:
            logging.debug("Could not remove file: %s", fpath)
Example 2
0
    def test_shelxe_1BYZ(self):
        """Check shelxe origin shift and wMPE for the 1BYZ test case."""
        exe = ample_util.find_exe('shelxe' + ample_util.EXE_EXT)
        native_pdb = os.path.join(self.testfiles_dir, "1BYZ.pdb")
        native_mtz = os.path.join(self.testfiles_dir, "1BYZ-cad.mtz")
        mr_pdb = os.path.join(
            self.testfiles_dir,
            "1BYZ_phaser_loc0_ALL_poly_ala_trunc_0.486615_rad_1_UNMOD.1.pdb")

        info = shelxe.MRinfo(exe, native_pdb, native_mtz)
        info.analyse(mr_pdb)

        self.assertEqual(info.originShift, [0.326, 0.19, 0.275])
        # Need MRBUMP to be updated
        #self.assertEqual(info.MPE,78.5)
        self.assertEqual(info.wMPE, 74.5)
Example 3
0
    def test_shelxe_1D7M(self):
        """Check shelxe origin shift and wMPE for the 1D7M test case."""
        exe = ample_util.find_exe('shelxe' + ample_util.EXE_EXT)
        native_pdb = os.path.join(self.testfiles_dir, "1D7M.pdb")
        native_mtz = os.path.join(self.testfiles_dir, "1D7M-cad.mtz")
        mr_pdb = os.path.join(
            self.testfiles_dir,
            "1D7M_phaser_loc0_ALL_SCWRL_reliable_sidechains_trunc_5.241154_rad_1_UNMOD.1.pdb"
        )

        info = shelxe.MRinfo(exe, native_pdb, native_mtz)
        info.analyse(mr_pdb)

        self.assertEqual(info.originShift, [-0.0, -0.0, 0.5])
        # Need MRBUMP to be updated
        #self.assertEqual(info.MPE,65.5)
        self.assertEqual(info.wMPE, 57.4)
Example 4
0
def analyse(amoptd, newroot=None):
    """Benchmark the MRBUMP results in *amoptd* against the native structure.

    Builds one result dict per MRBUMP result (merged with its ensemble data
    and, when a native PDB is available, phase-error/TM analysis), collects
    them in a pandas DataFrame, writes ``results.csv`` to the benchmark
    directory and stores the records in ``amoptd['benchmark_results']``.

    Args:
        amoptd: AMPLE master options/results dictionary (mutated in place).
        newroot: optional new root directory; when given, the module-level
            path-rewriting globals are updated so fixpath() can translate
            old paths to the new location.

    Side effects:
        Creates the benchmark directory if needed and chdirs into it; writes
        SHELXE .hkl/.ent files and results.csv.
    """
    if newroot:
        assert os.path.isdir(newroot)
        # fixpath() uses these module globals to rewrite stale paths.
        global _oldroot, _newroot
        _newroot = newroot
        _oldroot = amoptd['work_dir']

    if not os.path.isdir(fixpath(amoptd['benchmark_dir'])):
        os.mkdir(fixpath(amoptd['benchmark_dir']))
    os.chdir(fixpath(amoptd['benchmark_dir']))

    # AnalysePdb may have already been called from the main script
    if amoptd['native_pdb'] and 'native_pdb_std' not in amoptd:
        analysePdb(amoptd)

    if amoptd['native_pdb_std']:
        # Generate an SHELXE HKL and ENT file so that we can calculate phase errors
        mtz_util.to_hkl(amoptd['mtz'],
                        hkl_file=os.path.join(amoptd['benchmark_dir'],
                                              SHELXE_STEM + ".hkl"))
        shutil.copyfile(
            amoptd['native_pdb_std'],
            os.path.join(amoptd['benchmark_dir'], SHELXE_STEM + ".ent"))

    # Model comparison only makes sense for genuine ab-initio decoy runs.
    if amoptd['native_pdb'] and \
       not (amoptd['homologs'] or amoptd['ideal_helices']
            or amoptd['import_ensembles'] or amoptd['single_model_mode']):
        analyseModels(amoptd)

    # Get the ensembling data
    if 'ensembles_data' not in amoptd or not len(amoptd['ensembles_data']):
        logger.critical("Benchmark cannot find any ensemble data!")
        return

    # Get dict of ensemble name -> ensemble result
    ensemble_results = {e['name']: e for e in amoptd['ensembles_data']}

    # Get mrbump_results for cluster
    if 'mrbump_results' not in amoptd or not len(amoptd['mrbump_results']):
        logger.critical("Benchmark cannot find any mrbump results!")
        return

    data = []
    mrinfo = shelxe.MRinfo(amoptd['shelxe_exe'], amoptd['native_pdb_info'].pdb,
                           amoptd['mtz'])
    for result in amoptd['mrbump_results']:
        # use mrbump dict as basis for result object
        d = copy.copy(result)

        # Add in the data from the ensemble
        d.update(ensemble_results[d['ensemble_name']])
        assert d['ensemble_name'] == d['name'], d

        # Hack for old results
        if 'truncation_num_residues' in d:
            d['num_residues'] = d['truncation_num_residues']
            del d['truncation_num_residues']

        # Hack for ideal helices where num_residues are missing
        if amoptd['ideal_helices'] and ('num_residues' not in d
                                        or d['num_residues'] is None):
            d['num_residues'] = int(d['ensemble_name'].lstrip('polyala'))

        # Get the ensemble data and add to the MRBUMP data
        d['ensemble_percent_model'] = int(
            (float(d['num_residues']) / float(amoptd['fasta_length'])) * 100)

        if amoptd['native_pdb']:
            # Add in stuff we've cleaned from the pdb
            native_keys = [
                'native_pdb_code', 'native_pdb_title', 'native_pdb_resolution',
                'native_pdb_solvent_content', 'native_pdb_space_group',
                'native_pdb_num_chains', 'native_pdb_num_atoms',
                'native_pdb_num_residues'
            ]
            d.update({key: amoptd[key] for key in native_keys})
            # Analyse the solution
            analyseSolution(amoptd, d, mrinfo)
        data.append(d)

    # Put everything in a pandas DataFrame
    dframe = pd.DataFrame(data)

    # General stuff
    dframe['ample_version'] = amoptd['ample_version']
    dframe['fasta_length'] = amoptd['fasta_length']

    # Analyse subcluster centroid models
    if 'subcluster_centroid_model' in dframe.columns and amoptd['native_pdb']:
        centroid_index = dframe.index
        centroid_models = [
            fixpath(f) for f in dframe.subcluster_centroid_model
        ]
        native_pdb_std = fixpath(amoptd['native_pdb_std'])
        fasta = fixpath(amoptd['fasta'])

        # Calculation of TMscores for subcluster centroid models
        if amoptd['have_tmscore']:
            tm = tm_util.TMscore(amoptd['tmscore_exe'],
                                 wdir=fixpath(amoptd['benchmark_dir']),
                                 **amoptd)
            tm_results = tm.compare_structures(centroid_models,
                                               [native_pdb_std], [fasta])
            centroid_tmscores = [r['tmscore'] for r in tm_results]
            centroid_rmsds = [r['rmsd'] for r in tm_results]
        else:
            # Use maxcluster
            centroid_tmscores = []
            for centroid_model in centroid_models:
                n = os.path.splitext(os.path.basename(centroid_model))[0]
                cm = None
                for pdb in amoptd['models']:
                    if n.startswith(
                            os.path.splitext(os.path.basename(pdb))[0]):
                        cm = pdb
                        break
                if cm:
                    centroid_tmscores.append(_MAXCLUSTERER.tm(cm))
                else:
                    # BUGFIX: `dframe[0, 'subcluster_centroid_model']` is not
                    # valid pandas indexing and raised a KeyError instead of
                    # producing the message; report the model we were
                    # searching for directly.
                    msg = "Cannot find model for subcluster_centroid_model {0}".format(
                        centroid_model)
                    raise RuntimeError(msg)

            # There is an issue here as this is the RMSD over the TM-aligned residues
            # NOT the global RMSD, which it is for the tm_util results
            centroid_rmsds = [None for _ in centroid_index]

        dframe['subcluster_centroid_model_TM'] = pd.Series(
            centroid_tmscores, index=centroid_index)
        dframe['subcluster_centroid_model_RMSD'] = pd.Series(
            centroid_rmsds, index=centroid_index)

    # Save the data
    file_name = os.path.join(fixpath(amoptd['benchmark_dir']), 'results.csv')
    dframe.to_csv(file_name, columns=_CSV_KEYLIST, index=False, na_rep="N/A")
    amoptd['benchmark_results'] = dframe.to_dict('records')

    return