Example #1
def coord_to_pdb(coords, sequence, coord_id, clean=True):
    assert len(sequence) == len(coords)
    pdbca = "{0}.pdb".format(coord_id)
    if not pdb_from_ca(coords, sequence, pdbca):
        raise RuntimeError("Error processing coords {0}".format(coord_id))
    #print("Wrote file {0}".format(pdbca))

    pdb_side = ample_util.filename_append(pdbca, "pulchra")
    if not add_sidechains_pulchra(pdbca, pdb_side):
        print "Failed to add pulchra sidechains for coord {0}".format(coord_id)
        pdb_side = ample_util.filename_append(pdbca, "maxsprout")
        if not add_sidechains_maxsprout(pdbca, pdb_side):
            print "*** Failed to add maxsprout sidechains for coord {0}".format(
                coord_id)
            return None

    # Now add sidechains with SCWRL
    pdbout = ample_util.filename_append(pdb_side, "scwrl")
    SCWRL.add_sidechains(pdbin=pdb_side,
                         pdbout=pdbout,
                         hydrogens=False,
                         strip_oxt=False)

    if clean:
        # map() is lazy in Python 3, so remove the intermediate files explicitly
        for tmp in (pdbca, pdb_side):
            os.unlink(tmp)

    return pdbout
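
All of the examples on this page lean on ample_util.filename_append to derive an output filename from an input one. Below is a minimal sketch of the behaviour the calls imply (a suffix inserted before the extension, with an optional target directory); the actual AMPLE implementation may differ in details such as the separator character:

import os

def filename_append(filename, astr, directory=None):
    # Insert astr between the file root and its extension,
    # optionally relocating the result to another directory
    root, ext = os.path.splitext(filename)
    if directory is None:
        directory = os.path.dirname(filename)
    name = "{0}_{1}{2}".format(os.path.basename(root), astr, ext)
    return os.path.join(directory, name)

With this sketch, filename_append("model.pdb", "scwrl") gives "model_scwrl.pdb", which matches the names the examples expect.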
Example #2
def split_into_chains(pdbin, chain=None, directory=None):
    """Split a pdb file into its separate chains"""

    if directory is None:
        directory = os.path.dirname(pdbin)

    # Largely stolen from pdb_split_models.py in phenix
    #http://cci.lbl.gov/cctbx_sources/iotbx/command_line/pdb_split_models.py
    pdbf = iotbx.file_reader.any_file(pdbin, force_type="pdb")
    pdbf.check_file_type("pdb")
    hierarchy = pdbf.file_object.construct_hierarchy()

    # Sanity check: this function only handles single-model files
    n_models = hierarchy.models_size()
    if n_models != 1:
        raise RuntimeError("split_into_chains only works with single-model pdbs!")

    crystal_symmetry = pdbf.file_object.crystal_symmetry()

    output_files = []
    n_chains = len(hierarchy.models()[0].chains())
    for i, hchain in enumerate(hierarchy.models()[0].chains()):
        if not hchain.is_protein():
            continue
        if chain and hchain.id != chain:
            continue
        new_hierarchy = iotbx.pdb.hierarchy.root()
        new_model = iotbx.pdb.hierarchy.model()
        new_hierarchy.append_model(new_model)
        new_model.append_chain(hchain.detached_copy())
        output_file = ample_util.filename_append(pdbin, hchain.id, directory)
        with open(output_file, "w") as f:
            if (crystal_symmetry is not None):
                f.write(
                    iotbx.pdb.format_cryst1_and_scale_records(
                        crystal_symmetry=crystal_symmetry, write_scale_records=True
                    ) + '\n'
                )
            f.write('REMARK Chain %d of %d\n' % (i + 1, n_chains))
            if (pdbin is not None):
                f.write('REMARK Original file:\n')
                f.write('REMARK   %s\n' % pdbin)
            f.write(new_hierarchy.as_pdb_string())

        output_files.append(output_file)

    if not output_files:
        raise RuntimeError("split_into_chains could not find any chains to split")

    return output_files
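
A hypothetical call, assuming a multi-chain file complex.pdb and an existing output directory (both names are illustrative):

# One file per protein chain, written next to the input file
chain_files = split_into_chains("complex.pdb")
# Extract only chain A into a chosen directory
chain_a_files = split_into_chains("complex.pdb", chain="A", directory="chains")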
Example #3
def del_column(file_name, column, overwrite=True):
    """Delete a column from an mtz file and return a path to the file"""
    mtzDel = ample_util.filename_append(file_name, "d{0}".format(column))
    cmd = ["mtzutils", "hklin1", file_name, "hklout", mtzDel]
    stdin = "EXCLUDE 1 {0}".format(column)
    logfile = os.path.join(os.getcwd(), "mtzutils.log")
    retcode = ample_util.run_command(cmd, stdin=stdin, logfile=logfile)
    if retcode != 0:
        msg = "Error running mtzutils. Check the logfile: {0}".format(logfile)
        logger.critical(msg)
        raise RuntimeError(msg)

    if overwrite:
        shutil.move(mtzDel, file_name)
        return file_name
    else:
        return mtzDel
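
del_column drives the CCP4 mtzutils program, passing the EXCLUDE keyword on stdin to drop the named column from the first input file. A hypothetical call (the column label is illustrative):

# Remove the FreeR_flag column, keeping the original file untouched
new_mtz = del_column("input.mtz", "FreeR_flag", overwrite=False)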
Example #4
def add_rfree(file_name, directory=None, overwrite=True):
    """Run uniqueify on mtz file to generate RFREE data column"""
    mtzUnique = ample_util.filename_append(file_name, "uniqueify", directory=directory)

    cmd = ['uniqueify', file_name, mtzUnique]
    logfile = os.path.join(os.getcwd(), "uniqueify_{}.log".format(str(uuid.uuid1())))
    retcode = ample_util.run_command(cmd, logfile=logfile)
    if retcode != 0:
        raise RuntimeError("Error running command: {0}. Check the logfile: {1}".format(" ".join(cmd), logfile))
    else:
        os.unlink(logfile)

    if overwrite:
        shutil.move(mtzUnique, file_name)
        return file_name
    else:
        return mtzUnique
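
uniqueify is the CCP4 script that adds a free-R flag column to an MTZ file. A hypothetical call with an illustrative filename:

# Adds an RFREE column and overwrites input.mtz in place (the default)
mtz_with_rfree = add_rfree("input.mtz")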
Example #5
def del_column(file_name, column, overwrite=True):
    """Delete a column from an mtz file and return a path to the file"""
    mtzDel = ample_util.filename_append(file_name, "d{0}".format(column))
    cmd = ["mtzutils", "hklin1", file_name, "hklout", mtzDel]
    stdin = "EXCLUDE 1 {0}".format(column)
    logfile = os.path.join(os.getcwd(), "mtzutils_{}.log".format(str(uuid.uuid1())))
    retcode = ample_util.run_command(cmd, stdin=stdin, logfile=logfile)
    if retcode != 0:
        raise RuntimeError("Error running mtzutils. Check the logfile: {0}".format(logfile))
    else:
        os.unlink(logfile)

    if overwrite:
        shutil.move(mtzDel, file_name)
        return file_name
    else:
        return mtzDel
Example #6
def add_rfree(file_name, directory=None, overwrite=True):
    """Run uniqueify on mtz file to generate RFREE data column"""
    mtzUnique = ample_util.filename_append(file_name,
                                           "uniqueify",
                                           directory=directory)

    cmd = ['uniqueify', file_name, mtzUnique]
    logfile = os.path.join(os.getcwd(), "uniqueify.log")
    retcode = ample_util.run_command(cmd, logfile=logfile)
    if retcode != 0:
        msg = "Error running command: {0}. Check the logfile: {1}".format(
            " ".join(cmd), logfile)
        logger.critical(msg)
        raise RuntimeError(msg)

    if overwrite:
        shutil.move(mtzUnique, file_name)
        return file_name
    else:
        return mtzUnique
Example #7
def split_pdb(pdbin, directory=None, strip_hetatm=False, same_size=False):
    """Split a pdb file into its separate models

    Parameters
    ----------
    pdbin : str
      path to input pdbf file
    directory : str
      path to directory where pdb files will be created
    strip_hetatm : bool
      remove HETATM records if true
    same_size : bool
      Only output models of equal length (the most numerous length is selected)
    """

    if directory is None:
        directory = os.path.dirname(pdbin)
    if not os.path.isdir(directory):
        os.mkdir(directory)

    # Largely stolen from pdb_split_models.py in phenix
    #http://cci.lbl.gov/cctbx_sources/iotbx/command_line/pdb_split_models.py

    pdbf = iotbx.file_reader.any_file(pdbin, force_type="pdb")
    pdbf.check_file_type("pdb")
    hierarchy = pdbf.file_object.construct_hierarchy()

    # Count the models so each output file can be labelled
    n_models = hierarchy.models_size()

    if same_size:
        _only_equal_sizes(hierarchy)
    crystal_symmetry = pdbf.file_object.crystal_symmetry()
    output_files = []
    for k, model in enumerate(hierarchy.models(), start=1):
        new_hierarchy = iotbx.pdb.hierarchy.root()
        new_hierarchy.append_model(model.detached_copy())
        if strip_hetatm:
            _strip(new_hierarchy, hetatm=True)
        if (model.id == ""):
            model_id = str(k)
        else:
            model_id = model.id.strip()
        output_file = ample_util.filename_append(pdbin, model_id, directory)
        with open(output_file, "w") as f:
            if (crystal_symmetry is not None):
                f.write(
                    iotbx.pdb.format_cryst1_and_scale_records(
                        crystal_symmetry=crystal_symmetry, write_scale_records=True
                    ) + '\n'
                )
            f.write("REMARK Model %d of %d\n" % (k, n_models))
            if (pdbin is not None):
                f.write('REMARK Original file:\n')
                f.write('REMARK   %s\n' % pdbin)
            f.write(new_hierarchy.as_pdb_string())
        output_files.append(output_file)
    return output_files
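
A hypothetical call on a multi-model (e.g. NMR-style) file; the filename is illustrative:

# One PDB file per model, keeping only models of the most common length
model_files = split_pdb("ensemble.pdb", directory="models", same_size=True)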
Example #8
import os
import sys
sys.path.append("/Users/jmht/Documents/AMPLE/ample-dev1/scripts")
sys.path.append("/Users/jmht/Documents/AMPLE/ample-dev1/python")

import ample_util
from analyse_run import AmpleResult

ocsv = sys.argv[1]
ncsv = ample_util.filename_append(filename=ocsv, astr="obj")

a = AmpleResult()
with open(ocsv) as o, open(ncsv, 'w') as n:
    for i, line in enumerate(o):
        if i == 0:
            # Rewrite the CSV header, mapping column titles to attribute names
            fields = line.strip().split(",")
            nf = []
            for f in fields:
                idx = a.orderedTitles.index(f)
                nf.append(a.orderedAttrs[idx])
            line = ",".join(nf) + "\n"
        n.write(line)
Example #9
    print "processing ", r.pdbCode, r.ensembleName
    #r.orderedAttrs = a.orderedAttrs
    #r.orderedTitles = a.orderedTitles
    buccaneerLog = os.path.join(runDir, r.pdbCode, "ROSETTA_MR_0", "MRBUMP",
                                "cluster_1", "search_{0}_mrbump".format(
                                    r.ensembleName), "data",
                                "loc0_ALL_{0}".format(r.ensembleName), "unmod",
                                "mr", "{0}".format(r.mrProgram), "build",
                                "shelxe", "rebuild", "build", "buccaneer.log")

    if os.path.isfile(buccaneerLog):
        p.parse(buccaneerLog)
        r.buccFinalRfact = p.finalRfact
        r.buccFinalRfree = p.finalRfree

pfile = ample_util.filename_append(pfile, astr="bucc")
# pickle.dump returns None and needs a binary-mode file handle
with open(pfile, 'wb') as f:
    pickle.dump(allResults, f)  # cPickle was merged into pickle in Python 3

cpath = os.path.join(workDir, 'results_bucc.csv')
csvfile = open(cpath, 'w', newline='')  # csv wants text mode with newline='' in Python 3
csvwriter = csv.writer(csvfile,
                       delimiter=',',
                       quotechar='"',
                       quoting=csv.QUOTE_MINIMAL)

header = False
for r in allResults:
    if not header:
        #csvwriter.writerow( r.titlesAsList() )
        csvwriter.writerow(r.valueAttrAsList())
Example #10
    print "\nResults for ", pdbCode

    # Directory where all the data for this run live
    dataDir = os.path.join(dataRoot, pdbCode)

    # Get the path to the native structure and extract all the nativePdbInfo from it
    nativePdb = os.path.join(dataDir, "{0}.pdb".format(pdbCode))
    pdbedit = pdb_edit.PDBEdit()
    nativePdbInfo = pdbedit.get_info(nativePdb)

    # First check if the native has > 1 model and extract the first if so
    if len(nativePdbInfo.models) > 1:
        print "nativePdb has > 1 model - using first"
        nativePdb1 = ample_util.filename_append(filename=nativePdb,
                                                astr="model1",
                                                directory=workdir)
        pdbedit.extract_model(nativePdb,
                              nativePdb1,
                              modelID=nativePdbInfo.models[0].serial)
        nativePdb = nativePdb1

    # Standardise the PDB to rename any non-standard AA, remove solvent etc
    nativePdbStd = ample_util.filename_append(filename=nativePdb,
                                              astr="std",
                                              directory=workdir)
    pdbedit.standardise(nativePdb, nativePdbStd)
    nativePdb = nativePdbStd

    # Get the new Info about the native
    nativePdbInfo = pdbedit.get_info(nativePdb)
Example #11
    #     if os.path.isfile( phaserLog ):
    #         phaserP = phaser_parser.PhaserLogParser( phaserLog )
    #         ptime = phaserP.time

    shelxeLog = os.path.join(mrDir, "build/shelxe/shelxe_run.log")
    stime = 0.0
    if os.path.isfile(shelxeLog):
        shelxeP = parse_shelxe.ShelxeLogParser(shelxeLog)
        stime = shelxeP.cputime

    r.shelxeTime = stime

    #print "PTIME ",r.phaserTime
    #print "STIME ",stime

pfile = ample_util.filename_append(pfile, astr="timings")
# pickle.dump returns None and needs a binary-mode file handle
with open(pfile, 'wb') as f:
    pickle.dump(ensembleResults, f)

cpath = os.path.join(runDir, 'final_results_timings.csv')
csvfile = open(cpath, 'w', newline='')  # csv wants text mode with newline='' in Python 3
csvwriter = csv.writer(csvfile,
                       delimiter=',',
                       quotechar='"',
                       quoting=csv.QUOTE_MINIMAL)

header = False
for r in ensembleResults:
    if not header:
        #csvwriter.writerow( r.titlesAsList() )
        csvwriter.writerow(r.valueAttrAsList())