Example #1
0
def main():
  """Summarize AutoDock results for every '<lig>--____--<rec>.dlg' in the cwd.

  For each docking log, extracts the lowest-binding-energy conformation,
  writes it as 'target/<lig>--____--<rec>.pdb', and writes a CSV matrix
  'target/result.txt' with ligands as rows and receptors as columns.

  Exits with status 1 when no .dlg files are present.
  """
  dlgs = glob('*.dlg')
  if not dlgs:
    sys.exit(1)

  if not os.path.exists('target'):
    os.mkdir('target')

  # Collect ligand/receptor names in first-seen order so the output
  # matrix rows/columns are deterministic.
  ligands = []
  receptors = []
  for filename in dlgs:
    # Filenames are expected to look like '<lig>--____--<rec>.dlg'.
    lig, rec = filename[:-4].split('--____--')
    if lig not in ligands:
      ligands.append(lig)
    if rec not in receptors:
      receptors.append(rec)

  # res[i][j] holds the formatted energy for ligands[i] vs receptors[j];
  # '' marks combinations with no docking log.
  res = [['' for _ in receptors] for _ in ligands]

  for filename in dlgs:
    dlg = filename[:-4]
    lig, rec = dlg.split('--____--')

    d = Docking()
    d.readDlg(filename)

    # Ensure every stored parser line is newline-terminated so the
    # parser can re-read/write them cleanly.
    lines = d.ligMol.parser.allLines
    for i, line in enumerate(lines):
      if "\n" not in line:
        lines[i] = line + "\n"

    # Older dlg files may not produce a clusterer; build one sorted by
    # binding energy so index 0 of the sorted list is the best pose.
    if not hasattr(d, 'clusterer'):
      d.clusterer = Clusterer(d.ch.conformations, sort="binding_energy")

    # Conformations ordered best-first by binding energy.
    clist = [d.clusterer.data[int(i)] for i in d.clusterer.argsort]

    energy = clist[0].binding_energy
    res[ligands.index(lig)][receptors.index(rec)] = "%.2f" % energy

    # Rewrite the ligand coordinates with the best pose and dump a PDB.
    d.ligMol.parser.allLines = d.ligMol.parser.write_with_new_coords(clist[0].getCoords())
    write_pdb(d.ligMol.parser.parse(), 'target/%s.pdb' % dlg)

  # .write() (instead of Py2-only 'print >>') keeps identical output and
  # is version-neutral; 'with' guarantees the file is closed.
  with open('target/result.txt', 'w') as res_file:
    res_file.write(",%s\n" % ",".join(receptors))
    for i, lig in enumerate(ligands):
      res_file.write("%s,%s\n" % (lig, ",".join(res[i])))
def parse_dlg(dlgfilename, num, result):
    """Extract the `num` best conformations from one AutoDock .dlg file.

    Writes one PDB per conformation (named '<lig>_<rank>.pdb') into the
    global `outdir` — or into a per-ligand sub-directory of it when
    num > 1 — and appends a '<ligand>,<e1>,<e2>,...' CSV line to the
    open file-like object `result`.

    Relies on module globals `outdir` and `VERBOSE` (defined elsewhere
    in this file).
    """
    global VERBOSE

    d = Docking()
    d.readDlg(dlgfilename)

    # One sub-directory per ligand when several poses are written;
    # a single pose goes straight into outdir. (Merged the two
    # separate 'if num > 1' blocks of the original.)
    if num > 1:
        workdir = outdir + '/' + d.ligMol.name
        if not os.path.exists(workdir):
            os.mkdir(workdir)
        if VERBOSE:
            # Single-argument print() is identical under Python 2 and 3.
            print({'name': d.ligMol.name, 'num': num, 'target': workdir})
    else:
        workdir = outdir

    # Ensure every stored parser line is newline-terminated so the
    # parser can re-read/write them cleanly.
    lines = d.ligMol.parser.allLines
    for i, line in enumerate(lines):
        if "\n" not in line:
            lines[i] = line + "\n"

    # Older dlg files may not produce a clusterer; build one sorted by
    # binding energy so the sorted list below is best-first.
    if not hasattr(d, 'clusterer'):
        d.clusterer = Clusterer(d.ch.conformations, sort='binding_energy')

    # Conformations ordered best-first by binding energy (same idiom as
    # the other dlg-summary routine in this file).
    clist = [d.clusterer.data[int(i)] for i in d.clusterer.argsort]

    energy_list = []
    for rank in range(num):
        conf = clist[rank]
        outfile = workdir + '/' + d.ligMol.name + '_' + str(rank + 1) + '.pdb'
        energy = conf.binding_energy
        energy_list.append("%.2f" % energy)

        if VERBOSE:
            print({'name': outfile, 'energy': energy})

        # Rewrite ligand coordinates with this pose and dump a PDB.
        d.ligMol.parser.allLines = d.ligMol.parser.write_with_new_coords(
            conf.getCoords())
        write_pdb(d.ligMol.parser.parse(), outfile)

    # .write() replaces the Py2-only 'print >> result' with identical output.
    result.write("%s,%s\n" % (d.ligMol.name, ','.join(energy_list)))
        if o in ('-v', '--v'):
            verbose = True
            if verbose: print 'set verbose to ', True
        if o in ('-h', '--'):
            usage()
            sys.exit()

    #read all the docking logs in current directory, one by one
    dlg_list = glob.glob('./*.dlg')
    dockings = []
    #build a list of all atom types in all dlgs
    #it is assumed that all the dockings used the same grids
    ctr = 0
    at_types = {}
    for dlg in dlg_list:
        d = Docking()
        d.readDlg(dlg)
        ctr += 1
        print ctr, ": read ", dlg
        dockings.append(d)
        for a in d.ligMol.allAtoms:
            at_types[a.autodock_element] = 0
    if debug: print 'at_types=', at_types.keys()
    d = dockings[0]  #get grid info from the first docking
    xcen, ycen, zcen = d.dlo_list[0].parser.center_pt
    #for the output maps...
    # nxgrid=nygrid=nzgrid=npts  ??is this required??
    nxpts = nypts = nzpts = int(num_pts) / 2 * 2 + 1  #ensure an odd integer
    npts = (nxpts, nypts, nzpts)
    macroStem = d.dlo_list[0].macroStem
    # build list of all atom types
Example #4
0
            if verbose: print 'set verbose to ', True
        if o in ('-h', '--'):
            usage()
            sys.exit()

    if not  directory:
        print 'summarize_wcg_docking: directory must be specified.'
        usage()
        sys.exit()

    #read all the xml docking logs in as one Docking
    
    xml_list = glob.glob('*.xml')
    print "xml_list =", xml_list
    p = XMLParser()
    d = Docking(parser=p)
    for xml_file in xml_list:
        print "calling readXMLResults with", xml_file
        d.readXMLResults(xml_file, dpf = dpf_fn)
    ligMol = d.ligMol
    ligAts = ligMol.allAtoms
    #setup rmsd tool
    coords = ligAts.coords[:]
    atom_ct = len(ligAts)
    torsion_ct = len(ligMol.torTree.torsionMap)
    tors_penalty = torsion_ct * 0.2744

    cl = Clusterer(d.ch.conformations)
    d.clusterer = cl
    cl.make_clustering(rms_tolerance)
    ref_coords = cl.clustering_dict[rms_tolerance][0][0].getCoords()[:]
Example #5
0
    if not refdirectory:
        msg += 'reference directory must be specified!\n'
        if not directory:
            msg = 'Both reference directory and directory must be specified!\n'
    elif not directory:
        msg += 'directory must be specified!'

    if not refdirectory or not directory:
        print(msg)
        usage()
        sys.exit()

    #process docking in reference directory
    #read all the docking logs in reference directory as one Docking
    ref_dlg_list = glob.glob(refdirectory + '/*.dlg')
    ref_d = Docking()
    for dlg in ref_dlg_list:
        ref_d.readDlg(dlg)
    #setup rmsd tool
    coords = ref_d.ligMol.allAtoms.coords[:]
    ref_d.clusterer.rmsTool = RMSDCalculator(coords)
    ref_d.clusterer.make_clustering(rms_tolerance)
    clust0 = ref_d.clusterer.clustering_dict[rms_tolerance][0]
    c = clust0[0]  #lowest energy overall in reference docking
    ref_d.ch.set_conformation(c)
    ref_LE_coords = ref_d.ligMol.allAtoms.coords[:]
    ref_largest = clust0
    for clust in ref_d.clusterer.clustering_dict[rms_tolerance]:
        if verbose: print("current largest cluster len= ", len(clust))
        if len(clust) > len(ref_largest):
            if verbose: print("resetting largest clust: now len=", len(clust))