Example #1
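Grows the "outer cycle" half of a two-stage cyclic fusion search: it keeps the spec from crit0.from_seg outward, runs a linear search under a Cyclic criterion, and caches the result in outer.pickle. All four examples assume the surrounding module's imports (os, pickle, copy.deepcopy, numpy as np, time.time, concurrent.futures as cf) and the worms helpers they call (simple_search_dag, grow_linear, Cyclic, etc.).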
def outside_grow(spec, min_radius, i_merge_bblock, **kw):
    bbspec0, crit0 = spec
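    # keep only the segments from the splice point outward; the "_" presumably
    # marks the first building block's inbound connection as unconstrained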
    bbspec = deepcopy(bbspec0[crit0.from_seg :])
    bbspec[0][1] = "_" + bbspec[0][1][1]
    print("outside", bbspec)

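    # reuse a previously cached outer search if one exists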
    if os.path.exists("outer.pickle"):
        with open("outer.pickle", "rb") as inp:
            ssdag, rslt, crit = pickle.load(inp)
    else:
        crit = Cyclic(crit0.nfold, min_radius=min_radius)
        ssdag = simple_search_dag(
            bbspec, modbbs=modsinglebb((0, -1), i_merge_bblock), **kw
        )
        rslt = grow_linear(
            ssdag,
            loss_function=crit.jit_lossfunc(),
            loss_threshold=1.0,
            last_bb_same_as=crit.from_seg,
            **kw,
        )
        with open("outer.pickle", "wb") as out:
            pickle.dump((ssdag, rslt, crit), out)

    return ssdag, rslt, crit
Example #2
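The inner counterpart to outside_grow: it keeps the spec up to and including crit0.from_seg, frees the last building block's outbound connection, and scores candidates against a hash table of outer-cycle end positions via _hash_lossfunc, caching the result in inner.pickle.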
def inside_grow(spec, binner, table, i_merge_bblock, **kw):
    bbspec0, crit0 = spec
    bbspec = deepcopy(bbspec0[:crit0.from_seg + 1])
    bbspec[-1][1] = bbspec[-1][1][0] + '_'
    print('inside', bbspec)

    if os.path.exists('inner.pickle'):
        with open('inner.pickle', 'rb') as inp:
            ssdag, rslt = pickle.load(inp)
    else:
        ssdag = simple_search_dag(
            bbspec, modbbs=modsinglebb((-1,), i_merge_bblock), **kw
        )
        rslt = grow_linear(
            ssdag,
            loss_function=_hash_lossfunc(binner, table, crit0.nfold),
            # loss_function=lossfunc_rand_1_in(1000),
            loss_threshold=1.0,
            **kw
        )
        with open('inner.pickle', 'wb') as out:
            pickle.dump((ssdag, rslt), out)
    return ssdag, rslt
Example #3
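A self-contained end-to-end run: build a search graph from a small building-block spec, grow linearly under a random subsampling loss (lossfunc_rand_1_in), and report timing, search-space, and redundancy statistics, optionally clash-checking and dumping the resulting structures as PDBs.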
def worm_grow_3(bbdb,
                spdb,
                nbblocks=10,
                shuffle_bblocks=0,
                parallel=1,
                verbosity=1,
                monte_carlo=0,
                clash_check=0,
                dump_pdb=0,
                cache_sync=0.001):
    # make sure clash checking keeps enough results to cover the requested dumps
    if clash_check < dump_pdb:
        clash_check = dump_pdb * 100
    ttot = time()

    ssdag, tdb, tvertex, tedge = simple_search_dag(
        [
            ('C3_N', '_N'),
            ('Het:NCy', 'C_'),
            # ('Het:CCC', 'C_'),
            # ('Het:NN', 'NN'),
            # ('Het:CC', 'CC'),
            # ('Het:NNX', 'N_'),
        ],
        (bbdb, spdb),
        nbblocks=nbblocks,
        timing=True,
        verbosity=verbosity,
        parallel=parallel,
        cache_sync=cache_sync)

    # crit = Cyclic(3, from_seg=2, origin_seg=0)
    # crit = Cyclic(3)
    # last_bb_same_as = crit.from_seg
    crit = NullCriteria()
    lf = crit.jit_lossfunc()
    last_bb_same_as = -1

    tgrow = time()
    rslt = grow_linear(
        ssdag,
        # loss_function=lf,
        loss_function=lossfunc_rand_1_in(1000),
        parallel=parallel,
        loss_threshold=1.0,
        last_bb_same_as=last_bb_same_as,
        monte_carlo=monte_carlo)
    tgrow = time() - tgrow

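    # compare the number of results against the full combinatorial search space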
    Nres = len(rslt.err)
    Ntot = np.prod([v.len for v in ssdag.verts])
    logtot = np.log10(Ntot)
    print('frac last_bb_same_as',
          rslt.stats.n_last_bb_same_as[0] / rslt.stats.total_samples[0])
    Nsparse = int(rslt.stats.total_samples[0])
    Nsparse_rate = int(Nsparse / tgrow)
    ttot = time() - ttot
    if len(rslt.idx) == 0:
        frac_redundant = 0
    else:
        frac_redundant = rslt.stats.n_redundant_results[0] / len(rslt.idx)
    print(
        f' worm_grow_3 {nbblocks:4} {ttot:7.1f}s {Nres:9,} logtot{logtot:4.1f} tv'
        f' {tvertex:7.1f}s te {tedge:7.1f}s tg {tgrow:7.1f}s {Nsparse:10,} {Nsparse_rate:7,}/s {frac_redundant:4.1f}'
    )
    if len(rslt.err):
        print('err 0 25 50 75 100',
              np.percentile(rslt.err, (0, 25, 50, 75, 100)))
    sys.stdout.flush()

    if not clash_check:
        return

    tclash = time()
    norig = len(rslt.idx)
    # clash pruning is currently disabled; the print below reports unpruned counts
    # rslt = prune_clashes(
    #     ssdag, crit, rslt, at_most=clash_check, thresh=4.0, parallel=parallel
    # )
    print(
        'pruned clashes, %i of %i remain,' %
        (len(rslt.idx), min(clash_check, norig)), 'took',
        time() - tclash, 'seconds')

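    # dump the first few results as PDBs without joining segments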
    for i, idx in enumerate(rslt.idx[:10]):
        graph_dump_pdb('graph_%i_nojoin.pdb' % i,
                       ssdag,
                       idx,
                       rslt.pos[i],
                       join=0)
        # graph_dump_pdb('graph_%i.pdb' % i, ssdag, idx, rslt.pos[i])

    if len(rslt.idx) > 0:
        tpdb = time()
        exe = cf.ThreadPoolExecutor if parallel else InProcessExecutor
        with exe(max_workers=3) as pool:
            futures = list()
            for i in range(min(dump_pdb, len(rslt.idx))):
                kw = dict(
                    bbdb=bbdb,
                    ssdag=ssdag,
                    # crit=crit,
                    i=i,
                    indices=rslt.idx[i],
                    positions=rslt.pos[i],
                    only_connected=False,
                )
                futures.append(pool.submit(_dump_pdb, **kw))
            [f.result() for f in futures]
        print('dumped %i structures' % min(dump_pdb, len(rslt.idx)), 'time',
              time() - tpdb)
Example #4
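The driver tying Examples #1 and #2 together: grow the outer cycle, hash its end positions, grow the inner spokes against that table, merge the two partial results on the shared segment, prune clashes, and dump symmetrized poses.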
def make_peace(spec, cart_resl, ori_resl, clash_check, dump_pdb, **kw):
    binner = gu_xbin_indexer(cart_resl, ori_resl)
    numba_binner = numba_xbin_indexer(cart_resl, ori_resl)
    bbdb = kw["db"][0]
    bbspec, crit = spec

    ################ outer cycle ###############

    touter = time()
    ot_graph, ot_rslt, ot_crit = outside_grow(spec, **kw)
    rescore = ot_crit.score(ot_rslt.pos.swapaxes(0, 1))
    assert np.max(rescore[rescore < 9999]) <= 1.0
    ot_rslt = subset_result(ot_rslt, rescore <= 1.0)
    ntot = len(ot_rslt.idx)
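    # hash the outer-cycle end positions so the inner search can look them up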
    keys, hash_table = _make_hash_table(ot_graph, ot_rslt, binner)
    print(
        "  nresult outer",
        len(ot_rslt.idx),
        "unique hashes",
        len(set(keys)),
        f"{int(ot_rslt.stats.total_samples[0] / (time() - touter)):,}/s",
        f"redundancy {ot_rslt.stats.n_redundant_results[0]/len(ot_rslt.idx):5.2f}",
    )

    ################ spokes ###############

    tinner = time()
    in_graph, in_rslt = inside_grow(spec,
                                    binner=numba_binner,
                                    table=hash_table,
                                    **kw)
    print(
        "  nresults inner",
        len(in_rslt.idx),
        f"rate {int(in_rslt.stats.total_samples[0] / (time() - tinner)):,}/s"
        f"redundancy {in_rslt.stats.n_redundant_results[0]/len(in_rslt.idx):5.2f}",
    )

    ################ merged ###############

    ssdag = simple_search_dag(
        bbspec,
        modbbs=modsinglebb((spec[1].from_seg, ), kw["i_merge_bblock"]),
        make_edges=False,
        **kw,
    )
    print("whole:", spec[0])
    rslt, imerge = merge_results(ssdag, crit, in_rslt, in_graph, ot_rslt,
                                 ot_graph, binner, hash_table)
    ntot = len(rslt.idx)
    tclash = time()
    rslt = prune_clashes(ssdag, crit, rslt, at_most=10000, thresh=3.0, **kw)
    tclash = time() - tclash
    print("  nresults", len(rslt.idx), "withclash", ntot, "clashrate",
          ntot / tclash)

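    # symmetrize each merged result and write it out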
    symdata = util.get_symdata("C" + str(crit.nfold))
    for i in range(min(999, len(rslt.idx))):
        pose = make_pose_crit(bbdb,
                              ssdag,
                              crit,
                              rslt.idx[i],
                              rslt.pos[i],
                              only_connected="auto")
        ros.core.util.switch_to_residue_type_set(pose, "centroid")
        ros.core.pose.symmetry.make_symmetric_pose(pose, symdata)
        pose.dump_pdb("whole_%03i.pdb" % i)