# Example 1
def outside_grow(spec, min_radius, i_merge_bblock, **kw):
    """Grow the 'outside' half of a cyclic assembly, caching results on disk.

    Slices the segment spec from crit.from_seg onward, opens the entry
    polarity of the first segment (e.g. 'NC' -> '_C'), then either loads a
    previously pickled (ssdag, rslt, crit) from 'outer.pickle' or runs the
    search and pickles the result for reuse.

    NOTE(review): the cache key is just the file name — stale results are
    reused even if min_radius or the spec change; delete 'outer.pickle' to
    force a rerun.
    """
    spec_template, base_criteria = spec
    outer_spec = deepcopy(spec_template[base_criteria.from_seg:])
    # open the entry side of the first segment
    outer_spec[0][1] = "_" + outer_spec[0][1][1]
    print("outside", outer_spec)

    if not os.path.exists("outer.pickle"):
        crit = Cyclic(base_criteria.nfold, min_radius=min_radius)
        ssdag = simple_search_dag(
            outer_spec, modbbs=modsinglebb((0, -1), i_merge_bblock), **kw
        )
        rslt = grow_linear(
            ssdag,
            loss_function=crit.jit_lossfunc(),
            loss_threshold=1.0,
            last_bb_same_as=base_criteria.from_seg,
            **kw,
        )
        with open("outer.pickle", "wb") as out:
            pickle.dump((ssdag, rslt, crit), out)
    else:
        with open("outer.pickle", "rb") as inp:
            ssdag, rslt, crit = pickle.load(inp)

    return ssdag, rslt, crit
# Example 2
def inside_grow(spec, binner, table, i_merge_bblock, **kw):
    """Grow the 'inside' half of a cyclic assembly, caching results on disk.

    Slices the segment spec up to and including crit.from_seg, opens the
    exit polarity of the last segment (e.g. 'NC' -> 'N_'), then either
    loads a previously pickled (ssdag, rslt) from 'inner.pickle' or runs
    the hash-table-scored search and pickles the result for reuse.

    NOTE(review): the cache key is just the file name — stale results are
    reused even if binner/table or the spec change; delete 'inner.pickle'
    to force a rerun.
    """
    spec_template, base_criteria = spec
    inner_spec = deepcopy(spec_template[:base_criteria.from_seg + 1])
    # open the exit side of the last segment
    inner_spec[-1][1] = inner_spec[-1][1][0] + '_'
    print('inside', inner_spec)

    if not os.path.exists('inner.pickle'):
        ssdag = simple_search_dag(
            inner_spec, modbbs=modsinglebb((-1, ), i_merge_bblock), **kw
        )
        rslt = grow_linear(
            ssdag,
            loss_function=_hash_lossfunc(binner, table, base_criteria.nfold),
            loss_threshold=1.0,
            **kw,
        )
        with open('inner.pickle', 'wb') as out:
            pickle.dump((ssdag, rslt), out)
    else:
        with open('inner.pickle', 'rb') as inp:
            ssdag, rslt = pickle.load(inp)
    return ssdag, rslt
# Example 3
def worm_grow_3(bbdb,
                spdb,
                nbblocks=10,
                shuffle_bblocks=0,
                parallel=1,
                verbosity=1,
                monte_carlo=0,
                clash_check=0,
                dump_pdb=0,
                cache_sync=0.001):
    """Build a search DAG over a small hard-coded spec, grow linear worms,
    and print timing / sampling statistics; optionally dump top results.

    Args:
        bbdb: building-block database forwarded to simple_search_dag.
        spdb: second database forwarded alongside bbdb.
        nbblocks: max building blocks per segment.
        shuffle_bblocks: accepted for interface compatibility; not used here.
        parallel: parallelism flag for dag construction and growth.
        verbosity: verbosity level forwarded to simple_search_dag.
        monte_carlo: monte-carlo setting forwarded to grow_linear.
        clash_check: if truthy, run the post-processing / PDB-dump section;
            0 skips it entirely.
        dump_pdb: intended number of structures to dump; also raises
            clash_check to 100x this value when larger.
        cache_sync: cache synchronization interval for simple_search_dag.

    Returns:
        None. All output goes to stdout and PDB files on disk.
    """
    # make sure we clash-check at least as many results as we'd dump
    if clash_check < dump_pdb:
        clash_check = dump_pdb * 100
    ttot = time()

    ssdag, tdb, tvertex, tedge = simple_search_dag(
        [
            ('C3_N', '_N'),
            ('Het:NCy', 'C_'),
        ],
        (bbdb, spdb),
        nbblocks=nbblocks,
        timing=True,
        verbosity=verbosity,
        parallel=parallel,
        cache_sync=cache_sync)

    # NullCriteria accepts everything; filtering comes from the random
    # 1-in-1000 loss function passed to grow_linear below.
    crit = NullCriteria()
    lf = crit.jit_lossfunc()
    last_bb_same_as = -1

    tgrow = time()
    rslt = grow_linear(
        ssdag,
        loss_function=lossfunc_rand_1_in(1000),
        parallel=parallel,
        loss_threshold=1.0,
        last_bb_same_as=last_bb_same_as,
        monte_carlo=monte_carlo)
    tgrow = time() - tgrow

    # summary stats: result count, total search-space size (log10),
    # sparse sampling throughput, and fraction of redundant results
    Nres = len(rslt.err)
    Ntot = np.prod([v.len for v in ssdag.verts])
    logtot = np.log10(Ntot)
    print('frac last_bb_same_as',
          rslt.stats.n_last_bb_same_as[0] / rslt.stats.total_samples[0])
    Nsparse = int(rslt.stats.total_samples[0])
    Nsparse_rate = int(Nsparse / tgrow)
    ttot = time() - ttot
    if len(rslt.idx) == 0:
        frac_redundant = 0
    else:
        frac_redundant = rslt.stats.n_redundant_results[0] / len(rslt.idx)
    print(
        f' worm_grow_3 {nbblocks:4} {ttot:7.1f}s {Nres:9,} logtot{logtot:4.1f} tv'
        f' {tvertex:7.1f}s te {tedge:7.1f}s tg {tgrow:7.1f}s {Nsparse:10,} {Nsparse_rate:7,}/s {frac_redundant:4.1f}'
    )
    if len(rslt.err):
        print('err 0 25 50 75 100',
              np.percentile(rslt.err, (0, 25, 50, 75, 100)))
    sys.stdout.flush()

    if not clash_check:
        return

    # NOTE(review): prune_clashes is currently disabled, so the message
    # below reports the UNPRUNED result count.
    tclash = time()
    norig = len(rslt.idx)
    # rslt = prune_clashes(
    #     ssdag, crit, rslt, at_most=clash_check, thresh=4.0, parallel=parallel
    # )
    print(
        'pruned clashes, %i of %i remain,' %
        (len(rslt.idx), min(clash_check, norig)), 'took',
        time() - tclash, 'seconds')

    # dump the first few results without joining segments
    for i, idx in enumerate(rslt.idx[:10]):
        graph_dump_pdb('graph_%i_nojoin.pdb' % i,
                       ssdag,
                       idx,
                       rslt.pos[i],
                       join=0)
    # Removed: a block after an unconditional `return` that would have
    # dumped up to `dump_pdb` structures via _dump_pdb in a thread pool.
    # It was unreachable dead code and never executed.