Example 1
def progress():
    # relies on module-level imports of jittens, runs and data from the surrounding project
    active_jobs = jittens.jobs.jobs('active')

    # runs whose JITTENS_NAME matches a currently active jittens job
    active_runs = (runs.pandas()
                   ._env.dropna()
                   .apply(lambda p: p.get('JITTENS_NAME', '') in active_jobs)
                   .pipe(lambda s: s.index[s]))

    # (boardsize, width, depth) key for each active run
    keys = runs.pandas().loc[active_runs, 'params'].apply(
        lambda p: (p['boardsize'], p['width'], p['depth']))

    # count of non-null per-minute elo-mohex samples, reindexed by those keys
    return (data.load_field('elo-mohex', 'μ')
            .resample('1min').mean()
            .bfill().notnull().sum()
            .reindex(keys.values))
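The .pipe(lambda s: s.index[s]) step above is a plain pandas idiom for turning a boolean Series into the index labels where it is True. A minimal, self-contained sketch of the same idiom on toy data (not the real pavlov schema):

import pandas as pd

# hypothetical stand-in for the 'is this run active?' flags
flags = pd.Series([True, False, True], index=['run-a', 'run-b', 'run-c'])

# boolean Series -> index of the True entries, as in progress() above
active = flags.pipe(lambda s: s.index[s])
print(list(active))  # ['run-a', 'run-c']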
Example 2
def board_runs(boardsize=9):
    import pandas as pd
    from pavlov import stats, runs
    import matplotlib.pyplot as plt

    # date of the first 9x9 run
    valid = runs.pandas().query(
        f'_created > "2020-12-23 09:52Z" & boardsize == {boardsize} & parent == ""')

    results = {}
    for name in valid.index:
        if stats.exists(name, 'elo-mohex'):
            s = stats.pandas(name, 'elo-mohex')
            if len(s) > 60 and (s.notnull().sum() > 15).any():
                results[name] = s.μ
    df = pd.concat(results, axis=1)
    smoothed = df.ffill(limit=3).where(df.bfill().notnull()).iloc[3:].head(900)

    with plt.style.context('seaborn-poster'):
        ax = smoothed.plot(cmap='viridis_r', legend=False, linewidth=1.5)
        ax.set_facecolor('whitesmoke')
        ax.grid(axis='y')
        ax.set_ylim(None, 0)
        ax.set_ylabel('eElo')
        ax.set_title(f'all runs on {boardsize}x{boardsize} boards')

    return smoothed
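The smoothing line above is a general pandas pattern rather than anything pavlov-specific: ffill(limit=3) patches gaps of up to three missing samples, and .where(df.bfill().notnull()) stops the fill from running past each column's last real value. A small stand-alone sketch on made-up data:

import numpy as np
import pandas as pd

df = pd.DataFrame({'elo': [-5.0, np.nan, np.nan, -3.0, np.nan, np.nan]})
smoothed = df.ffill(limit=3).where(df.bfill().notnull())
print(smoothed['elo'].tolist())
# the short gap between real samples is filled; the trailing NaNs stay NaN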
Example 3
def run_data():
    r = runs.pandas().loc[lambda df: df._created >= FIRST_RUN]
    params = r.params.dropna().apply(pd.Series).reindex(r.index)
    insert = pd.concat([
        r.index.to_series().to_frame('run'), r[['description']],
        params[['boardsize', 'width', 'depth', 'nodes']]
    ], axis=1)
    insert['nodes'] = insert.nodes.fillna(64)
    return insert.reset_index(drop=True)
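The params.dropna().apply(pd.Series) step is the usual pandas way of expanding a column of dicts into one column per key; reindexing onto r.index keeps the rows whose params were missing. A toy illustration with made-up run names, unrelated to the real metadata:

import pandas as pd

r = pd.DataFrame({'params': [{'boardsize': 9, 'width': 256}, None]},
                 index=['run-a', 'run-b'])
params = r.params.dropna().apply(pd.Series).reindex(r.index)
print(params)
#        boardsize  width
# run-a        9.0  256.0
# run-b        NaN    NaN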
Example 4
def acknowledged(desc):
    fresh = [j.params for j in jittens.jobs.jobs('fresh').values()]
    active = [j.params for j in jittens.jobs.jobs('active').values()]

    rs = runs.pandas().loc[lambda df: df.description == desc]
    fetched = [
        ast.literal_eval(r['JITTENS_PARAMS']) for _, r in rs._env.items()
    ]

    return fresh + active + fetched
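JITTENS_PARAMS is stored in the run's environment as the repr of a dict (which is why ast.literal_eval is used to recover it). A minimal demonstration with a made-up value:

import ast

raw = "{'boardsize': 9, 'width': 256, 'depth': 8}"  # hypothetical stored string
params = ast.literal_eval(raw)
print(params['width'])  # 256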
Example 5
def acknowledged(desc):
    fresh = [j.params for j in jittens.jobs.jobs('fresh').values()]
    active = [j.params for j in jittens.jobs.jobs('active').values()]

    fetched = runs.pandas(description=desc)
    if fetched.size:
        fetched = fetched.params.values.tolist()
    else:
        fetched = []

    return fresh + active + fetched
Example 6
def convert_all():
    from pathlib import Path

    from pavlov import runs

    # delete every run created on or before the cutoff
    for i in runs.pandas().query(
            '_created <= "2020-12-27 18:49:04+00:00"').index:
        runs.delete(i)

    # convert any trace dir with enough saved .pkl files; convert() is
    # assumed to be defined alongside this helper
    for p in Path('output/traces').iterdir():
        n_saved = len(list(p.glob('**/*.pkl')))
        if (n_saved - 1) / 4 > 1:
            convert(p.name)
Example 7
def update():
    from pavlov import runs
    import git

    # files.path and archive() are assumed to be available in the surrounding module
    rows = runs.pandas().query('tag.notnull()')
    repo = git.Repo('.')

    # archive any tagged run that hasn't been archived yet, checking out the
    # matching pavlov_* git tag first
    for run, row in rows.iterrows():
        print(run)
        if not files.path(run, 'archive.tar.gz').exists():
            repo.git.checkout(f'tags/pavlov_{row.tag}')
            archive(run)

    # once archived, drop the tag from each run's metadata
    for run in rows.index:
        with runs.update(run) as i:
            del i['tag']
Example 8
def load_field(*args, key=('boardsize', 'width', 'depth')):
    # runs whose description marks them as main/ runs
    rs = runs.pandas().loc[
        lambda df: df.description.fillna('').str.startswith('main/')].index

    head, tail = [], []
    for r in rs:
        try:
            # load the stats and parse the params before appending either, so a
            # failure can't leave head and tail with mismatched lengths
            series = stats.pandas(r, *args)
            d = ast.literal_eval(runs.info(r)['_env']['JITTENS_PARAMS'])
            head.append(tuple(d[f] for f in key))
            tail.append(series)
        except Exception as e:
            log.info(f'Failed to load {r}: {e}')

    df = pd.DataFrame(tail, index=pd.MultiIndex.from_tuples(head)).T.sort_index(axis=1)
    df.columns.names = key

    return df.mean(axis=1, level=[0, 1, 2])
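The MultiIndex built from the (boardsize, width, depth) tuples is what lets the final line average together every run with the same configuration. A small stand-alone version of that pattern on toy data; note that mean(axis=1, level=...) is deprecated in recent pandas, and a groupby over the column levels does the same job:

import numpy as np
import pandas as pd

cols = pd.MultiIndex.from_tuples(
    [(9, 256, 8), (9, 256, 8), (9, 512, 8)],
    names=['boardsize', 'width', 'depth'])
df = pd.DataFrame(np.arange(6.).reshape(2, 3), columns=cols)

# average columns sharing the same (boardsize, width, depth); equivalent to
# the df.mean(axis=1, level=[0, 1, 2]) used above
averaged = df.T.groupby(level=[0, 1, 2]).mean().T
print(averaged)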
Example 9
def test_evaluator():
    import time

    import pandas as pd
    from pavlov import runs, storage
    from boardlaw.arena import common

    n_envs_per = 512
    df = runs.pandas(description='cat/nodes')

    names = []
    for r in df.index:
        snaps = storage.snapshots(r)
        for i in snaps:
            names.append((r, i))
    names = names[:12]

    games = pd.DataFrame(0, index=names, columns=names)

    from IPython import display

    start = time.time()
    results = []
    moves, matches = 0, 0
    # evaluate, worldfunc and agentfunc are assumed to be defined at module
    # level in the surrounding arena code
    for rs, stats in evaluate(worldfunc,
                              agentfunc,
                              games,
                              chunksize=4,
                              n_envs_per=n_envs_per):
        results.extend(rs)
        moves += sum(r.moves for r in rs)
        matches += len(rs)

        duration = time.time() - start
        display.clear_output(wait=True)
        print(
            f'{moves/duration:.0f} moves/s, {60*matches/duration:.0f} matches/min'
        )

    from collections import defaultdict
    counts = defaultdict(lambda: 0)
    for r in results:
        counts[r.names] += r.games

    assert len(counts) == len(names) * (len(names) - 1)
    assert set(counts.values()) == {n_envs_per}
Example 10
def test_chunk_evaluator():
    from pavlov import runs, storage
    from boardlaw.arena import common

    df = runs.pandas(description='cat/nodes')
    agents = {}
    for r in df.index:
        snaps = storage.snapshots(r)
        for i in snaps:
            agents[f'{r}.{i}'] = common.agent(r, i, 'cuda')
    agents = {k: agents[k] for k in list(agents)[:100]}

    worldfunc = lambda n_envs: common.worlds(
        df.index[0], n_envs, device='cuda')
    # ChunkEvaluator is the class under test, defined in the surrounding module
    evaluator = ChunkEvaluator(worldfunc, agents, 512)

    from IPython import display

    results = []
    while not evaluator.finished():
        results.extend(evaluator.step())

        display.clear_output(wait=True)
        evaluator.report()