Example #1
def sweep_runs():
    # Evaluate every snapshot of every 'bee/' run at the default tree spec (64 nodes, c_puct = 1/16)
    runs = sql.query('select * from runs where description like "bee/%"')
    runs = runs[runs.width*runs.depth <= 1024] # Bigger than this blows out my memory. Think there's a leak somewhere.
    runs = runs.sample(frac=1) # Shuffle the order the runs are processed in

    set_start_method('spawn', True) # spawn rather than fork, since the workers use CUDA
    for i, run in enumerate(runs.run.unique()):
        snaps = sql.query('select * from snaps where run == ?', run)
        with parallel.parallel(evaluate, N=2, executor='cuda', desc=str(i)) as pool:
            pool.wait([pool(run, idx, 64, 1/16, perf=False) for idx in snaps.idx.unique()])
Example #2
def evaluate(run, idx, nodes, c_puct, perf=True):
    # Find or create the agent for this snapshot and tree spec, then run the noise-scale and (optionally) perf evaluations
    snap_id = sql.query_one('select id from snaps where run == ? and idx == ?', run, int(idx)).id
    extant = sql.query('select * from agents where snap == ? and nodes == ? and c == ?', int(snap_id), int(nodes), float(c_puct))
    if len(extant) == 0:
        log.info(f'Creating agent run="{run}", idx={idx}, nodes={nodes}, c_puct={c_puct:.3f}')
        sql.execute('insert into agents values (null, ?, ?, ?)', int(snap_id), int(nodes), float(c_puct))
        extant = sql.query('select * from agents where snap == ? and nodes == ? and c == ?', int(snap_id), int(nodes), float(c_puct))
    agent_id = extant.id.iloc[0]

    evaluate_noise_scale(agent_id)
    if perf:
        evaluate_perf(agent_id)
Example #3
def agents_opponent(agent_id):
    # The reference opponent: the agent built from the same snapshot with 64 nodes and c_puct = 1/16
    return sql.query('''
        select id from agents 
        where snap == (
            select snap from agents where id == ?)
        and nodes == 64 
        and c == 1./16''', int(agent_id)).id.iloc[0]
Example #4
def evaluate_noise_scale(agent_id):
    # Compute and store the agent's gradient noise scale statistics, unless they're already in the database
    extant = sql.query('select * from noise_scales where agent_id == ?', int(agent_id))
    if len(extant) == 0:
        agent, chunk = collect(agent_id)
        gs = gradients(agent.network, chunk)

        results = pd.DataFrame([noise_scale_components(chunk, gs[k], k) for k in gs])
        results['agent_id'] = agent_id
        log.info(f'{agent_id}: {noise_scale(results.iloc[0]):.0f}')
        sql.save_noise_scale(results)
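noise_scale_components and noise_scale aren't shown in this listing. As a rough guide, here is a minimal sketch of the final reduction only, assuming it matches the identical batch_size * variance / mean_sq expression that load() applies in Example #5 (the 'simple' gradient noise scale of McCandlish et al., 2018); the column names are the ones load() reads, everything else is an assumption.

def noise_scale(row):
    # Sketch only: assumed to mirror the expression in load() (Example #5) --
    # the simple gradient noise scale, batch_size * Var(grad) / |mean grad|^2.
    return row.batch_size * row.variance / row.mean_sq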
Example #5
def load():
    # Assemble the analysis dataframe: one row per agent, joining Elos with noise-scale statistics
    from analysis import data

    ags = data.load()

    # Gradient noise scale per (agent, kind): batch_size * variance / mean_sq
    noise = (sql.query('select * from noise_scales')
                .set_index(['agent_id', 'kind'])
                .pipe(lambda df: df.batch_size*df.variance/df.mean_sq)
                .unstack())

    df = pd.merge(ags, noise, left_index=True, right_index=True, how='inner')
    # Elo uplift over the snapshot's single-node (test_nodes == 1) agents
    df['uplift'] = df.groupby('snap_id').apply(lambda g: g.elo - g.query('test_nodes == 1').elo.mean()).reset_index(0, drop=True)
    df['tree_spec'] = df.test_c.astype(str) + '/' + df.test_nodes.astype(str)
    df['params'] = df.width**2 * df.depth
    df = df.groupby('boardsize', as_index=False).apply(relative_elo).reset_index(level=0, drop=True)

    # Mark a run as complete if every one of its snapshots shows up among the evaluated agents
    expected = sql.query('select * from snaps').groupby('run').idx.count()
    actual = df.groupby(['run', 'idx']).idx.count().groupby(level=0).count()
    df['complete'] = actual.reindex_like(expected).eq(expected).reindex(df.run.values).values

    return df
Example #6
def sweep_trees(boardsize=None):
    # Evaluate the full grid of tree specs (node count x exploration constant) over every other snapshot of a board size's run
    if boardsize is None:
        for b in RUNS:
            sweep_trees(b)
        return 
    run = RUNS[boardsize]
    snaps = sql.query('select * from snaps where run == ?', run)

    set_start_method('spawn', True) # spawn rather than fork, since the workers use CUDA
    with parallel.parallel(evaluate, N=2, executor='cuda', desc=str(boardsize)) as pool:
        futures = {}
        for idx in snaps.idx[snaps.idx % 2 == 0].unique():
            for nodes in [1, 2, 4, 8, 16, 32, 64, 128, 256]:
                for c in [1/64, 1/32, 1/16, 1/8, 1/4, 1/2, 1.]:
                    futures[idx, nodes, c] = pool(run, idx, nodes, c)
        pool.wait(futures)
Example #7
def evaluate_perf(agent_id, n_envs=1024):
    # Play the agent against its reference opponent if fewer than n_envs games have been recorded
    opponent_id = agents_opponent(agent_id)
    extant = sql.query('''
        select * from trials 
        where ((black_agent == ?) and (white_agent == ?)) 
        or ((white_agent == ?) and (black_agent == ?))''', 
        int(agent_id), int(opponent_id), int(agent_id), int(opponent_id))
    games = (extant.black_wins + extant.white_wins).sum()
    if games < n_envs:
        a = stored_agent(agent_id)
        o = stored_agent(opponent_id)
        w = stored_worlds(agent_id, n_envs)

        results = common.evaluate(w, [(agent_id, a), (opponent_id, o)])

        sql.save_trials(results)
Example #8
def run():
    # Quick sanity check that adding a copy of an agent doesn't inflate Elos in any way
    boardsize = 9
    trials = sql.query('''
            select trials.* 
            from trials 
                inner join agents_details as black
                    on (trials.black_agent == black.id)
                inner join agents_details as white
                    on (trials.white_agent == white.id)
            where 
                (black.boardsize == ?) and (white.boardsize == ?) and
                (black.test_nodes == 64) and (white.test_nodes == 64)''',
                       index_col='id',
                       params=(int(boardsize), int(boardsize)))
    ws, gs = elos.symmetrize(trials) # symmetrized win-count and game-count matrices

    # Set up a copy of each agent; each agent wins exactly half its games against its own copy (256 of 512 in each direction)
    N = ws.shape[0]
    ws2 = np.full((2 * N, 2 * N), np.nan)
    ws2[:N, :N] = ws
    ws2[-N:, -N:] = ws
    ws2[:N, -N:][np.diag_indices_from(ws)] = 256
    ws2[-N:, :N][np.diag_indices_from(ws)] = 256
    ws2 = pd.DataFrame(ws2)

    gs2 = np.full((2 * N, 2 * N), np.nan)
    gs2[:N, :N] = gs
    gs2[-N:, -N:] = gs
    gs2[:N, -N:][np.diag_indices_from(gs)] = 512
    gs2[-N:, :N][np.diag_indices_from(gs)] = 512
    gs2 = pd.DataFrame(gs2)

    first = elos.solve(ws, gs)

    second = elos.solve(ws2, gs2)
    second = pd.Series(second.values[:N], ws.index)

    pd.concat({
        'first': first,
        'second': second
    }, axis=1).sort_values('first').plot.scatter('first', 'second')
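The scatter at the end is an eyeball check. If you would rather assert it, here is a minimal sketch, assuming first and second are pandas Series over the same agent index; Elo fits are only identified up to an additive constant, so centre both before comparing (the tolerance is a guess).

def assert_no_inflation(first, second, tol=1e-2):
    # Centre both solutions: Elo ratings are only pinned down up to an additive constant
    diff = (first - first.mean()) - (second - second.mean())
    assert diff.abs().max() < tol, 'duplicating the agents moved the Elos'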
Example #9
def stored_worlds(agent_id, n_envs):
    # Recreate the vectorized environments for the run this agent came from
    info = sql.query('select * from agents_details where id == ?', int(agent_id)).iloc[0]
    return common.worlds(info.run, n_envs, 'cuda')
Example #10
def stored_agent(agent_id):
    # Reload the agent from its snapshot, then override the tree-search settings with the stored test spec
    info = sql.query('select * from agents_details where id == ?', int(agent_id)).iloc[0]
    agent = common.agent(info.run, info.idx, 'cuda')
    agent.kwargs['n_nodes'] = info.test_nodes
    agent.kwargs['c_puct'] = info.test_c
    return agent