Example #1
import os

import jobfactory   # project-local modules (afcarl/phd)
import hub


def load_results(exp_cfgs):
    # flatten the two config levels into a single list
    exp_cfgs = exp_cfgs[0] + exp_cfgs[1]

    sources, targets = {}, {}
    for i, exp_cfg in enumerate(exp_cfgs):
        cwd = os.getcwd()

        batch = jobfactory.make_jobgroup([exp_cfg])
        os.chdir(os.path.expanduser(exp_cfg.meta.rootpath))

        results_hub = hub.ResultsHub(batch, kind='cov')
        src_data = results_hub.data()
        assert len(src_data) == 1

        results = {'exp_cfg': exp_cfg}
        results.update(src_data[0])

        # an empty second key component marks a source config; the others are
        # target configs, skipped when no averages were computed
        if len(exp_cfg.exp.key[1]) == 0:
            sources[(exp_cfg.exp.env_name,
                     exp_cfg.exp.explorer_name)] = results
        else:
            if len(src_data[0]['avg']) == 0:
                print('MISSING: {}'.format(exp_cfg.exp.key))
            else:
                targets[exp_cfg.exp.key] = results

        os.chdir(cwd)

    return sources, targets
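A minimal usage sketch, with src_cfgs and tgt_cfgs as hypothetical lists of experiment configs for the two levels:

sources, targets = load_results([src_cfgs, tgt_cfgs])
for key, results in targets.items():
    print(key, results['avg'])   # each entry carries the ResultsHub fields plus 'exp_cfg'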
Example #2
import os

import numpy as np

import jobfactory   # project-local modules (afcarl/phd)
import hub


def load_data(kind):
    # env_names, disturbs and exp_cfgs are expected as module-level globals,
    # imported from a companion cluster script (see Example #8)
    cwd = os.getcwd()

    Ns = [1000, 2000, 5000, 10000]
    ps, avgs, stds = ({(env, d, N): [] for env in env_names for d in disturbs for N in Ns},
                      {(env, d, N): {} for env in env_names for d in disturbs for N in Ns},
                      {(env, d, N): {} for env in env_names for d in disturbs for N in Ns})
    ds = [0.001, 0.05, 0.5]
    env_keys = []

    min_avgs, min_stds = {}, {}

    for i, exp_cfg in enumerate(exp_cfgs):
        if exp_cfg.exploration.explorer.ex_1.learner.m_disturb in ds:

            batch = jobfactory.make_jobgroup([exp_cfg])
            os.chdir(os.path.expanduser(exp_cfg.meta.rootpath))

            results_hub = hub.ResultsHub(batch, kind=kind)
            data = results_hub.data()[0]

            d = exp_cfg.exploration.explorer.ex_1.learner.m_disturb
            p = exp_cfg.exploration.explorer.weights[1][0]

            for N in Ns:
                env = exp_cfg.exp.env_name
                index = data['ticks'].index(N)
                env_key = (env, d, N)
                ps[env_key].append(p)
                avgs[env_key][p] = data['avg'][index]
                stds[env_key][p] = data['std'][index]
                if p == 0:   # register each env_key exactly once
                    env_keys.append(env_key)

            # track the lowest average (and matching std) seen for each env_key
            for index, t in enumerate(data['ticks']):
                env_name = exp_cfg.exp.env_name
                env_key = (env_name, d, t)
                min_avgs.setdefault(env_key, float('inf'))
                if min_avgs[env_key] >= data['avg'][index]:
                    min_avgs[env_key] = data['avg'][index]
                    min_stds[env_key] = data['std'][index]

    for env_key in env_keys:
        ps[env_key] = sorted(ps[env_key])
        avgs[env_key] = np.array([avgs[env_key][p] for p in ps[env_key]])
        stds[env_key] = np.array([stds[env_key][p] for p in ps[env_key]])

    os.chdir(cwd)

    return env_keys, ps, avgs, stds, min_avgs, min_stds
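The six return values can be consumed directly; a minimal sketch:

env_keys, ps, avgs, stds, min_avgs, min_stds = load_data('cov')
for env_key in env_keys:
    # avgs[env_key] and stds[env_key] are numpy arrays aligned with sorted ps[env_key]
    print(env_key, ps[env_key], avgs[env_key])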
Example #3
import os

import numpy as np

import jobfactory   # project-local modules (afcarl/phd)
import hub


def load_data(kind):
    # env_names, disturbs and exp_cfgs are expected as module-level globals,
    # imported from a companion cluster script (see Example #8)
    cwd = os.getcwd()

    Ns = [1000, 2000, 5000]
    ps, avgs, stds = ({(env, d, N): [] for env in env_names for d in disturbs for N in Ns},
                      {(env, d, N): {} for env in env_names for d in disturbs for N in Ns},
                      {(env, d, N): {} for env in env_names for d in disturbs for N in Ns})
    ds = [0.001, 0.05, 0.5]
    env_keys = []

    min_avgs, min_stds = {}, {}

    for i, exp_cfg in enumerate(exp_cfgs):
        if exp_cfg.exploration.explorer.ex_1.learner.m_disturb in ds:

            batch = jobfactory.make_jobgroup([exp_cfg])
            os.chdir(os.path.expanduser(exp_cfg.meta.rootpath))

            results_hub = hub.ResultsHub(batch, kind=kind)
            data = results_hub.data()[0]

            d = exp_cfg.exploration.explorer.ex_1.learner.m_disturb
            p = exp_cfg.exploration.explorer.weights[1][0]

            for N in Ns:
                env = exp_cfg.exp.env_name
                index = data['ticks'].index(N)
                env_key = (env, d, N)
                ps[env_key].append(p)
                avgs[env_key][p] = data['avg'][index]
                stds[env_key][p] = data['std'][index]
                if p == 0:   # register each env_key exactly once
                    env_keys.append(env_key)

            # track the lowest average (and matching std) seen for each env_key
            for index, t in enumerate(data['ticks']):
                env_name = exp_cfg.exp.env_name
                env_key = (env_name, d, t)
                min_avgs.setdefault(env_key, float('inf'))
                if min_avgs[env_key] >= data['avg'][index]:
                    min_avgs[env_key] = data['avg'][index]
                    min_stds[env_key] = data['std'][index]

    for env_key in env_keys:
        ps[env_key] = sorted(ps[env_key])
        avgs[env_key] = np.array([avgs[env_key][p] for p in ps[env_key]])
        stds[env_key] = np.array([stds[env_key][p] for p in ps[env_key]])

    os.chdir(cwd)

    return env_keys, ps, avgs, stds, min_avgs, min_stds
Example #4
import os

import jobfactory   # project-local modules (afcarl/phd)
import hub
import graphs
from environments import tools


def coverage_graphs(expcfgs_levels, dest='tmp.html', n_graphs=3):
    cwd = os.getcwd()
    graphs.output_file(dest)

    red = '#DF6464'

    for level in expcfgs_levels:
        for i, exp_cfg in enumerate(level):
            n = 0   # graphs drawn so far for this exp_cfg (capped by n_graphs)
            batch = jobfactory.make_jobgroup([exp_cfg])
            os.chdir(os.path.expanduser(exp_cfg.meta.rootpath))

            data_hub = hub.DataHub(batch, sensory_only=True)
            datas = data_hub.data()

            for j, data in enumerate(datas):
                if n_graphs is None or n < n_graphs:
                    for N in [200, 1000]:
                        print(exp_cfg.exploration.explorer)   # debug
                        # the first n_reuse vectors (first era) are drawn in red below
                        n_reuse = exp_cfg.exploration.explorer.eras[0]
                        s_vectors = [
                            tools.to_vector(s_signal, data.s_channels)
                            for s_signal in data.s_signals
                        ][:N]

                        graphs.coverage(data.s_channels,
                                        exp_cfg.testscov.buffer_size,
                                        s_vectors=s_vectors,
                                        swap_xy=False,
                                        title_text_font_size='6pt',
                                        title='{} {}'.format(
                                            exp_cfg.exp.key, j))
                        graphs.hold(True)
                        graphs.spread(data.s_channels,
                                      s_vectors=s_vectors[n_reuse:],
                                      swap_xy=False,
                                      e_radius=2.0)
                        graphs.hold(True)
                        graphs.spread(data.s_channels,
                                      s_vectors=s_vectors[:n_reuse],
                                      swap_xy=False,
                                      e_radius=2.0,
                                      e_color=red)
                        n += 1

    os.chdir(cwd)
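A hedged usage sketch, assuming expcfgs_levels comes from a companion cluster script as in Examples #6 and #8; the graphs are written to the HTML file given by dest:

coverage_graphs(expcfgs_levels, dest='coverage.html', n_graphs=3)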
Example #5
File: replay.py Project: afcarl/phd
import os

import environments

# project-local modules (afcarl/phd)
import envs
import jobfactory
import hub
import gfx


def dovecot_replay(exp_cfg, rep, collision_only=True, headless=False):

    batch = jobfactory.make_jobgroup([exp_cfg])
    os.chdir(os.path.expanduser(exp_cfg.meta.rootpath))

    data_hub = hub.DataHub(batch,
                           folder='/Users/fabien/research/data.hard/',
                           verbose=True)
    job_data = data_hub.data()
    job_data = job_data[rep]
    assert job_data.job.cfg.job.rep == rep

    job_cfg = job_data.jobcfg._deepcopy()
    job_cfg.job._freeze(False)

    env_cfg = job_cfg.job.env
    # NOTE: env_name is not defined in this snippet; this lookup overrides
    # the job's own env config with the catalog entry of the same name
    env_cfg = envs.catalog[env_name]
    if headless:
        env_cfg.execute.simu.ppf = 200
        env_cfg.execute.simu.headless = True
    else:
        env_cfg.execute.simu.ppf = 1
        env_cfg.execute.simu.headless = False

    env = environments.Environment.create(env_cfg)

    raw_input()   # Python 2: wait for Enter before starting the replay
    assert job_data.observations is not None, "motor signals (observations) could not be loaded (datafiles probably missing)."
    print('obs from file: {}{}{}'.format(gfx.green,
                                         job_cfg.exploration.hardware.datafile,
                                         gfx.end))
    for step, (m_signal, s_signal) in enumerate(job_data.observations):

        if not collision_only or s_signal['push_saliency'] != 0:
            # alternative filters, kept commented out:
            # if s_signal['x'] > 100.0 and s_signal['y'] > 100.0:
            # if step in [896, 786, 648, 414]:
            # assert s_signal['push_saliency'] > 999
            print('{:04d} {}{}{} [recorded]'.format(step, gfx.cyan, s_signal,
                                                    gfx.end))
            feedback = env.execute(m_signal)
            print('     {}'.format(feedback['s_signal']))
        raw_input()   # pause between steps
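A minimal invocation sketch; exp_cfg here stands for a single experiment config, as produced by the cluster scripts in the other examples:

dovecot_replay(exp_cfg, rep=3, collision_only=True, headless=True)   # replay repetition 3 without the GUI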
Example #6
import os

import dotdot
import graphs
import jobfactory   # project-local modules (afcarl/phd)
import hub

from fig3_8_unreach_graph import ratios, avgs, stds
from figB_4_adapt_reach_grid_cluster import expcfgs_levels, windows, RESS

#Ns = [100, 1000, 2000, 5000, 10000]
Ns = [2000, 10000]
W = windows[0]

cwd = os.getcwd()
adapt_avgs = {}
adapt_stds = {}

for i, exp_cfg in enumerate(expcfgs_levels[0]):
    batch = jobfactory.make_jobgroup([exp_cfg])
    os.chdir(os.path.expanduser(exp_cfg.meta.rootpath))

    data = hub.ResultsHub(batch, kind='nn').data()[0]
    window = exp_cfg.exploration.explorer.ex_1.window
    res = exp_cfg.exploration.explorer.ex_1.res
    for N in Ns:
        print(data['ticks'])
        index = data['ticks'].index(N)
        adapt_avgs[(window, N, res)] = data['avg'][index]
        adapt_stds[(window, N, res)] = data['std'][index]

os.chdir(cwd)

# for N in Ns:
#     for w in windows:
Example #7
File: replay.py Project: afcarl/phd
import os

import environments

# project-local modules (afcarl/phd)
import envs
import jobfactory
import hub
import gfx


def dovecot_replay(exp_cfg, rep, collision_only=True, headless=False):

    batch = jobfactory.make_jobgroup([exp_cfg])
    os.chdir(os.path.expanduser(exp_cfg.meta.rootpath))

    data_hub = hub.DataHub(batch, folder='/Users/fabien/research/data.sim/', verbose=True)
    job_data = data_hub.data()
    job_data = job_data[rep]
    assert job_data.job.cfg.job.rep == rep

    job_cfg = job_data.jobcfg._deepcopy()
    job_cfg.job._freeze(False)

    env_cfg = job_cfg.job.env
    # NOTE: env_name is not defined in this snippet; this lookup overrides
    # the job's own env config with the catalog entry of the same name
    env_cfg = envs.catalog[env_name]
    env_cfg.execute.prefilter = False
    env_cfg.execute.simu.calibr_check = False
    if headless:
        env_cfg.execute.simu.ppf = 200
        env_cfg.execute.simu.headless = True
    else:
        env_cfg.execute.simu.ppf = 1
        env_cfg.execute.simu.headless = False

    env = environments.Environment.create(env_cfg)

    raw_input()   # Python 2: wait for Enter before starting the replay
    assert job_data.observations is not None, "motor signals (observations) could not be loaded (datafiles probably missing)."
    print('obs from file: {}{}{}'.format(gfx.green, job_cfg.exploration.hardware.datafile, gfx.end))

    for step, (expl, fback) in enumerate(job_data.explorations):
        s_vector = environments.tools.to_vector(fback['s_signal'], job_data.s_channels)
        print('{}:{}: {}'.format(step, s_vector[-1], expl['from']))

    # interactive loop: Enter advances one step, '=' replays the current
    # step, and a number jumps to that step
    step = -1
    while True:
        print('choose a step:')
        inp = raw_input()
        if inp == '':
            step += 1
        elif inp == '=':
            pass          # replay the same step
        else:
            step = int(inp)

        expl, fback = job_data.explorations[step]
        m_signal = expl['m_signal']
        s_signal = fback['s_signal']

        # earlier batch-replay variant, kept commented out:
        # for step, (expl, fback) in enumerate(job_data.explorations):
        #     m_signal = expl['m_signal']
        #     s_signal = fback['s_signal']
        #     m_signal = environments.tools.random_signal(job_data.m_channels)

        print('{:04d} {}'.format(step, expl['from']))

        # alternative filters, kept commented out:
        # if step in [50, 49, 7, 19, 15]:
        # if expl['from'] == 'goal.babbling':
        # if s_signal['push_saliency'] == 0:
        # if not collision_only or s_signal['push_saliency'] == 0:
        # if s_signal['x'] > 100.0 and s_signal['y'] > 100.0:
        # if step in [896, 786, 648, 414]:
        # assert s_signal['push_saliency'] > 999
        # for _ in range(5):
        print('{:04d} {}{}{} [recorded]'.format(step, gfx.cyan, s_signal, gfx.end))
        feedback = env.execute(m_signal)
        print('     {}'.format(feedback['s_signal']))
Example #8
import os

import graphs
import jobfactory   # project-local modules (afcarl/phd)
import hub

from fixed_graph import load_data, disturbs
from ddmab_cluster import expcfgs_levels, cfg


Ns = [5000]
KIND = 'cov'   # assumed: not defined in the original snippet; matches the
               # load_data('cov') call below

mbratio_env_keys, ps, avgs, stds, min_avgs, min_stds = load_data('cov')

cwd = os.getcwd()
adapt_avgs = {}
adapt_stds = {}

for i, exp_cfg in enumerate(expcfgs_levels[0]):
    batch = jobfactory.make_jobgroup([exp_cfg])
    os.chdir(os.path.expanduser(exp_cfg.meta.rootpath))

    data = hub.ResultsHub(batch, kind=KIND).data()[0]
    window    = exp_cfg.exploration.explorer.window
    d         = exp_cfg.exploration.explorer.ex_1.learner.m_disturb

    # for N in cfg.testsnn.ticks:
    for N in Ns:
        index = data['ticks'].index(N)
        adapt_avgs[(d, N)] = data['avg'][index]
        adapt_stds[(d, N)] = data['std'][index]

os.chdir(cwd)
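The resulting dictionaries are keyed by (m_disturb, N); a minimal readout sketch:

for (d, N) in sorted(adapt_avgs):
    print('d={} N={}: {} +/- {}'.format(d, N, adapt_avgs[(d, N)], adapt_stds[(d, N)]))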