def usage_graph(ex_cfg, explorations, weight_history, title='no title'):
    """Plot, for each explorer, its usage ratio over a sliding window.

    :param ex_cfg:          explorer configuration (unused here; kept for
                            signature compatibility with sibling graph functions)
    :param explorations:    sequence of (exploration, feedback) pairs; each
                            exploration dict carries the 'uuid' of the explorer
                            that produced it
    :param weight_history:  dict with 'data' (per-timestep weight rows),
                            'ex_uuids' and 'ex_names' (one entry per explorer)
    :param title:           title suffix for the generated plot
    """
    colors = [graphs.BLUE, graphs.PINK]
    window = 100.0
    # ordered history of which explorer produced each exploration step
    uuid_history = tuple(e[0]['uuid'] for e in explorations)
    # FIX: `weight_lists` was undefined here (NameError); recreate it the same
    # way adapt_graphs() does, materialized as a list so enumerate() can run.
    weight_lists = list(zip(*weight_history['data']))
    for i, _weights in enumerate(weight_lists):
        ex_uuid = weight_history['ex_uuids'][i]
        usage = [t_usage(window, t, ex_uuid, uuid_history)
                 for t in range(len(uuid_history))]
        graphs.perf_std(range(len(usage)), usage, [0.0 for _ in usage],
                        color=colors[i], alpha=0.5,
                        plot_width=1000, plot_height=300,
                        title='usage: {}'.format(title),
                        y_range=(0.0, 1.0))
        graphs.hold(True)
    graphs.hold(False)
def coverage_graphs(expcfgs_levels, dest='tmp.html', n_graphs=3):
    """Render coverage and spread graphs for each experiment configuration.

    :param expcfgs_levels:  nested iterable: levels, each a list of exp configs
    :param dest:            output html file the graphs are written to
    :param n_graphs:        max number of datasets graphed per config;
                            None means no limit
    """
    cwd = os.getcwd()
    graphs.output_file(dest)
    red = '#DF6464'  # highlight color for reused effects
    for level in expcfgs_levels:
        for i, exp_cfg in enumerate(level):
            n = 0  # datasets graphed so far for this config
            batch = jobfactory.make_jobgroup([exp_cfg])
            # data is loaded relative to the experiment's root directory
            os.chdir(os.path.expanduser(exp_cfg.meta.rootpath))
            data_hub = hub.DataHub(batch, sensory_only=True)
            datas = data_hub.data()
            for j, data in enumerate(datas):
                if n_graphs is None or n < n_graphs:
                    # graph the first N effects, for two prefix sizes
                    for N in [200, 1000]:
                        print(exp_cfg.exploration.explorer)
                        # NOTE(review): presumably the count of reused effects
                        # at the start of exploration (first era boundary) —
                        # confirm against the explorer config.
                        n_reuse = exp_cfg.exploration.explorer.eras[0]
                        s_vectors = [tools.to_vector(s_signal, data.s_channels)
                                     for s_signal in data.s_signals][:N]
                        graphs.coverage(data.s_channels,
                                        exp_cfg.testscov.buffer_size,
                                        s_vectors=s_vectors, swap_xy=False,
                                        title_text_font_size='6pt',
                                        title='{} {}'.format(exp_cfg.exp.key, j))
                        graphs.hold(True)
                        # effects produced after the reuse phase
                        graphs.spread(data.s_channels,
                                      s_vectors=s_vectors[n_reuse:],
                                      swap_xy=False, e_radius=2.0)
                        graphs.hold(True)
                        # reused effects, overlaid in red
                        graphs.spread(data.s_channels,
                                      s_vectors=s_vectors[:n_reuse],
                                      swap_xy=False, e_radius=2.0, e_color=red)
                        n += 1
    os.chdir(cwd)  # restore the caller's working directory
def adapt_graphs(ex_cfg, explorations, s_vectors, weight_history, mesh=None, title='no title'):
    """Plot coverage/spread of effects, diversity weights, and explorer usage.

    :param ex_cfg:          exploration config; provides s_channels,
                            div_algorithm and (for 'hyperball') threshold
    :param explorations:    sequence of (exploration, feedback) pairs; each
                            exploration dict carries 'from' (source explorer)
                            and 'uuid' keys
    :param s_vectors:       sensory effect vectors to display
    :param weight_history:  dict with 'data', 'ex_uuids', 'ex_names'
    :param mesh:            optional mesh to display for the 'grid' algorithm
    :param title:           title suffix for the generated plots
    :return: dict mapping source explorer name -> exploration count
    """
    s_channels = ex_cfg.s_channels

    # tally how many explorations each source explorer contributed
    tally_dict = {}
    for exploration, feedback in explorations:
        tally_dict.setdefault(exploration['from'], 0)
        tally_dict[exploration['from']] += 1
    print(tally_dict)

    if ex_cfg.div_algorithm == 'hyperball':
        graphs.coverage(s_channels, ex_cfg.threshold, s_vectors=s_vectors, title=title)
    else:
        assert ex_cfg.div_algorithm == 'grid'
        if mesh is not None:
            graphs.mesh(mesh, title=title)
    graphs.hold(True)
    graphs.spread(s_channels, s_vectors=s_vectors, e_radius=1.25, e_alpha=0.75)

    # interest measure
    colors = [graphs.BLUE, graphs.PINK]
    # FIX: zip() returns a one-shot iterator on Python 3 and weight_lists is
    # iterated twice (diversity graph, then usage graph); materialize it so
    # the second loop is not silently empty.
    weight_lists = list(zip(*weight_history['data']))
    for i, weights in enumerate(weight_lists):
        graphs.perf_std(range(len(weights)), weights, [0.0 for _ in weights],
                        legend=weight_history['ex_names'][i],
                        color=colors[i], alpha=0.5,
                        plot_width=1000, plot_height=300,
                        title='diversity: {}'.format(title))
        graphs.hold(True)
    graphs.hold(False)

    # usage with sliding window
    window = 100.0
    uuid_history = tuple(e[0]['uuid'] for e in explorations)
    for i, _weights in enumerate(weight_lists):
        ex_uuid = weight_history['ex_uuids'][i]
        usage = [t_usage(window, t, ex_uuid, uuid_history)
                 for t in range(len(uuid_history))]
        graphs.perf_std(range(len(usage)), usage, [0.0 for _ in usage],
                        color=colors[i], alpha=0.5,
                        plot_width=1000, plot_height=300,
                        title='usage: {}'.format(title),
                        y_range=(0.0, 1.0))
        graphs.hold(True)
    graphs.hold(False)

    return tally_dict
def perf_graphs(sources, targets, mb_steps=200, y_maxs=(360000, 200000)):
    """Plot reuse performance curves against the random-goal baseline.

    :param sources:  dict keyed by (env_name, explorer_name) with baseline
                     results ('ticks', 'avg', 'astd')
    :param targets:  dict keyed by (env_name, ex_name) with reuse results;
                     each value also carries its 'exp_cfg'
    :param mb_steps: step count used to select the baseline source key
    :param y_maxs:   y-axis maxima: index 0 for 'dov_ball*' envs, 1 otherwise
    """
    already = False
    for (env_name, ex_name), results in targets.items():
        exp_cfg = results['exp_cfg']
        if not exp_cfg.exp.explorer_name.startswith('reuse.random'):
            if len(exp_cfg.exp.key[1]) != 0:
                if not already:
                    # NOTE(review): this assignment is a no-op (`already` is
                    # already False), so the baseline below is re-plotted on
                    # every iteration; `already = True` may have been the
                    # intent — confirm before changing.
                    already = False
                    src_results = sources[(exp_cfg.exp.env_name,
                                           'random.goal_{}'.format(mb_steps))]
                    y_max = y_maxs[0] if exp_cfg.exp.env_name.startswith(
                        'dov_ball') else y_maxs[1]
                    # baseline curve (no reuse)
                    graphs.perf_astd(src_results['ticks'], src_results['avg'],
                                     src_results['astd'],
                                     color=graphs.NOREUSE_COLOR,
                                     plot_width=1000, plot_height=500,
                                     x_range=(0, exp_cfg.job.steps),
                                     y_range=(0, y_max),
                                     title='{}'.format(exp_cfg.exp.key))
                    graphs.hold(True)
                print('{}::{} {}'.format(exp_cfg.exp.env_name,
                                         exp_cfg.exp.explorer_name,
                                         results['avg'][-1]))
                # reuse curve overlaid on the baseline
                graphs.bokeh_astds(results['ticks'], results['avg'],
                                   results['astd'], color=graphs.REUSE_COLOR)
                graphs.hold(True)
                # random-reuse counterpart, if present
                random_key = randomit(exp_cfg.exp.key)
                if random_key in targets:
                    random_results = targets[random_key]
                    graphs.perf_astd(random_results['ticks'],
                                     random_results['avg'],
                                     random_results['astd'],
                                     color=graphs.RANDREUSE_COLOR)
                graphs.hold(False)
for ex_name in ['random.goal']: random.seed(0) # instanciating the environment env_name, env_cfg = envs.kin(dim=DIM, limit=LIMIT) env = environments.Environment.create(env_cfg) # instanciating the explorer ex_cfg = exs.catalog[ex_name]._deepcopy() ex_cfg.m_channels = env.m_channels ex_cfg.s_channels = env.s_channels ex = explorers.Explorer.create(ex_cfg) # running exploration explorations, s_vectors, s_goals = factored.run_exploration(env, ex, N, verbose=True) # making graphs for t1, t2 in [(0, 100), (100, N)]: alpha = 1.0 if t2 == 100 else 0.25 graphs.posture_random(env, explorations[t1:t2], n=10, alpha=0.75, radius_factor=0.35) graphs.hold(True) graphs.bokeh_spread(env.s_channels, s_vectors=s_vectors[:t2], e_radius=1.5, e_alpha=alpha, x_range=(-1.05, 1.05), y_range=(-1.05, 1.05), title='{}::{}'.format(ex_name, env_name)) graphs.show()
# Graph of reused effects in first env s_vectors0 = [] for explo in explorations[:MB]: feedback = env0.execute(explo[0]['m_signal']) s_vectors0.append( explorers.tools.to_vector(feedback['s_signal'], env0.s_channels)) graphs.spread(env.s_channels, s_vectors=s_vectors0[:MB], e_radius=3.0, e_alpha=1.0, e_color='#DF6464', title='first arm - reused effects') for e in explorations[:MB]: graphs.hold(True) graphs.posture_signals(env0, [e[0]['m_signal']], alpha=ARM_ALPHA, radius_factor=0.75) graphs.hold(True) graphs.spread(env.s_channels, s_vectors=s_vectors0[:MB], grid=False, e_radius=3.0, e_alpha=1.0, e_color='#DF6464') graphs.hold(False) # Graph Reuse for t in [MB, 200, 400, N]:
        # NOTE(review): fragment — the enclosing loop and `def load_data(...)`
        # are outside this excerpt; indentation reconstructed.
        min_avgs[env_key] = data['avg'][index]
        min_stds[env_key] = data['std'][index]

    # sort parameter values and align avg/std arrays with that order
    for env_key in env_keys:
        ps[env_key] = sorted(ps[env_key])
        avgs[env_key] = np.array([avgs[env_key][p] for p in ps[env_key]])
        stds[env_key] = np.array([stds[env_key][p] for p in ps[env_key]])

    os.chdir(cwd)  # restore the caller's working directory
    return env_keys, ps, avgs, stds, min_avgs, min_stds


if __name__ == '__main__':
    # plot avg +/- std performance for the selected environments
    env_keys, ps, avgs, stds, min_avgs, min_stds = load_data('nn')
    colors = [graphs.BLUE, graphs.PINK, graphs.GREEN]
    graphs.output_file('fixed_graph.html')
    env_displayed = [env_key for env_key in env_keys
                     if env_key[0] == 'kin20_150' and env_key[2] == 5000]
    for color, envd in zip(colors, env_displayed):
        print(avgs[envd])
        # elementwise numpy add: highest avg+std point sets the y ceiling
        y_max = max(avgs[envd] + stds[envd])
        graphs.perf_std_discrete(ps[envd], avgs[envd], stds[envd],
                                 std_width=0.0035, color=color,
                                 y_range=[0.0, y_max+0.02],
                                 plot_width=1000, plot_height=500,
                                 title='{} {}'.format(*envd))
        graphs.hold(True)
    graphs.show()
        # NOTE(review): fragment — the enclosing loop and function definition
        # are outside this excerpt; indentation reconstructed.
        for N in Ns:
            index = data['ticks'].index(N)
            adapt_avgs[(d, N)] = data['avg'][index]
            adapt_stds[(d, N)] = data['std'][index]
    os.chdir(cwd)  # restore the caller's working directory
    for d in disturbs:
        for N in Ns:
            print('N={}: {} +- {}'.format(N, adapt_avgs[(d, N)],
                                          adapt_stds[(d, N)]))


if __name__ == '__main__':
    graphs.output_file('ddmab_graph.html')
    # one y-range per disturbance level, matched by index below
    y_ranges=[(0.075, 0.3), (0.0, 0.1), (0.08, 0.14)]
    for i, d in enumerate(disturbs):
        y_range = y_ranges[i]
        for N in Ns:
            # NOTE(review): the comprehension variable `i` shadows the outer
            # loop index (harmless in Python 3 — comprehensions scope their
            # variable — but would clobber it on Python 2).
            graphs.perf_std_discrete([100*i*0.05 for i in range(21)],
                                     avgs[('kin20_150', d, N)],
                                     stds[('kin20_150', d, N)],
                                     std_width=0.25, alpha=0.5,
                                     y_range=y_range, plot_height=300,
                                     title='d={} t={}'.format(d, N))
            graphs.hold(True)
            graphs.line([0, 100], adapt_avgs[(d, N)], adapt_stds[(d, N)])
            graphs.hold(False)
    graphs.show()