def run(host_name='local', pipeline=''):
    if pipeline != '':
        return exp_shared.load_exp(pipeline)

    # Create and run new experiment
    exp = Experiment(exp_name='random',
                     fixed_params=[('env', 'wcs')],
                     param_ranges=[('avg_over', range(25)),
                                   ('term_usage', range(3, 12))])
    wcs = com_enviroments.make(exp.fixed_params['env'])

    exp_i = 0
    for (params_i, params_v) in exp:
        print('Scheduled %d experiments out of %d' % (exp_i, len(list(exp))))
        exp_i += 1

        # Random baseline: assign each colour chip to one of the allowed terms
        # uniformly at random.
        N = wcs.data_dim()
        random_map = np.array([np.random.randint(params_v[exp.axes['term_usage']])
                               for n in range(N)])
        exp.set_result('language_map', params_i, random_map)

    return exp
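
# A minimal standalone sketch of the baseline above (the function name and
# defaults are assumptions, not part of the pipeline): a "language map" is an
# assignment of each of the N colour chips to one of k colour terms, and the
# random baseline draws that assignment uniformly at random.
import numpy as np


def random_language_map(n_chips=330, n_terms=5, rng=None):
    """Assign each of n_chips colour chips to one of n_terms terms at random."""
    rng = np.random.default_rng() if rng is None else rng
    return rng.integers(low=0, high=n_terms, size=n_chips)
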
def run(host_name='local', pipeline=''):
    if pipeline != '':
        return exp_shared.load_exp(pipeline)

    wcs = com_enviroments.make('wcs')

    # Create and run new experiment: one run per human language in the WCS data.
    exp = Experiment(exp_name='human',
                     fixed_params=[('env', 'wcs')],
                     param_ranges=[('lang_id', list(wcs.human_mode_maps.keys()))])

    exp_i = 0
    for (params_i, params_v) in exp:
        print('Scheduled %d experiments out of %d' % (exp_i, len(list(exp))))
        exp_i += 1

        human_map = wcs.human_mode_maps[params_v[exp.axes['lang_id']]]
        exp.set_result('language_map', params_i, human_map)
        exp.set_result('term_usage', params_i,
                       exp.run(evaluate.compute_term_usage, V=human_map).result())
        exp.set_result('regier_cost', params_i,
                       exp.run(evaluate.regier2, wcs, map=human_map).result())
        # exp.set_result('regier_cost', params_i,
        #                exp.run(evaluate.communication_cost_regier, wcs, V=human_map,
        #                        sum_over_whole_s=True).result())
        exp.set_result('wellformedness', params_i,
                       exp.run(evaluate.wellformedness, wcs, V=human_map).result())

    exp.save()

    return exp
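
# Sketch of what 'term_usage' measures (an assumption, read from the
# np.unique-based count used in print_tables further below): the number of
# distinct colour terms a language map actually uses, which can be smaller
# than the number of terms made available.
import numpy as np


def term_usage_of(language_map):
    """Count the distinct terms appearing in a chip-to-term assignment."""
    return int(np.unique(np.asarray(language_map)).shape[0])
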
def visualize(exp):
    viz.plot_with_conf2(exp, 'regier_cost', 'term_usage', 'bw_boost')
    viz.plot_with_conf2(exp, 'wellformedness', 'term_usage', 'bw_boost')
    viz.plot_with_conf2(exp, 'term_usage', 'term_usage', 'bw_boost')

    e = com_enviroments.make('wcs')
    for t, maps in zip(exp.param_ranges['term_usage'],
                       exp.reshape('language_map', as_function_of_axes=['term_usage'])):
        e.plot_with_colors(maps[0, :],
                           save_to_path=exp.pipeline_path + 'ccc_map-' + str(t) + '_terms.png')
def main():
    consensus_iters = 10
    e = com_enviroments.make('wcs')
    k = 3
    sims = []

    # human maps
    human_maps = list(e.human_mode_maps.values())

    # robo maps
    exp = Experiment.load('color_fix.1')
    robo_maps = exp.reshape('agent_language_map')

    human_rand = evaluate.mean_rand_index(human_maps)
    exp.log.info('mean rand for all human maps = {:.3f}'.format(human_rand))

    robo_rand = evaluate.mean_rand_index(robo_maps)
    exp.log.info('mean rand for all agent maps = {:.3f}'.format(robo_rand))

    cross_rand = evaluate.mean_rand_index(human_maps, robo_maps)
    exp.log.info('mean rand cross human and robot maps = {:.3f}'.format(cross_rand))

    for k in range(3, 12):
        cielab_map = evaluate.compute_cielab_map(e, k, iter=consensus_iters, bw_boost=1)
        e.plot_with_colors(cielab_map,
                           save_to_path=exp.pipeline_path + 'cielab_map_' + str(k) + '.png')

        human_consensus_map = Correlation_Clustering.compute_consensus_map(
            human_maps, k=k, iter=consensus_iters)
        e.plot_with_colors(human_consensus_map,
                           save_to_path=exp.pipeline_path + 'human_consensus_language_map_' + str(k) + '.png')

        robo_consensus_map = Correlation_Clustering.compute_consensus_map(
            robo_maps, k=k, iter=consensus_iters)
        e.plot_with_colors(robo_consensus_map,
                           save_to_path=exp.pipeline_path + 'consensus_language_map_' + str(k) + '.png')

        # compare human and robo maps
        rand_i = adjusted_rand_score(human_consensus_map, robo_consensus_map)
        print('rand i between human consensus and agent consensus = {:.3f}'.format(rand_i))
        sims += [rand_i]

    sims = np.array(sims)
    print(sims.mean())
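
# The map comparisons above rely on evaluate.mean_rand_index. A standalone
# sketch of that idea (an assumption: it is read here as a mean pairwise
# adjusted Rand index, matching the sklearn adjusted_rand_score used above).
import numpy as np
from sklearn.metrics import adjusted_rand_score


def mean_pairwise_rand(maps_a, maps_b=None):
    """Mean adjusted Rand index over all pairs within maps_a, or across
    maps_a x maps_b when a second collection is given."""
    if maps_b is None:
        pairs = [(a, b) for i, a in enumerate(maps_a) for b in maps_a[i + 1:]]
    else:
        pairs = [(a, b) for a in maps_a for b in maps_b]
    return float(np.mean([adjusted_rand_score(a, b) for a, b in pairs]))
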
def analyse(exp):
    wcs = com_enviroments.make(exp.fixed_params['env'])
    i = 1
    for (params_i, params_v) in exp:
        consensus = exp.get_result('language_map', params_i)
        exp.set_result('regier_cost', params_i,
                       exp.run(evaluate.regier2, wcs, map=consensus).result())
        exp.set_result('wellformedness', params_i,
                       exp.run(evaluate.wellformedness, wcs, V=consensus).result())
        exp.set_result('term_usage', params_i,
                       exp.run(evaluate.compute_term_usage, V=consensus).result())
        print('Scheduled analysis of %d experiments out of %d' % (i, len(list(exp))))
        i += 1
def print_tables(exp):
    term_usage_to_analyse = list(range(3, 12))
    iterations = 10

    agent_maps = exp.reshape('agent_language_map')
    agent_term_usage = exp.reshape('term_usage')
    maps_vs_noise = exp.reshape('agent_language_map', as_function_of_axes=['perception_noise'])
    term_usage_vs_noise = exp.reshape('term_usage', as_function_of_axes=['perception_noise'])

    e = com_enviroments.make('wcs')
    human_maps = np.array(list(e.human_mode_maps.values()))
    human_term_usage = np.array([np.unique(m).shape[0] for m in human_maps])

    agent_mean_rand_vs_term_usage = []
    agent_mean_rand_over_noise_groups_vs_term_usage = []
    human_mean_rand_vs_term_usage = []
    cross_rand_vs_term_usage = []
    cross_agent_consensus_to_humans_vs_term_usage = []
    human_to_cielab_rand = []
    human_to_random_rand = []

    for t in term_usage_to_analyse:
        agent_mean_rand_vs_term_usage += [
            evaluate.mean_rand_index(agent_maps[agent_term_usage == t])]

        a = np.array([evaluate.mean_rand_index(
                          maps_vs_noise[noise_i][term_usage_vs_noise[noise_i] == t])
                      for noise_i in range(len(maps_vs_noise))])
        agent_mean_rand_over_noise_groups_vs_term_usage += [a[~np.isnan(a)].mean()]

        human_mean_rand_vs_term_usage += [
            evaluate.mean_rand_index(human_maps[human_term_usage == t])]

        cross_rand_vs_term_usage += [
            evaluate.mean_rand_index(human_maps[human_term_usage == t],
                                     agent_maps[agent_term_usage == t])]

        if len(agent_maps[agent_term_usage == t]) >= 1:
            agent_consensus_map = Correlation_Clustering.compute_consensus_map(
                agent_maps[agent_term_usage == t], k=t, iter=iterations)
            cross_agent_consensus_to_humans_vs_term_usage += [
                evaluate.mean_rand_index(human_maps[human_term_usage == t],
                                         [agent_consensus_map])]
            e.plot_with_colors(agent_consensus_map,
                               save_to_path=exp.pipeline_path + 'agent_consensus_map-' + str(t) + '_terms.png')
        else:
            cross_agent_consensus_to_humans_vs_term_usage += [np.nan]

        human_to_cielab_rand += [
            evaluate.mean_rand_index(human_maps[human_term_usage == t],
                                     [evaluate.compute_cielab_map(e, k=t, iterations=10)])]

        human_to_random_rand += [
            evaluate.mean_rand_index(human_maps[human_term_usage == t],
                                     [[np.random.randint(t) for n in range(330)]
                                      for n in range(100)])]
def run():
    exp = Experiment(exp_name='local_experiment',
                     fixed_params=[('env', 'wcs'),
                                   ('max_epochs', 10000),  # 10000
                                   ('hidden_dim', 20),
                                   ('batch_size', 100),
                                   ('perception_dim', 3),
                                   ('target_dim', 330),
                                   ('print_interval', 1000)],
                     param_ranges=[('avg_over', range(2)),  # 50
                                   ('perception_noise', [0, 25]),  # [0, 25, 50, 100]
                                   ('msg_dim', range(9, 11)),  # 3, 12
                                   ('com_noise', np.linspace(start=0, stop=0.5, num=2))])
    env = com_enviroments.make(exp.fixed_params['env'])

    exp_i = 0
    for (params_i, params_v) in exp:
        print('Scheduled %d experiments out of %d' % (exp_i, len(list(exp))))
        exp_i += 1

        agent_a = agent_b = agents.SoftmaxAgent(
            msg_dim=params_v[exp.axes['msg_dim']],
            hidden_dim=exp.fixed_params['hidden_dim'],
            color_dim=exp.fixed_params['target_dim'],
            perception_dim=exp.fixed_params['perception_dim'])

        game = com_game.NoisyChannelGame(
            com_noise=params_v[exp.axes['com_noise']],
            msg_dim=params_v[exp.axes['msg_dim']],
            max_epochs=exp.fixed_params['max_epochs'],
            perception_noise=params_v[exp.axes['perception_noise']],
            batch_size=exp.fixed_params['batch_size'],
            print_interval=exp.fixed_params['print_interval'])

        game_outcome = game.play(env, agent_a, agent_b)

        V = evaluate.agent_language_map(env, a=game_outcome)
        exp.set_result('gibson_cost', params_i, game.compute_gibson_cost(env, a=game_outcome)[1])
        exp.set_result('regier_cost', params_i, evaluate.communication_cost_regier(env, V=V))
        exp.set_result('wellformedness', params_i, evaluate.wellformedness(env, V=V))
        exp.set_result('term_usage', params_i, evaluate.compute_term_usage(V=V))

    print("\nAll tasks queued to clusters")

    # wait for all tasks to complete
    exp.save()

    return exp.pipeline_name
def run(host_name='local', pipeline=''):
    if pipeline != '':
        return exp_shared.load_exp(pipeline)

    # Create and run new experiment
    queue = exp_shared.create_queue(host_name)
    queue.sync('.', '.', exclude=['pipelines/*', 'fig/*', 'old/*', 'cogsci/*'],
               sync_to=sge.SyncTo.REMOTE, recursive=True)

    exp = Experiment(exp_name='ccc',
                     fixed_params=[('iterations', 10),
                                   ('env', 'wcs')],
                     param_ranges=[('avg_over', range(5)),
                                   ('bw_boost', [1]),  # np.linspace(start=0, stop=1, num=1)
                                   ('term_usage', range(3, 12))],
                     queue=queue)
    queue.sync(exp.pipeline_path, exp.pipeline_path, sync_to=sge.SyncTo.REMOTE, recursive=True)

    wcs = com_enviroments.make(exp.fixed_params['env'])

    exp_i = 0
    for (params_i, params_v) in exp:
        print('Scheduled %d experiments out of %d' % (exp_i, len(list(exp))))
        exp_i += 1

        # Build the symmetric correlation graph of pairwise chip similarities.
        N = wcs.data_dim()
        corr_graph = np.zeros((N, N))
        for i in range(0, N):
            for j in range(0, i):
                weight = (wcs.sim_index(i, j, bw_boost=params_v[exp.axes['bw_boost']]).numpy() - 0.5) * 100
                corr_graph[i, j] = weight
                corr_graph[j, i] = weight

        consensus = exp.run(Correlation_Clustering.max_correlation,
                            corr_graph,
                            params_v[exp.axes['term_usage']],
                            exp.fixed_params['iterations']).result()
        # print(params_v)
        # print('set {} actual {}'.format(params_v[exp.axes['term_usage']],
        #                                 exp.run(evaluate.compute_term_usage, V=consensus).result().get()))
        exp.set_result('language_map', params_i, consensus)

    return exp
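
# A standalone sketch of the correlation-graph construction above (the helper
# name and the dense-matrix input are assumptions): shifting similarities in
# [0, 1] by 0.5 and scaling turns them into signed correlation-clustering
# weights, so chip pairs more similar than 0.5 attract (positive weight) and
# less similar pairs repel (negative weight).
import numpy as np


def similarity_to_correlation_graph(sim):
    """sim: (N, N) symmetric array of pairwise similarities in [0, 1]."""
    graph = (np.asarray(sim, dtype=float) - 0.5) * 100.0
    np.fill_diagonal(graph, 0.0)  # the diagonal carries no clustering signal
    return graph
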
def visualize(exp):
    regier_cost = exp.reshape('regier_cost', as_function_of_axes=['lang_id'])
    term_usage = exp.reshape('term_usage', as_function_of_axes=['lang_id'])

    plt.figure()
    plt.scatter(term_usage, regier_cost)
    fig_name = exp.pipeline_path + '/fig_regier_wcs_scatter.png'
    plt.savefig(fig_name)  # save before show so the saved figure is not blank
    plt.show()
    # viz.plot_with_conf2(exp, 'regier_cost', 'term_usage', 'lang_id')
    # viz.plot_with_conf2(exp, 'wellformedness', 'term_usage', 'lang_id')

    maps = exp.reshape('language_map')
    term_usage = exp.reshape('term_usage')
    iterations = 10
    e = com_enviroments.make('wcs')
    for t in np.unique(term_usage):
        if len(maps[term_usage == t]) >= 1:
            consensus_map = Correlation_Clustering.compute_consensus_map(
                maps[term_usage == t], k=t, iter=iterations)
            e.plot_with_colors(consensus_map,
                               save_to_path=exp.pipeline_path + 'human_consensus_map-' + str(t) + '_terms.png')