def run_guessing_game(thread, n_cycles, context_size): """ runs a guessing game for a number of cycles """ count = 0 output = [] test_succes = [] while count < cfg.replicas: gl.agent2 = agent.Agent("agent2", "learner") if cfg.training_type == "c1": # rgb colour data if cfg.training_data == 0: training_data = auks.generate_training_data_colour(n_cycles, context_size) gl.agent1 = agent.Agent("agent1", "teacher", ["l", data.colour_data]) elif cfg.training_data == 2: training_data = auks.generate_training_data_colour_shape(n_cycles, context_size) gl.agent1 = agent.Agent("agent1", "teacher", data.la_colour_shape) elif cfg.training_data == 3: training_data = auks.generate_training_data_artificial(n_cycles, context_size) gl.agent1 = agent.Agent("agent1", "teacher", data.artificial_data2) if cfg.training_type == "c2": # hsv colour data training_data = auks.generate_training_data_colour(n_cycles, context_size, "hsv") gl.agent1 = agent.Agent("agent1", "teacher", ["l", data.colour_data_hsv]) gl.cycle = 0 guessing_success = [] test_success = [] while gl.cycle < n_cycles: guessing_game(gl.agent1, gl.agent2, training_data[gl.cycle], ran.randint(0, context_size-1)) if cfg.test_type == "direct": gl.agent2.test_succes.append(test_knowledge(gl.agent1, gl.agent2, cfg.n_tests)) elif cfg.test_type =="language game": gl.agent2.test_succes.append(gl.agent2.guessing_success) gl.cycle += 1 # visuals if cfg.show_in_panda: if thread.stop: break thread.emit(SIGNAL("update()")) time.sleep(0.02) if gl.cycle % 100 == 0: print gl.cycle print "replica " + str(count+1) + " gg_success=" + str(gl.agent2.guessing_success) test_succes.append(gl.agent2.test_succes) count += 1 # name = "learning=" + str(cfg.associative_word_learning) + "_context=" + str(cfg.context_size) + "_replicas=" + str(cfg.replicas)+ "_cycle=" + str(cfg.n_cycles) + "_dist=" + str(cfg.sample_minimum_distance) # inout.write_output("guessing_game_" + name, output) # inout.write_output("test_success_" + name, success_output) # 
inout.save_matrix(gl.agent2.agent_name, "cs_cs", gl.agent2.cs_cs.matrix) # inout.save_matrix(gl.agent2.agent_name, "cs_lex", gl.agent2.cs_lex) # inout.save_matrix(gl.agent2.agent_name, "lex_lex", gl.agent2.lex_lex.matrix) inout.save_knowledge(gl.agent2) inout.write_out("teaching=" + cfg.teaching_type + " test=" + cfg.test_type, [auks.calc_mean(test_succes)]) return [[auks.calc_mean(test_succes), "teaching=" + cfg.teaching_type + " test=" + cfg.test_type]]
def run_discrimination_game(n_cycles, context_size): """ runs a discrimination game for a number of cycles """ count = 0 output = [] # output2 = [] while count < cfg.replicas: agent1 = agent.Agent("agent", "learner") if cfg.training_data == 0: training_data = auks.generate_training_data_colour(n_cycles, context_size) elif cfg.training_data == 2: training_data = auks.generate_training_data_colour_shape(n_cycles, context_size) elif cfg.training_data == 3: training_data = auks.generate_training_data_artificial(n_cycles, context_size) gl.cycle = 0 discrimination_success = [] n_concepts = [] while gl.cycle < n_cycles: discrimination_game(agent1, training_data[cycle], ran.randint(0, context_size-1)) discrimination_success.append(agent1.discrimination_success) n_concepts.append(agent1.cs.count_successfull_concepts(0.2)) gl.cycle += 1 if gl.cycle % 100 == 0: print gl.cycle print "replica " + str(count+1) + " disc_success=" + str(agent1.discrimination_success) output.append(discrimination_success) # output2.append(n_concepts) count += 1 name = "_replicas=" + str(cfg.replicas)+ "_cycle=" + str(cfg.n_cycles) + "_dist=" + str(cfg.sample_minimum_distance) inout.write_output("discrimination_game_" + name, output)
def run_objects_test(agent1, agent2, n_test_objects):
    """ Runs naming tests between two agents over n_test_objects samples and
        returns the fraction of objects both agents name identically (0.0-1.0).
        If cfg.test_2nd_word is set and the learner knows >1 word, the learner's
        two best answers both count; on a mismatch with cfg.correct set, the
        learner is corrected with the teacher's word.
    """
    test_data = []
    if cfg.training_type == "a1":
        test_data = auks.generate_training_data_simple(n_test_objects)
    elif cfg.training_type == "c":
        test_dat = auks.generate_training_data_colour(n_test_objects, 1)
        for i in test_dat:
            test_data.append(i[0])    # remove one list level for compatibility
    n_correct = 0
    for i in test_data:
        if cfg.test_2nd_word and len(agent2.lex.words) > 1:
            a1_word = agent1.name_object(i)
            a2_answers = []
            a2_answers.append(agent2.name_object(i))
            a2_answers.append(agent2.name_object_2nd_best(i))
            if a1_word in a2_answers:
                n_correct += 1
        else:
            # a1_word = agent1.name_object_alt(i)
            # a2_word = agent2.name_object_alt(i)
            a1_word = agent1.name_object(i)
            a2_word = agent2.name_object(i)
            if a1_word == a2_word:
                n_correct += 1
            else:
                if cfg.correct:
                    agent2.learn_concept(a1_word, i)
    # BUG FIX: n_correct/len(test_data) is integer floor division in Python 2,
    # which always yielded 0 unless every answer matched; force true division.
    return float(n_correct)/len(test_data)
def test_knowledge(agent1, agent2, n_tests):
    """ Test the knowledge of an agent.
        A random test sample is drawn and both agents name it using the closest
        matching word label; if the labels match the test is successful.
        Returns the success rate as a fraction (0.0-1.0) over n_tests draws.
    """
    counter, success = 0, 0
    while counter < n_tests:
        sample = auks.generate_training_data_colour(1, 1)
        a1_cs_tag = [agent1.cs.get_closest_concept_tag(sample[0][0])]
        a2_cs_tag = [agent2.cs.get_closest_concept_tag(sample[0][0])]
        a1_word = agent1.get_word2(a1_cs_tag)
        a2_word = agent2.get_word2(a2_cs_tag)
        if a1_word == a2_word:
            success += 1
        counter +=1
    # BUG FIX: success/n_tests is integer floor division in Python 2, which
    # always yielded 0 unless every test succeeded; force true division.
    return float(success)/n_tests
def run_guessing_game_test(agent1, agent2, n_test_objects, context_size):
    """ Runs guessing-game tests between two agents with no learning and
        returns communicative success as a fraction (0.0-1.0).
        For each test context, agent1 names a random topic and agent2 must
        point out the correct topic index from the context.
    """
    if cfg.training_data == 0:
        test_data = auks.generate_training_data_colour(n_test_objects, context_size)
    elif cfg.training_data == 2:
        test_data = auks.generate_training_data_colour_shape(n_test_objects, context_size)
    elif cfg.training_data == 3:
        test_data = auks.generate_training_data_artificial(n_test_objects, context_size)
    guessing_game_result = 0
    for i in test_data:
        topic_index = ran.randint(0, context_size-1)
        a1_cs_tags = agent1.cs.get_closest_concept_tags_domains(i[topic_index])
        a1_words = agent1.get_word2(a1_cs_tags)
        a2_guessing_game_answer = agent2.answer_gg([a1_words], i)
        if a2_guessing_game_answer[0] == topic_index:
            guessing_game_result += 1
    # BUG FIX: guessing_game_result/n_test_objects is integer floor division in
    # Python 2, yielding 0 unless every guess was right; force true division.
    return float(guessing_game_result)/n_test_objects
def run_direct_instruction(thread):
    """ Runs direct instruction from a teaching agent to a learning agent for
        cfg.n_cycles cycles per replica, over cfg.replicas replicas.
        thread: GUI thread used for panda visuals (checked for .stop, emits updates)
        Returns [[mean test success, gl.test_title], [mean similarity, "similarity"]].
        Side effects: rebinds gl.agent1/gl.agent2, saves knowledge and writes
        result series via inout.
        NOTE(review): with cfg.teacher_type == "h" only run_human_teaching is
        called and the function implicitly returns None — confirm intended.
    """
    if cfg.teacher_type == "h":
        run_human_teaching(cfg.n_cycles)
    if cfg.teacher_type == "a":
        count = 0
        agent2_similarity = []      # per-replica learner/teacher similarity series
        agent2_test_succes = []     # per-replica test-success series (project spelling)
        while count < cfg.replicas:
            training_data = []
            # Build teacher agent and training data per configured training type.
            if cfg.training_type == "a1":
                #generate_artificial_knowledge(n_dom, av_dimensions, avn_exemplars, av_words, av_associations, av_cluster_size = None)
                a_data = auks.generate_artificial_knowledge(1, 3, 10, 10, 3, 3)
                gl.agent1 = agent.Agent("agent1", "teacher", ('a', a_data))
                training_data = auks.generate_training_data_simple(cfg.n_cycles)
            elif cfg.training_type == "a2":
                a_data = auks.create_knowledge_with_a_data(data.artificial_percepts, data.artificial_san_percepts, data.artificial_words, data.artificial_san_words, data.artificial_matrix)
                gl.agent1 = agent.Agent("agent1", "teacher", ('a', a_data))
                training_data = auks.generate_training_data_simple(cfg.n_cycles)
            elif cfg.training_type == "c1":
                gl.agent1 = agent.Agent("agent1", "teacher", ('l', data.colour_data))
                training_dat = auks.generate_training_data_colour(cfg.n_cycles, 1)
                for i in training_dat:
                    training_data.append(i[0])    # remove one list level for compatibility
            elif cfg.training_type == "c2":
                gl.agent1 = agent.Agent("agent1", "teacher", ('l', data.colour_data_hsv))
                training_dat = auks.generate_training_data_colour(cfg.n_cycles, 1)
                for i in training_dat:
                    training_data.append(i[0])    # remove one list level for compatibility
            inout.save_knowledge(gl.agent1)
            gl.agent2 = agent.Agent("agent2", "learner")
            agent2_numbers = [["words","percepts"]]    # header row, then per-cycle counts
            agent2_sim = []
            for x, i in enumerate(training_data):
                # Teacher names the object; learner learns the word-object pairing.
                a1_word = gl.agent1.name_object(i)
                gl.agent2.learn_concept(a1_word, i)
                #learning
                if cfg.associative_word_learning:
                    a1_word_related = gl.agent1.name_object_related(i)
                    #print a1_word, a1_word_related
                    gl.agent2.learn_word_association(a1_word, a1_word_related)
                if cfg.associative_object_learning:
                    pass    # to be implemented
                agent2_numbers.append([gl.agent2.n_words, gl.agent2.n_percepts])
                #testing
                if cfg.test_type == "direct":    # direct test
                    gl.agent2.test_succes.append(run_objects_test(gl.agent1, gl.agent2, cfg.n_tests))
                elif cfg.test_type == "language game":    # language game test
                    gl.agent2.test_succes.append(run_guessing_game_test(gl.agent1, gl.agent2, cfg.n_tests, cfg.context_size))
                agent2_sim.append(calc_similarity_agents(gl.agent1, gl.agent2))
                # visuals
                if cfg.show_in_panda:
                    if thread.stop:
                        break    # GUI requested stop; abandon this replica's cycle loop
                    thread.emit(SIGNAL("update()"))
                    time.sleep(0.05)
                if x % 10 == 0:
                    print x
            count += 1
            agent2_similarity.append(agent2_sim)
            agent2_test_succes.append(gl.agent2.test_succes)
            print "replica " + str(count)
        # stats
        inout.save_knowledge(gl.agent2)
        inout.write_out("agent2_numbers", agent2_numbers)
        inout.write_out("teaching=" + cfg.teaching_type + " test=" + cfg.test_type + " 2nd word=" + str(cfg.test_2nd_word), [auks.calc_mean(agent2_test_succes)])
        #inout.write_out_average("agent2_test_correct_clusters=" + str(cfg.SAN_clusters) + "_domain=" + str(cfg.training_type) + "_alearning=" + str(cfg.associative_word_learning), agent2_correct)
        #inout.write_out_average("agent2_similarity_clusters=" + str(cfg.SAN_clusters) + "_domain=" + str(cfg.training_type) + "_alearning=" + str(cfg.associative_word_learning), agent2_similarity)
        #go.output([auks.calc_mean(agent2_correct)], "test success", "# interactions", "% correct", "agent2_test_succes")
        #go.output([auks.calc_mean(agent2_comm_succes)], "communicative success", "# interactions", "% correct", "agent2_comm_succes")
        return [[auks.calc_mean(agent2_test_succes), gl.test_title], [auks.calc_mean(agent2_similarity), "similarity"]]