Example #1
import random as ran
import time

# agent, auks, cfg, data, gl, and inout are project-specific modules that
# these examples assume are importable; SIGNAL comes from the GUI toolkit
# the project uses (e.g. PyQt4.QtCore).

def run_discrimination_game(n_cycles, context_size):
    """ runs a discrimination game for a number of cycles
    """
    count = 0
    output = []
#    output2 = []
    while count < cfg.replicas:
        agent1 = agent.Agent("agent", "learner")
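        # cfg.training_data selects the data set: 0 = rgb colour,
        # 2 = colour + shape, 3 = artificial (other values are not handled)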
        if cfg.training_data == 0:
            training_data = auks.generate_training_data_colour(n_cycles, context_size)
        elif cfg.training_data == 2:
            training_data = auks.generate_training_data_colour_shape(n_cycles, context_size)
        elif cfg.training_data == 3:
            training_data = auks.generate_training_data_artificial(n_cycles, context_size)
        gl.cycle = 0
        discrimination_success = []
        n_concepts = []
        while gl.cycle < n_cycles:
            discrimination_game(agent1, training_data[gl.cycle], ran.randint(0, context_size-1))
            discrimination_success.append(agent1.discrimination_success)
            n_concepts.append(agent1.cs.count_successfull_concepts(0.2))
            gl.cycle += 1
            if gl.cycle % 100 == 0:
                print(gl.cycle)
        print("replica " + str(count+1) + " disc_success=" + str(agent1.discrimination_success))
        output.append(discrimination_success)
#        output2.append(n_concepts)
        count += 1
    name = "_replicas=" + str(cfg.replicas)+ "_cycle=" + str(cfg.n_cycles) + "_dist=" + str(cfg.sample_minimum_distance)
    inout.write_output("discrimination_game_" + name, output)
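A minimal usage sketch (hypothetical values; cfg.replicas and cfg.training_data are read from the project's config module and are set here only for illustration):

cfg.replicas = 5                    # number of independent runs
cfg.training_data = 0               # 0 selects the rgb colour data set
run_discrimination_game(1000, 4)    # 1000 cycles, contexts of 4 objects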
Example #2
def run_guessing_game(thread, n_cycles, context_size):
    """ runs a guessing game for a number of cycles;
        thread is the GUI thread used for the optional visual updates
    """
    count = 0
    output = []
    test_success = []
    while count < cfg.replicas:
        gl.agent2 = agent.Agent("agent2", "learner")
        if cfg.training_type == "c1":   # rgb colour data
            if cfg.training_data == 0:
                training_data = auks.generate_training_data_colour(n_cycles, context_size)
                gl.agent1 = agent.Agent("agent1", "teacher", ["l", data.colour_data])
            elif cfg.training_data == 2:
                training_data = auks.generate_training_data_colour_shape(n_cycles, context_size)
                gl.agent1 = agent.Agent("agent1", "teacher", data.la_colour_shape)
            elif cfg.training_data == 3:
                training_data = auks.generate_training_data_artificial(n_cycles, context_size)
                gl.agent1 = agent.Agent("agent1", "teacher", data.artificial_data2)
        if cfg.training_type == "c2":   # hsv colour data
                training_data = auks.generate_training_data_colour(n_cycles, context_size, "hsv")
                gl.agent1 = agent.Agent("agent1", "teacher", ["l", data.colour_data_hsv])
        gl.cycle = 0
        guessing_success = []
        while gl.cycle < n_cycles:
            guessing_game(gl.agent1, gl.agent2, training_data[gl.cycle], ran.randint(0, context_size-1))
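            # measure learner performance either with direct knowledge tests
            # or through the running guessing-game success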
            if cfg.test_type == "direct":
                gl.agent2.test_succes.append(test_knowledge(gl.agent1, gl.agent2, cfg.n_tests))
            elif cfg.test_type == "language game":
                gl.agent2.test_succes.append(gl.agent2.guessing_success)
            gl.cycle += 1
            
            # visuals
            if cfg.show_in_panda:
                if thread.stop:
                    break
                thread.emit(SIGNAL("update()"))
                time.sleep(0.02)
                                   
            if gl.cycle % 100 == 0:
                print(gl.cycle)
        print("replica " + str(count+1) + " gg_success=" + str(gl.agent2.guessing_success))
        test_success.append(gl.agent2.test_succes)   # agent attribute keeps its original (misspelled) name
        count += 1
#    name = "learning=" + str(cfg.associative_word_learning) + "_context=" + str(cfg.context_size) + "_replicas=" + str(cfg.replicas)+ "_cycle=" + str(cfg.n_cycles) + "_dist=" + str(cfg.sample_minimum_distance)
#    inout.write_output("guessing_game_" + name, output)
#    inout.write_output("test_success_" + name, success_output)
#    inout.save_matrix(gl.agent2.agent_name, "cs_cs", gl.agent2.cs_cs.matrix)
#    inout.save_matrix(gl.agent2.agent_name, "cs_lex", gl.agent2.cs_lex)
#    inout.save_matrix(gl.agent2.agent_name, "lex_lex", gl.agent2.lex_lex.matrix)
    inout.save_knowledge(gl.agent2)
    inout.write_out("teaching=" + cfg.teaching_type + " test=" + cfg.test_type, [auks.calc_mean(test_succes)])
    return [[auks.calc_mean(test_succes), "teaching=" + cfg.teaching_type + " test=" + cfg.test_type]]
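A minimal headless usage sketch (hypothetical values; when cfg.show_in_panda is False the thread argument is never touched, so None can stand in for the GUI thread):

cfg.show_in_panda = False           # skip the visual updates
cfg.training_type = "c1"            # rgb colour data
cfg.training_data = 0
cfg.test_type = "direct"
result = run_guessing_game(None, 500, 4)    # 500 cycles, contexts of 4 objects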
Example #3
def run_guessing_game_test(agent1, agent2, n_test_objects, context_size):
    """ runs guessing-game tests between two agents and returns the
        communicative success rate; no learning takes place
    """
    if cfg.training_data == 0:
        test_data = auks.generate_training_data_colour(n_test_objects, context_size)
    elif cfg.training_data == 2:
        test_data = auks.generate_training_data_colour_shape(n_test_objects, context_size)
    elif cfg.training_data == 3:
        test_data = auks.generate_training_data_artificial(n_test_objects, context_size)
    guessing_game_result = 0
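    # each test item is a context of objects: agent1 (the speaker) names the
    # topic and agent2 (the hearer) has to point out the right object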
    for context in test_data:
        topic_index = ran.randint(0, context_size-1)
        a1_cs_tags = agent1.cs.get_closest_concept_tags_domains(context[topic_index])
        a1_words = agent1.get_word2(a1_cs_tags)
        a2_guessing_game_answer = agent2.answer_gg([a1_words], context)
        
        if a2_guessing_game_answer[0] == topic_index:
            guessing_game_result += 1
    return float(guessing_game_result) / n_test_objects   # float() avoids integer division
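A minimal usage sketch (hypothetical; assumes gl.agent1 and gl.agent2 have already been trained, e.g. by run_guessing_game above):

cfg.training_data = 0
success = run_guessing_game_test(gl.agent1, gl.agent2, 100, 4)
print("communicative success: " + str(success))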