Example #1
 def two_agents_LG(self):
     """ Language game with two agents (typically a teacher and a learner). """
     all_results = []

     for i in range(pm.n_replicas):
         # agent1 acts as the teacher, so it starts from predefined knowledge
         agent1 = agent.Agent("agent1", learning=False)
         agent1.load_knowledge()

         # agent2 is the learner and starts from scratch
         agent2 = agent.Agent("agent2")

         # one guessing game per training datum, with a randomly chosen topic
         for j in self.td:
             self.guessing_game(agent1, agent2, j, random.randint(0, pm.context_size - 1))

         print("end replica " + str(i))
         all_results.append(agent2.gg_running_av)

     # average the running success over all replicas and plot the curve
     av_results = hp.calc_average(all_results)
     output.plot_DG(av_results)

     # dump the learner's association matrix and percept inventory for inspection
     agent2.matrix.to_csv("output.csv")
     import csv
     with open("output2.csv", "w", newline="") as f:
         w = csv.writer(f)
         for key, val in agent2.cs.percepts.items():
             w.writerow([key, val])
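As a usage note, the percept dump written to output2.csv above is plain two-column CSV (one key, value pair per row), so it can be read back with the standard csv module. The sketch below only assumes that layout; how the value column should be parsed depends on how percepts are stringified, which this example does not show.

    import csv

    # Read back the percept dump produced by two_agents_LG().
    # Assumes the two-column rows written above; values stay as plain strings here.
    with open("output2.csv", newline="") as f:
        rows = [row for row in csv.reader(f) if row]   # skip any blank lines
    percepts = {row[0]: row[1] for row in rows}
    print(len(percepts), "percepts loaded")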
Example #2
 def run_discrimination_game(self):
     """ Run a series of discrimination games. """
     print("starting DG")

     all_results, all_n_percepts = [], []

     for i in range(pm.n_replicas):
         agent1 = agent.Agent("agent1")

         # one discrimination game per training datum, tracking the percept count
         for j in self.td:
             self.discrimination_game(agent1, j, random.randint(0, pm.context_size - 1))
             agent1.dg_n_percepts.append(len(agent1.cs.percepts))

         print("percepts: ", len(agent1.cs.percepts))
         print("success at end of replica " + str(i) + ": " + str(agent1.dg_running_av[-1]))

         all_results.append(agent1.dg_running_av)
         all_n_percepts.append(agent1.dg_n_percepts)

     # average success and percept counts over all replicas, then plot both curves
     av_results = hp.calc_average(all_results)
     av_percepts = hp.calc_average(all_n_percepts)
     output.plot_DG(av_results, av_percepts)
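Both routines aggregate the per-replica curves with hp.calc_average before handing them to output.plot_DG. The helper itself is not shown in these examples; as a rough sketch of what that aggregation could look like, assuming every replica produces a list of the same length, an element-wise mean across replicas suffices (a hypothetical stand-in, not the actual hp.calc_average):

    def average_across_replicas(all_results):
        # Element-wise mean of equally long per-replica curves
        # (hypothetical stand-in for hp.calc_average).
        n = len(all_results)
        return [sum(values) / n for values in zip(*all_results)]

    # e.g. three replicas of a running success average:
    curves = [[0.1, 0.4, 0.8], [0.2, 0.5, 0.7], [0.0, 0.3, 0.9]]
    print(average_across_replicas(curves))   # roughly [0.1, 0.4, 0.8]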