Example #1
0
    def from_log(cls,
                 config,
                 log_dir,
                 from_log_dir,
                 from_log_trial,
                 n_logs=1,
                 forward_im=False):
        """Rebuild an experiment by replaying pickled logs of a previous run.

        Loads the 'agentM', 'agentS', 'babbling_module' and per-module
        'im_update_<mid>' entries saved under ``from_log_dir`` for trial
        ``from_log_trial`` (``n_logs`` purge chunks per key), constructs a
        new experiment around the merged log and fast-forwards its agent.
        """
        log = ExperimentLog()

        keys = ['agentM', 'agentS', 'babbling_module']
        for mid in config.modules.keys():
            keys.append('im_update_' + mid)

        for key in keys:
            for n in range(n_logs):
                filename = from_log_dir + 'log{}-'.format(
                    from_log_trial) + key + '-{}.pickle'.format(n)
                # Pickle data is binary: open with 'rb' (was 'r', which
                # corrupts the stream on Windows and fails under Python 3;
                # the writer side uses 'wb').
                with open(filename, 'rb') as f:
                    log_key_n = cPickle.load(f)
                log._logs[key] = log._logs[key] + log_key_n

        experiment = cls(config=config, log=log, log_dir=log_dir)
        # Replay the loaded history into the new agent's internal models.
        experiment.ag.fast_forward(log, forward_im=forward_im)
        experiment.log.purge()

        return experiment
Example #2
0
 def reset(self):
     """Re-create the supervisor agent and a fresh log, then re-subscribe."""
     cfg = self.config
     self.ag = cfg.supervisor_cls(cfg, self.env, **cfg.supervisor_config)
     self.log = ExperimentLog(self.ag.conf, self.ag.expl_dims,
                              self.ag.inf_dims)
     self.log.log_dir = self.log_dir
     self.evaluate_at(cfg.eval_at, self.testcases)
     # Re-attach this experiment as listener on the new agent's channels.
     for channel in ('agentM', 'agentS', 'babbling_module'):
         self.ag.subscribe(channel, self)
     self.ag.subscribe_topics_mod(
         ['interest', 'competence', 'chidren_choice'], self)
Example #3
0
 def reset(self):
     """Rebuild the agent and an empty log, re-registering all subscriptions."""
     cfg = self.config
     self.ag = cfg.supervisor_cls(cfg, self.env, **cfg.supervisor_config)
     self.log = ExperimentLog(self.ag.conf, self.ag.expl_dims,
                              self.ag.inf_dims)
     self.log.log_dir = self.log_dir
     self.evaluate_at(cfg.eval_at, self.testcases)
     # Same subscription set as on construction, in the same order.
     for channel in ('agentM', 'agentS', 'interests',
                     'babbling_module', 'module_to_credit'):
         self.ag.subscribe(channel, self)
    def reset(self):
        """Recreate the supervisor and a fresh log; subscribe to module topics."""
        cfg = self.config
        agent = cfg.supervisor_cls(cfg, self.env, **cfg.supervisor_config)
        self.ag = agent
        self.log = ExperimentLog(agent.conf, agent.expl_dims, agent.inf_dims)
        self.log.log_dir = self.log_dir
        self.evaluate_at(cfg.eval_at, self.testcases)
        # agentM/agentS subscriptions are intentionally not re-registered here.
        agent.subscribe('babbling_module', self)
        agent.subscribe_topics_mod(
            ['interest', 'competence', 'chidren_choice', 'im_update'], self)
 def reset(self):
     """Reset agent state between trials and re-register log subscriptions."""
     self.ag = self.config.supervisor_cls(
         self.config, self.env, **self.config.supervisor_config)
     self.log = ExperimentLog(
         self.ag.conf, self.ag.expl_dims, self.ag.inf_dims)
     self.log.log_dir = self.log_dir
     self.evaluate_at(self.config.eval_at, self.testcases)
     subscriptions = ['agentM', 'agentS', 'interests',
                      'babbling_module', 'module_to_credit']
     for name in subscriptions:
         self.ag.subscribe(name, self)
    def from_log(cls, config, log_dir, from_log_dir, from_log_trial, n_logs=1, forward_im=False):
        """Build an experiment whose agent is fast-forwarded from pickled logs.

        Reads the 'motor', 'sensori', 'babbling_module' and per-module
        'im_update_<mid>' entries of trial ``from_log_trial`` from
        ``from_log_dir`` (``n_logs`` purge chunks per key), merges them
        into a single log and replays it into a new experiment's agent.
        """
        log = ExperimentLog()

        keys = ['motor', 'sensori', 'babbling_module']
        for mid in config.modules.keys():
            keys.append('im_update_' + mid)

        for key in keys:
            for n in range(n_logs):
                filename = from_log_dir + 'log{}-'.format(from_log_trial) + key + '-{}.pickle'.format(n)
                # Pickles are binary files: 'rb' instead of 'r' (text mode
                # corrupts the stream on Windows and fails under Python 3).
                with open(filename, 'rb') as f:
                    log_key_n = cPickle.load(f)
                log._logs[key] = log._logs[key] + log_key_n

        experiment = cls(config=config, log=log, log_dir=log_dir)
        # Replay the merged history into the newly built agent.
        experiment.ag.fast_forward(log, forward_im=forward_im)
        experiment.log.purge()

        return experiment
class ToolsExperiment(Experiment):
    """Exploration experiment wiring a tools environment to a supervisor agent.

    Builds the environment and supervisor agent from ``config``, manages a
    log directory, subscribes to the agent's event channels, and runs
    babbling trials whose logs are periodically pickled to disk.
    """

    def __init__(self, config, context_mode, log = None, log_dir = None, n_trials = 1):
        """Set up environment, agent, log directory and subscriptions.

        config       -- experiment configuration (must provide env_cls/env_cfg,
                        supervisor_cls/supervisor_config, tag, ...).
        context_mode -- passed through to Experiment; also read in
                        motor_babbling for periodic environment resets.
        log/log_dir  -- resume from an existing log (both must be given
                        together), otherwise a fresh log_dir is created.
        n_trials     -- number of trials run by start().
        """
        
        self.config = config
        
        if hasattr(config, 'env_cls') and hasattr(config, 'env_cfg'):
            self.env = config.env_cls(**config.env_cfg)
        else:
            # Only configs that provide an environment class are supported.
            raise NotImplementedError
            #self.env = VrepDivaEnvironment(self.config.environment, self.config.vrep, self.config.diva)
            
        #self.ag = DmpAgent(self.config, self.env)
        self.ag = self.config.supervisor_cls(self.config, self.env, **self.config.supervisor_config)
        
        Experiment.__init__(self, self.env, self.ag, context_mode)
        
            
        if log is None:
            if log_dir is None:
                # Default: timestamped directory under ../../logs/, suffixed
                # with the config tag.
                self.log_dir = (os.path.join(os.path.dirname(os.path.abspath(__file__)), 
                                             '../../logs/') 
                                + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") 
                                + '-' 
                                + config.tag)
                
            else:
                self.log_dir = log_dir + config.tag
            try: # muliprocess collisions
                if not os.path.exists(self.log_dir):
                    os.mkdir(self.log_dir)
            except OSError:
                pass
            config.log_dir = self.log_dir
        else:
            # Resuming from an existing log: the caller must supply its directory.
            assert log_dir is not None
            self.log_dir = log_dir
            self.log = log 
            
#         self.ag.subscribe('agentM', self)
#         self.ag.subscribe('agentS', self)
        self.ag.subscribe('interests', self)
        self.ag.subscribe('babbling_module', self)
        self.ag.subscribe_topics_mids(['interest', 'competence', 'chidren_choice', 'im_update'], self)
        self.ag.subscribe_topics_mods(['im_update'], self)
            
        self.n_trials = n_trials
        self.trial = 0
        
        
        
    def reset(self):
        """Re-create the agent and an empty log between trials."""
        self.ag = self.config.supervisor_cls(self.config, self.env, **self.config.supervisor_config)
        self.log = ExperimentLog(self.ag.conf, self.ag.expl_dims, self.ag.inf_dims)
        self.log.log_dir = self.log_dir
        self.evaluate_at(self.config.eval_at, self.testcases)
#         self.ag.subscribe('agentM', self)
#         self.ag.subscribe('agentS', self)
        self.ag.subscribe('babbling_module', self)
        self.ag.subscribe_topics_mod(['interest', 'competence', 'chidren_choice', 'im_update'], self)
        
    @classmethod
    def from_log(cls, config, log_dir, from_log_dir, from_log_trial, n_logs=1, forward_im=False):
        """Rebuild an experiment by replaying pickled logs of a previous run."""
        log = ExperimentLog()
        
        keys=['motor', 'sensori', 'babbling_module']
        for mid in config.modules.keys():
            keys.append('im_update_' + mid)
        
        for key in keys:
            for n in range(n_logs):
                filename = from_log_dir + 'log{}-'.format(from_log_trial) + key + '-{}.pickle'.format(n)
                # NOTE(review): pickles are binary; 'rb' would be safer than
                # 'r' (save_logs writes them with 'wb').
                with open(filename, 'r') as f:
                    log_key_n = cPickle.load(f)
                log._logs[key] = log._logs[key] + log_key_n
                 
        experiment = cls(config=config, log=log, log_dir=log_dir)
        experiment.ag.fast_forward(log, forward_im=forward_im)
        experiment.log.purge()
        
        return experiment
        
    
    def motor_babbling(self, n, range_div = 1.):
        """Run n random motor commands to bootstrap the sensorimotor models."""
        #print 'Motor babbling : ', n, "points..."

        for i in range(n):
            # Periodically reset the environment when the context mode asks
            # for it (every 'reset_iterations' steps).
            if self.context_mode.has_key('reset_iterations') and np.mod(i, self.context_mode['reset_iterations']) == 0:
                self.env.reset()
            m = self.ag.motor_babbling()
            m_mov = self.ag.motor_primitive(m)
            s_mov = self.env.update(m_mov, reset=False, log=False)
            s = self.ag.sensory_primitive(s_mov)
            #print 'Babbling iteration', i, ': m =', m, 's =', s
            ms = np.hstack((m,s))
            self.ag.update_sensorimotor_models(ms)

        self._update_logs()

    def rest_trial(self):
        """Execute the agent's rest posture once and record the outcome."""
        m = self.ag.rest_params()
        m_mov = self.ag.motor_primitive(m)
        s_mov = self.env.update(m_mov, log=False)
        s = self.ag.sensory_primitive(s_mov)
        self.ag.update_sensorimotor_models(m, s)
        self._update_logs()
        print "Rest trial", "m:", m, "m_mov:", m_mov, "s:", s
        

    def start(self):
        """Run all trials, resetting the agent between consecutive trials."""
        for i in range(1,self.n_trials+1):
            self.trial = i
            self.start_trial()
            if i < self.n_trials:
                self.reset()

    def start_trial(self):
        """Run one trial: optional rest + bootstrap babbling, then main loop."""

        print '[' + self.config.tag + '] ' + 'Starting trial', self.trial 

        #self.ag.subscribe('movement', self)
        # xp.evaluate_at(eval_at, tc)


        self.log.bootstrap_conf = {'n': self.config.bootstrap, 
                                   'bootstap_range_div': self.config.bootstrap_range_div}
        if self.config.init_rest_trial:
            self.rest_trial()
        if self.config.bootstrap > 0:
            self.motor_babbling(self.config.bootstrap, self.config.bootstrap_range_div)
        
        #print "Running", self.config.iter, "iterations..."
        log_each = self.config.log_each
        
        # Run in chunks of log_each iterations, pickling logs after each chunk.
        for i in range((self.config.iter) / log_each):
            t_start = time.time()
            self.run(log_each)
            print '[' + self.config.tag + '] ' + 'Run up to ' + str((i + 1) * log_each)
            print "Time for", log_each, "iterations :", time.time() - t_start
            self.save_logs()
            

    def save_logs(self):
        """Pickle each log key to its own file, then purge the in-memory log."""
        #print 'Log directory : ', self.log_dir
        #self.log.config = copy.copy(self.config)
        #self.log.config.env_config = None
        
        for key in self.log._logs.keys():
            # One file per key per purge chunk: log<trial>-<key>-<n_purge>.pickle
            filename = self.log_dir + '/log{}-'.format(self.trial) + key + '-{}.pickle'.format(self.log.n_purge)
            with open(filename, 'wb') as f:
                cPickle.dump(self.log._logs[key], f)
            f.close()
        self.log.purge()
Example #8
0
def main(explo_config_name, trial):
    """Evaluate regression configs on the logs of one exploration run/trial.

    Loads the pickled logs of ``explo_config_name`` for ``trial``, replays
    them checkpoint by checkpoint into each regression config of
    config_list["xp2"], evaluates inverse-model competence per sensory
    space, plots the medians and pickles both competence results and logs.

    Relies on module-level globals: configs, config_list, testcases,
    log_dir, gui, n, p, n_logs, n_checkpoints, x (assumed defined at module
    scope — not visible here).
    """
    def mean_std(d):
        # Column-stack the per-key curves of d and return their mean and
        # standard error across keys.
        v = np.zeros((n / p, len(d)))
        for i, l in zip(range(len(d)), d.values()):
            for j, lj in zip(range(len(l)), l):
                v[j, i] = lj
        mean = np.mean(v, axis=1)
        std = np.std(v, axis=1) / np.sqrt(len(d))
        return mean, std

    comp = {}
    logs = {}

    keys = ["agentM", "agentS", "babbling_module"]

    def eval_comp(config_name, trial, i, log_i):
        # Build the shared experiment on the first checkpoint, otherwise
        # fast-forward it with the log slice, then evaluate inverse competence.
        global xp, testcases
        config = configs[config_name]
        for key in log_i._logs.keys():
            print key, len(log_i._logs[key])
        if i == 0:
            config.gui = gui
            config.env_cfg['gui'] = gui
            xp = ToolsExperiment(config, log_dir=log_dir + config_name + '/')
        else:
            xp.ag.fast_forward(log_i)
        xp.ag.eval_mode()

        evaluation = Evaluation(xp.log,
                                xp.ag,
                                xp.env,
                                testcases,
                                modes=["inverse"])
        result = evaluation.evaluate()
        return result

    print "explo_config_name", explo_config_name

    for s_space in testcases.keys():
        comp[s_space] = {}

    for s_space in testcases.keys():
        comp[s_space][explo_config_name] = {}

    logs[explo_config_name] = {}

    print "trial", trial

    logs[explo_config_name][trial] = {}
    # Concatenate all purge chunks of each log key for this trial.
    # NOTE(review): pickles are binary; 'rb' would be safer than 'r'.
    log = ExperimentLog(None, None, None)
    for key in keys:
        for i in range(n_logs):
            filename = log_dir + explo_config_name + '/log{}-'.format(
                trial) + key + '-{}.pickle'.format(i)
            with open(filename, 'r') as f:
                log_key = cPickle.load(f)
            log._logs[key] = log._logs[key] + log_key
        print key, len(log._logs[key])

    for s_space in testcases.keys():
        comp[s_space][explo_config_name][trial] = {}

    for regression_config_name in config_list["xp2"]:
        print "regression_config_name", regression_config_name

        for s_space in testcases.keys():
            comp[s_space][explo_config_name][trial][
                regression_config_name] = []

        for i in range(n_checkpoints + 1):
            print "checkpoint", i

            # Slice of the exploration log covering this checkpoint window
            # (empty on checkpoint 0, which only constructs the experiment).
            log_i = ExperimentLog(None, None, None)
            for key in ["agentM", "agentS"]:
                if i > 0:
                    log_i._logs[key] = log._logs[key][(i - 1) * n /
                                                      n_checkpoints:(i) * n /
                                                      n_checkpoints]
                else:
                    log_i._logs[key] = []
                print regression_config_name, trial, key, i, n, n_checkpoints, [
                    i * n / n_checkpoints, (i + 1) * n / n_checkpoints
                ], len(log_i._logs[key])

            errors = eval_comp(regression_config_name, trial, i, log_i)[0]
            for s_space in testcases.keys():
                comp[s_space][explo_config_name][trial][
                    regression_config_name] += [errors[s_space]]
        logs[explo_config_name][trial][regression_config_name] = xp.log._logs

        if True:
            # Plot the median competence per sensory space over checkpoints.
            fig, ax = plt.subplots()
            fig.canvas.set_window_title('Competence')
            for s_space in testcases.keys():
                print x, np.median(comp[s_space][explo_config_name][trial]
                                   [regression_config_name],
                                   axis=1)
                ax.plot(x,
                        np.median(comp[s_space][explo_config_name][trial]
                                  [regression_config_name],
                                  axis=1),
                        label=s_space)
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(handles, labels)

            plt.savefig(
                log_dir + "img/" + explo_config_name +
                '-log-{}-{}-comp.png'.format(regression_config_name, trial))
            plt.close(fig)

    with open(
            log_dir + explo_config_name +
            '/analysis_comp_eval-{}.pickle'.format(trial), 'wb') as f:
        cPickle.dump(comp, f)

    with open(
            log_dir + explo_config_name +
            '/analysis_comp_logs-{}.pickle'.format(trial), 'wb') as f:
        cPickle.dump(logs, f)
def main(log_dir, config_name, trial):
    """Replay logged exploration at several ages and test problem solving.

    For each cutoff in ``iterations``, fast-forwards a fresh experiment
    with the first ``iteration`` motor/sensori log entries of this trial,
    then repeatedly asks the agent to solve the phase-3 problems
    (contexts it presumably cannot reach directly — TODO confirm),
    recording how many attempts each took and which strategies were used.
    Results are pickled per trial.

    Relies on module-level globals: configs, strategy_used (assumed
    defined at module scope — not visible here).
    """
    
    config = configs[config_name]
    
    #config.env_cfg["env_conf"]["gui"]= True
    
    
    # Load the first purge chunk of the motor and sensori logs.
    # NOTE(review): pickles are binary; 'rb' would be safer than 'r', and
    # the bare except also hides errors other than a missing file.
    log = ExperimentLog(None, None, None)
    for key in ["motor", "sensori"]:
        try:
            filename = log_dir + config_name + '/log{}-'.format(trial) + key + '-{}.pickle'.format(0)
            with open(filename, 'r') as f:
                log_key = cPickle.load(f)
            log._logs[key] = log_key
        except:
            print "File not Found:", filename
        
        
        
    # Learning "ages" (number of replayed iterations) to evaluate.
    iterations = [1000, 5000, 10000, 20000, 50000]
    
    results_niter_2 = {}
    results_niter_3 = {}
    results_strategies_2 = {}
    results_strategies_3 = {}
    
    for iteration in iterations:
        print iteration
        
        # Fresh experiment fast-forwarded with the first `iteration` entries.
        xp = ToolsExperiment(config, context_mode=config.context_mode)
        
        log_i = ExperimentLog(None, None, None)
        log_i._logs["motor"] = log._logs["motor"][:iteration]
        log_i._logs["sensori"] = log._logs["sensori"][:iteration]
        
        xp.ag.fast_forward(log_i, forward_im=False)
        
        s_space = "s_o"
        
        
        
        
        
        
        
        
        # Problem definitions: context positions keyed by problem name.
        problems_2 = dict(
                      A=[-0.2, 0.7],
                      B=[0., 0.7],
                      C=[0.2, 0.7],
                      )
        
        # -1 means "never solved within n_iter_max attempts".
        results_niter_2_i = dict(
                      A=-1,
                      B=-1,
                      C=-1,
                      )
        
        results_strategies_2_i = dict(
                      A=[],
                      B=[],
                      C=[],
                      )
        
        
        problems_3 = dict(
                      A=[-0.1, 1.2],
                      B=[0., 1.25],
                      C=[0.1, 1.2],
                      )
        
        results_niter_3_i = dict(
                      A=-1,
                      B=-1,
                      C=-1,
                      )
        
        results_strategies_3_i = dict(
                      A=[],
                      B=[],
                      C=[],
                      )
        
        # Maximum solving attempts per problem.
        n_iter_max = 200
        
        
        # Reachable contexts, learning
    #     while True:
    #         print
    #         context =  [0, 1.2]
    #         #context = rand_bounds(np.array([[-0.5, -0.5], [0.5, 0.5]]))[0]
    #         sg = [0] + list(-np.array(context))
    #         xp.env.env.env.top_env.pos = context
    #         print "context", xp.env.get_current_context()
    #         #print "ds goal", sg
    #         m = xp.ag.inverse(s_space, sg, context=xp.env.get_current_context(), babbling=True, explore=None)[0]
    #         sr = xp.env.update(m, reset=False)
    #         xp.ag.perceive([sr], context=context)
         
         
#         print "----- Phase 2"
#         # Hreachable contexts, learning
#         for p2 in sorted(problems_2.keys()):
#             context = problems_2[p2]
#             sg = [0] + list(-np.array(context))
#             xp.env.env.env.top_env.pos = context
#             print "\n-------------- new context", xp.env.get_current_context()
#             #print "ds goal", sg
#             for i in range(n_iter_max):
#                 context = xp.env.get_current_context()
#                 m = xp.ag.inverse(s_space, sg, context=context, babbling=True, explore=None)[0]
#                 print "m", m
#                 sr = xp.env.update(m, reset=False)
#                 print "s", sr
#                 xp.ag.perceive([sr], context=context)
#                 results_strategies_2_i[p2].append(strategy_used(sr))
#                 if abs(sr[-1]) > 0.0001:
#                     results_niter_2_i[p2] = i
#                     break
            
            
            
        print "----- Phase 3"
        # UnHreachable contexts, learning
        for p3 in sorted(problems_3.keys()):
            context = problems_3[p3]
            # Goal: cancel out the context displacement.
            sg = [0] + list(-np.array(context))
            xp.env.env.env.top_env.pos = context
            print "\n-------------- new context", xp.env.get_current_context()
            #print "ds goal", sg
            for i in range(n_iter_max):
                context = xp.env.get_current_context()
                m = xp.ag.inverse(s_space, sg, context=context, babbling=True, explore=None)[0]
                #print "m", m
                sr = xp.env.update(m, reset=False)
                #print "s", sr
                xp.ag.perceive([sr], context=context)
                results_strategies_3_i[p3].append(strategy_used(sr))
                # A non-zero last sensory component counts as success.
                if abs(sr[-1]) > 0.0001:
                    results_niter_3_i[p3] = i
                    break
            
        #results_niter_2[iteration] = results_niter_2_i
        results_niter_3[iteration] = results_niter_3_i
        #results_strategies_2[iteration] = results_strategies_2_i
        results_strategies_3[iteration] = results_strategies_3_i
        
    
    with open(log_dir + config_name + '/results-{}.pickle'.format(trial), 'wb') as f:
        cPickle.dump(dict(
                          #results_niter_2=results_niter_2,
                          results_niter_3=results_niter_3,
                          #results_strategies_2=results_strategies_2,
                          results_strategies_3=results_strategies_3
                          ), f)
     
     
    #print "results_niter_2", results_niter_2
    print "results_niter_3", results_niter_3
    #print "results_strategies_2", results_strategies_2
    print "results_strategies_3", results_strategies_3
Example #10
0
class ToolsExperiment(Experiment):
    """Exploration experiment wiring a tools environment to a supervisor agent.

    Builds the environment and supervisor agent from ``config``, manages a
    log directory, subscribes to the agent's event channels, and runs
    babbling trials whose logs are periodically pickled to disk.
    """

    def __init__(self, config, log=None, log_dir=None, n_trials=1):
        """Set up environment, agent, log directory and subscriptions.

        config      -- experiment configuration (must provide env_cls/env_cfg,
                       supervisor_cls/supervisor_config, tag, ...).
        log/log_dir -- resume from an existing log (both must be given
                       together), otherwise a fresh log_dir is created.
        n_trials    -- number of trials run by start().
        """

        self.config = config

        if hasattr(config, 'env_cls') and hasattr(config, 'env_cfg'):
            self.env = config.env_cls(**config.env_cfg)
        else:
            # Only configs that provide an environment class are supported.
            raise NotImplementedError
            #self.env = VrepDivaEnvironment(self.config.environment, self.config.vrep, self.config.diva)

        #self.ag = DmpAgent(self.config, self.env)
        self.ag = self.config.supervisor_cls(self.config, self.env,
                                             **self.config.supervisor_config)

        Experiment.__init__(self, self.env, self.ag)

        if log is None:
            if log_dir is None:
                # Default: timestamped directory under ../../logs/, suffixed
                # with the config tag.
                self.log_dir = (
                    os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 '../../logs/') +
                    datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") +
                    '-' + config.tag)

            else:
                self.log_dir = log_dir + config.tag
            try:  # muliprocess collisions
                if not os.path.exists(self.log_dir):
                    os.mkdir(self.log_dir)
            except OSError:
                pass
            config.log_dir = self.log_dir
        else:
            # Resuming from an existing log: the caller must supply its directory.
            assert log_dir is not None
            self.log_dir = log_dir
            self.log = log

        self.ag.subscribe('agentM', self)
        self.ag.subscribe('agentS', self)
        self.ag.subscribe('babbling_module', self)
        self.ag.subscribe_topics_mod(
            ['interest', 'competence', 'chidren_choice'], self)

        self.n_trials = n_trials
        self.trial = 0

    def reset(self):
        """Re-create the agent and an empty log between trials."""
        self.ag = self.config.supervisor_cls(self.config, self.env,
                                             **self.config.supervisor_config)
        self.log = ExperimentLog(self.ag.conf, self.ag.expl_dims,
                                 self.ag.inf_dims)
        self.log.log_dir = self.log_dir
        self.evaluate_at(self.config.eval_at, self.testcases)
        self.ag.subscribe('agentM', self)
        self.ag.subscribe('agentS', self)
        self.ag.subscribe('babbling_module', self)
        self.ag.subscribe_topics_mod(
            ['interest', 'competence', 'chidren_choice'], self)

    @classmethod
    def from_log(cls,
                 config,
                 log_dir,
                 from_log_dir,
                 from_log_trial,
                 n_logs=1,
                 forward_im=False):
        """Rebuild an experiment by replaying pickled logs of a previous run."""

        log = ExperimentLog()

        keys = ['agentM', 'agentS', 'babbling_module']
        for mid in config.modules.keys():
            keys.append('im_update_' + mid)

        for key in keys:
            for n in range(n_logs):
                filename = from_log_dir + 'log{}-'.format(
                    from_log_trial) + key + '-{}.pickle'.format(n)
                # NOTE(review): pickles are binary; 'rb' would be safer than
                # 'r' (save_logs writes them with 'wb').
                with open(filename, 'r') as f:
                    log_key_n = cPickle.load(f)
                log._logs[key] = log._logs[key] + log_key_n

        experiment = cls(config=config, log=log, log_dir=log_dir)
        experiment.ag.fast_forward(log, forward_im=forward_im)
        experiment.log.purge()

        return experiment

    def motor_babbling(self, n, range_div=1.):
        """Run n random motor commands to bootstrap the sensorimotor models."""
        #print 'Motor babbling : ', n, "points..."

        for i in range(n):
            m = self.ag.motor_babbling()
            m_mov = self.ag.motor_primitive(m)
            s_mov = self.env.update(m_mov, log=False)
            s = self.ag.sensory_primitive(s_mov)
            #print 'Babbling iteration', i, ': m =', m, 's =', s
            self.ag.update_sensorimotor_models(m, s)

        self._update_logs()

    def rest_trial(self):
        """Execute the agent's rest posture once and record the outcome."""
        m = self.ag.rest_params()
        m_mov = self.ag.motor_primitive(m)
        s_mov = self.env.update(m_mov, log=False)
        s = self.ag.sensory_primitive(s_mov)
        self.ag.update_sensorimotor_models(m, s)
        self._update_logs()
        print "Rest trial", "m:", m, "m_mov:", m_mov, "s:", s

    def start(self):
        """Run all trials, resetting the agent between consecutive trials."""
        for i in range(1, self.n_trials + 1):
            self.trial = i
            self.start_trial()
            if i < self.n_trials:
                self.reset()

    def start_trial(self):
        """Run one trial: optional rest + bootstrap babbling, then main loop."""

        print '[' + self.config.tag + '] ' + 'Starting trial', self.trial

        #self.ag.subscribe('movement', self)
        # xp.evaluate_at(eval_at, tc)

        self.log.bootstrap_conf = {
            'n': self.config.bootstrap,
            'bootstap_range_div': self.config.bootstrap_range_div
        }
        if self.config.init_rest_trial:
            self.rest_trial()
        if self.config.bootstrap > 0:
            self.motor_babbling(self.config.bootstrap,
                                self.config.bootstrap_range_div)

        #print "Running", self.config.iter, "iterations..."
        log_each = self.config.log_each

        # Run in chunks of log_each iterations, pickling logs after each chunk.
        for i in range((self.config.iter) / log_each):
            t_start = time.time()
            self.run(log_each)
            print '[' + self.config.tag + '] ' + 'Run up to ' + str(
                (i + 1) * log_each)
            print "Time for", log_each, "iterations :", time.time() - t_start
            self.save_logs()

    def save_logs(self):
        """Pickle each log key to its own file, then purge the in-memory log."""
        #print 'Log directory : ', self.log_dir
        #self.log.config = copy.copy(self.config)
        #self.log.config.env_config = None

        for key in self.log._logs.keys():
            # One file per key per purge chunk: log<trial>-<key>-<n_purge>.pickle
            filename = self.log_dir + '/log{}-'.format(
                self.trial) + key + '-{}.pickle'.format(self.log.n_purge)
            with open(filename, 'wb') as f:
                cPickle.dump(self.log._logs[key], f)
            f.close()
        self.log.purge()
def main(explo_config_name, trial):
    """Evaluate regression configs on the logs of one exploration run/trial.

    Loads the pickled logs of ``explo_config_name`` for ``trial``, replays
    them checkpoint by checkpoint into each regression config of
    config_list["xp2"], evaluates inverse-model competence per sensory
    space, plots the medians and pickles both competence results and logs.

    Relies on module-level globals: configs, config_list, testcases,
    log_dir, gui, n, p, n_logs, n_checkpoints, x (assumed defined at module
    scope — not visible here).
    """
    def mean_std(d):
        # Column-stack the per-key curves of d and return their mean and
        # standard error across keys.
        v = np.zeros((n / p, len(d)))
        for i, l in zip(range(len(d)), d.values()):
            for j, lj in zip(range(len(l)), l):
                v[j, i] = lj
        mean = np.mean(v, axis=1)
        std = np.std(v, axis=1) / np.sqrt(len(d))
        return mean, std

    comp = {}
    logs = {}

    keys = ["agentM", "agentS", "babbling_module"]

    def eval_comp(config_name, trial, i, log_i):
        # Build the shared experiment on the first checkpoint, otherwise
        # fast-forward it with the log slice, then evaluate inverse competence.
        global xp, testcases
        config = configs[config_name]
        for key in log_i._logs.keys():
            print key, len(log_i._logs[key])
        if i == 0:
            config.gui = gui
            config.env_cfg["gui"] = gui
            xp = ToolsExperiment(config, log_dir=log_dir + config_name + "/")
        else:
            xp.ag.fast_forward(log_i)
        xp.ag.eval_mode()

        evaluation = Evaluation(xp.log, xp.ag, xp.env, testcases, modes=["inverse"])
        result = evaluation.evaluate()
        return result

    print "explo_config_name", explo_config_name

    for s_space in testcases.keys():
        comp[s_space] = {}

    for s_space in testcases.keys():
        comp[s_space][explo_config_name] = {}

    logs[explo_config_name] = {}

    print "trial", trial

    logs[explo_config_name][trial] = {}
    # Concatenate all purge chunks of each log key for this trial.
    # NOTE(review): pickles are binary; "rb" would be safer than "r".
    log = ExperimentLog(None, None, None)
    for key in keys:
        for i in range(n_logs):
            filename = log_dir + explo_config_name + "/log{}-".format(trial) + key + "-{}.pickle".format(i)
            with open(filename, "r") as f:
                log_key = cPickle.load(f)
            log._logs[key] = log._logs[key] + log_key
        print key, len(log._logs[key])

    for s_space in testcases.keys():
        comp[s_space][explo_config_name][trial] = {}

    for regression_config_name in config_list["xp2"]:
        print "regression_config_name", regression_config_name

        for s_space in testcases.keys():
            comp[s_space][explo_config_name][trial][regression_config_name] = []

        for i in range(n_checkpoints + 1):
            print "checkpoint", i

            # Slice of the exploration log covering this checkpoint window
            # (empty on checkpoint 0, which only constructs the experiment).
            log_i = ExperimentLog(None, None, None)
            for key in ["agentM", "agentS"]:
                if i > 0:
                    log_i._logs[key] = log._logs[key][(i - 1) * n / n_checkpoints : (i) * n / n_checkpoints]
                else:
                    log_i._logs[key] = []
                print regression_config_name, trial, key, i, n, n_checkpoints, [
                    i * n / n_checkpoints,
                    (i + 1) * n / n_checkpoints,
                ], len(log_i._logs[key])

            errors = eval_comp(regression_config_name, trial, i, log_i)[0]
            for s_space in testcases.keys():
                comp[s_space][explo_config_name][trial][regression_config_name] += [errors[s_space]]
        logs[explo_config_name][trial][regression_config_name] = xp.log._logs

        if True:
            # Plot the median competence per sensory space over checkpoints.
            fig, ax = plt.subplots()
            fig.canvas.set_window_title("Competence")
            for s_space in testcases.keys():
                print x, np.median(comp[s_space][explo_config_name][trial][regression_config_name], axis=1)
                ax.plot(
                    x, np.median(comp[s_space][explo_config_name][trial][regression_config_name], axis=1), label=s_space
                )
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(handles, labels)

            plt.savefig(
                log_dir + "img/" + explo_config_name + "-log-{}-{}-comp.png".format(regression_config_name, trial)
            )
            plt.close(fig)

    with open(log_dir + explo_config_name + "/analysis_comp_eval-{}.pickle".format(trial), "wb") as f:
        cPickle.dump(comp, f)

    with open(log_dir + explo_config_name + "/analysis_comp_logs-{}.pickle".format(trial), "wb") as f:
        cPickle.dump(logs, f)
def main(log_dir, config_name, trial):
    """Plot hand-vs-tool choice probability maps at several learning ages.

    For each cutoff in ``iterations``, fast-forwards a fresh experiment
    with the first ``iteration`` motor/sensori log entries of this trial,
    queries the agent's space-child choice probability on a 100x100 grid of
    goal points, renders the map as a pcolormesh PDF and pickles the
    per-age result matrices.

    Relies on the module-level global ``configs`` (assumed defined at
    module scope — not visible here).
    """
    
    config = configs[config_name]
    
    #config.env_cfg["env_conf"]["gui"]= True
    
    
    # Load the first purge chunk of the motor and sensori logs.
    # NOTE(review): pickles are binary; 'rb' would be safer than 'r', and
    # the bare except also hides errors other than a missing file.
    log = ExperimentLog(None, None, None)
    for key in ["motor", "sensori"]:
        try:
            filename = log_dir + config_name + '/log{}-'.format(trial) + key + '-{}.pickle'.format(0)
            with open(filename, 'r') as f:
                log_key = cPickle.load(f)
            log._logs[key] = log_key
        except:
            print "File not Found:", filename
        
        
        
    # Learning "ages" (number of replayed iterations) to evaluate.
    iterations = [1000, 10000, 50000]
    
    results = {}
    
    for iteration in iterations:
        print iteration
        results[iteration] = {}
        
        # Fresh experiment fast-forwarded with the first `iteration` entries.
        xp = ToolsExperiment(config, context_mode=config.context_mode)
        
        log_i = ExperimentLog(None, None, None)
        log_i._logs["motor"] = log._logs["motor"][:iteration]
        log_i._logs["sensori"] = log._logs["sensori"][:iteration]
        
        xp.ag.fast_forward(log_i, forward_im=False)
        
        s_space = xp.ag.config.s_spaces["s_o"]
        
        
        n_test_point = 100
        n_trial_per_point = 100
        
        x_points = np.linspace(-1.5, 1.5, n_test_point)
        y_points = np.linspace(-1.5, 1.5, n_test_point)
         
        results[iteration] = np.zeros((n_test_point, n_test_point))
        
        # Probability of the first child (hand vs tool) for each grid goal.
        for ix in range(len(x_points)):
            for iy in range(len(y_points)):
                probas = xp.ag.choose_space_child(s_space, [x_points[ix], y_points[iy]], mode=xp.ag.choose_children_mode, local=xp.ag.ccm_local, k=n_trial_per_point)
                results[iteration][ix][iy] = probas[0]
                
                
        # Plot 
        
        fig, ax = plt.subplots()
        fig.canvas.set_window_title("Age " + str(iteration))
        
        pcol = plt.pcolormesh(x_points, y_points, results[iteration], vmin=0, vmax=1, linewidth=0, cmap="jet")  
        pcol.set_rasterized(True)      
        pcol.set_edgecolor('face')
        
        cb = plt.colorbar()
        cb.ax.set_yticklabels(["Tool", "", "", "", "", "", "", "", "", "", "Hand"], fontsize = 30)
        cb.solids.set_rasterized(True)
        cb.solids.set_edgecolor("face")

#         plt.xlabel("X", fontsize = 30)
#         plt.ylabel("Y", fontsize = 30)
        plt.xticks([-1., 0, 1.], fontsize = 26)
        plt.yticks([-1., 0, 1.], fontsize = 26)
        
        plt.savefig(log_dir + "img/" + config_name + '-map-{}.pdf'.format(iteration), format='pdf', bbox_inches='tight', rasterized=True)
        #plt.savefig(log_dir + "img/" + config_name + '-map-{}.png'.format(iteration), format='png', bbox_inches='tight', rasterized=True)
        
        
                     
    print results
    
    with open(log_dir + config_name + '/results-map-{}.pickle'.format(trial), 'wb') as f:
        cPickle.dump(results, f)