def run(self):
        date = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        log_dir = 'logs/' + date
        os.mkdir(log_dir)

        env = VrepEnvironment(self.robot, **conf)

        ag = DmpAgent(**get_params(self.n_bfs, env.rest_position, self.babbling_name, self.sm_name, self.im_name))

        print 'Running xp', self.tag

        xp = Experiment(env, ag)

        env.unsubscribe('motor', xp)
        env.unsubscribe('sensori', xp)
        ag.subscribe('movement', xp)
        # xp.evaluate_at(eval_at, tc)

        xp.log.env_conf = conf
        xp.log.ag_conf = {'n_bfs': self.n_bfs,
                          'starting_position': env.rest_position,
                          'babbling_name': self.babbling_name,
                          'sm_name': self.sm_name,
                          'im_name': self.im_name
                          }
        xp.log.bootstrap_conf = {'n': 16, 'bootstap_range_div': 28.}
        self.bootstrap(xp, **xp.log.bootstrap_conf)

        log_each = 10
        for run in range(100 / log_each):
            xp.run(log_each)
            with open(log_dir + '/{}'.format(self.tag), 'wb') as f:
                pickle.dump(xp.log, f)
            f.close()
            print 'saved ' + str((run + 1) * log_each)
Exemple #2
0
class LearningIkXp(VrepXp):
    def __init__(self, conf):
        VrepXp.__init__(self, 'poppy', conf['scene'])

        self.xp_conf = conf

    def run(self):
        env = VrepEnvironment(self.robot, **env_conf)

        # Create the Interest Model
        im_dims = env.conf.m_dims if self.xp_conf['bab'] == 'motor' else env.conf.s_dims
        im_cls, im_conf = interest_models[self.xp_conf['im']['name']]
        im_conf = im_conf['default']
        im_conf.update(self.xp_conf['im']['conf'])
        print 'Create IM', self.xp_conf['im']['name'], 'using', im_conf
        im = im_cls(env.conf, im_dims, **im_conf)

        # Create the SensoriMotor Model
        sm_cls = sms[self.xp_conf['sm']['name']]
        print 'Create SM', self.xp_conf['sm']['name'], 'using', self.xp_conf['sm']['conf']
        sm = sm_cls(env.conf, **self.xp_conf['sm']['conf'])

        ag = Agent(env.conf, sm, im)

        self.xp = Experiment(env, ag)
        self.xp.evaluate_at(self.xp_conf['eval_at'], load(self.xp_conf['tc']))
        self.xp.run()

    def save(self, f):
        pickle.dump(self.xp.log, f)
Exemple #3
0
    def __init__(self, config, log=None, log_dir=None, n_trials=1):
        """Build the environment and supervisor agent from *config*, wire
        the shared log, and subscribe this experiment to the agent's topics.

        Args:
            config: experiment configuration object; must provide env_cls,
                env_cfg, supervisor_cls, supervisor_config and tag.
            log: existing log to resume from; when given, log_dir must be
                supplied as well.
            log_dir: directory for pickled logs; auto-generated from the
                timestamp and config.tag when None.
            n_trials: number of trials this experiment will run.
        """

        self.config = config

        if hasattr(config, 'env_cls') and hasattr(config, 'env_cfg'):
            self.env = config.env_cls(**config.env_cfg)
        else:
            # Only class-based environment construction is supported.
            raise NotImplementedError
            #self.env = VrepDivaEnvironment(self.config.environment, self.config.vrep, self.config.diva)

        #self.ag = DmpAgent(self.config, self.env)
        self.ag = self.config.supervisor_cls(self.config, self.env,
                                             **self.config.supervisor_config)

        Experiment.__init__(self, self.env, self.ag)
        # Share the experiment's log with the agent so both write to one log.
        self.ag.log = self.log

        if log is None:
            if log_dir is None:
                # Default: ../../logs/<timestamp>-<tag> relative to this file.
                self.log_dir = (
                    os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 '../../logs/') +
                    datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") +
                    '-' + config.tag)

            else:
                self.log_dir = log_dir + config.tag
            try:  # muliprocess collisions
                if not os.path.exists(self.log_dir):
                    os.mkdir(self.log_dir)
            except OSError:
                # A sibling process may have created the directory first.
                pass
            config.log_dir = self.log_dir
        else:
            # Resuming: the caller must supply the matching log directory.
            assert log_dir is not None
            self.log_dir = log_dir
            self.log = log

        # Forward these agent notification topics to this experiment.
        self.ag.subscribe('agentM', self)
        self.ag.subscribe('agentS', self)
        self.ag.subscribe('interests', self)
        self.ag.subscribe('babbling_module', self)
        self.ag.subscribe('module_to_credit', self)
        self.ag.subscribe('babbling_interest', self)
        #self.ag.subscribe_topics_mod(['competence', 'chidren_choice'], self)

        self.n_trials = n_trials
        self.trial = 0  # index of the current trial
 def __init__(self, config, log = None, log_dir = None, n_trials = 1):
     """Build the environment and supervisor agent from *config*, wire
     the shared log, and subscribe this experiment to the agent's topics.

     Args:
         config: experiment configuration object; must provide env_cls,
             env_cfg, supervisor_cls, supervisor_config and tag.
         log: existing log to resume from; when given, log_dir must be
             supplied as well.
         log_dir: directory for pickled logs; auto-generated from the
             timestamp and config.tag when None.
         n_trials: number of trials this experiment will run.
     """

     self.config = config

     if hasattr(config, 'env_cls') and hasattr(config, 'env_cfg'):
         self.env = config.env_cls(**config.env_cfg)
     else:
         # Only class-based environment construction is supported.
         raise NotImplementedError
         #self.env = VrepDivaEnvironment(self.config.environment, self.config.vrep, self.config.diva)

     #self.ag = DmpAgent(self.config, self.env)
     self.ag = self.config.supervisor_cls(self.config, self.env, **self.config.supervisor_config)

     Experiment.__init__(self, self.env, self.ag)
     # Share the experiment's log with the agent so both write to one log.
     self.ag.log = self.log

     if log is None:
         if log_dir is None:
             # Default: ../../logs/<timestamp>-<tag> relative to this file.
             self.log_dir = (os.path.join(os.path.dirname(os.path.abspath(__file__)), 
                                          '../../logs/') 
                             + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") 
                             + '-' 
                             + config.tag)

         else:
             self.log_dir = log_dir + config.tag
         try: # muliprocess collisions
             if not os.path.exists(self.log_dir):
                 os.mkdir(self.log_dir)
         except OSError:
             # A sibling process may have created the directory first.
             pass
         config.log_dir = self.log_dir
     else:
         # Resuming: the caller must supply the matching log directory.
         assert log_dir is not None
         self.log_dir = log_dir
         self.log = log

     # Forward these agent notification topics to this experiment.
     self.ag.subscribe('agentM', self)
     self.ag.subscribe('agentS', self)
     self.ag.subscribe('interests', self)
     self.ag.subscribe('babbling_module', self)
     self.ag.subscribe('module_to_credit', self)
     self.ag.subscribe('babbling_interest', self)
     #self.ag.subscribe_topics_mod(['competence', 'chidren_choice'], self)

     self.n_trials = n_trials
     self.trial = 0  # index of the current trial
    def run(self):
        date = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        log_dir = 'logs/' + date
        os.mkdir(log_dir)

        env = VrepEnvironment(self.robot, **conf)

        ag = DmpAgent(**get_params(self.n_bfs, env.rest_position, self.
                                   babbling_name, self.sm_name, self.im_name))

        print 'Running xp', self.tag

        xp = Experiment(env, ag)

        env.unsubscribe('motor', xp)
        env.unsubscribe('sensori', xp)
        ag.subscribe('movement', xp)
        # xp.evaluate_at(eval_at, tc)

        xp.log.env_conf = conf
        xp.log.ag_conf = {
            'n_bfs': self.n_bfs,
            'starting_position': env.rest_position,
            'babbling_name': self.babbling_name,
            'sm_name': self.sm_name,
            'im_name': self.im_name
        }
        xp.log.bootstrap_conf = {'n': 16, 'bootstap_range_div': 28.}
        self.bootstrap(xp, **xp.log.bootstrap_conf)

        log_each = 10
        for run in range(100 / log_each):
            xp.run(log_each)
            with open(log_dir + '/{}'.format(self.tag), 'wb') as f:
                pickle.dump(xp.log, f)
            f.close()
            print 'saved ' + str((run + 1) * log_each)
Exemple #6
0
    def run(self):
        env = VrepEnvironment(self.robot, **env_conf)

        # Create the Interest Model
        im_dims = env.conf.m_dims if self.xp_conf['bab'] == 'motor' else env.conf.s_dims
        im_cls, im_conf = interest_models[self.xp_conf['im']['name']]
        im_conf = im_conf['default']
        im_conf.update(self.xp_conf['im']['conf'])
        print 'Create IM', self.xp_conf['im']['name'], 'using', im_conf
        im = im_cls(env.conf, im_dims, **im_conf)

        # Create the SensoriMotor Model
        sm_cls = sms[self.xp_conf['sm']['name']]
        print 'Create SM', self.xp_conf['sm']['name'], 'using', self.xp_conf['sm']['conf']
        sm = sm_cls(env.conf, **self.xp_conf['sm']['conf'])

        ag = Agent(env.conf, sm, im)

        self.xp = Experiment(env, ag)
        self.xp.evaluate_at(self.xp_conf['eval_at'], load(self.xp_conf['tc']))
        self.xp.run()
# To switch from motor to goal babbling
# You just need to change the babling parameter at instanciation

from explauto.experiment import Experiment, make_settings

# Configure a goal-babbling run on the simple 2D arm.
s_goal = make_settings(
    environment='simple_arm',
    babbling_mode='goal',
    interest_model='random',
    sensorimotor_model='nearest_neighbor',
)

goal_expe = Experiment.from_settings(s_goal)

# Evaluate competence on the default testcases at these iteration counts.
eval_points = [1, 10, 20, 30, 100, 200, 300, 400]
goal_expe.evaluate_at(eval_points, s_goal.default_testcases)

goal_expe.run()

# Scatter-plot the reached sensory endpoints in green.
ax = axes()
data = goal_expe.log.scatter_plot(ax, (('sensori', [0, 1]), ), color='green')
# To switch from motor to goal babbling
# You just need to change the babling parameter at instanciation

from explauto.experiment import Experiment, make_settings

# Configure a goal-babbling run on the simple 2D arm with a random
# interest model and a nearest-neighbor sensorimotor model.
s_goal = make_settings(environment='simple_arm',
                       babbling_mode='goal',
                       interest_model='random',
                       sensorimotor_model='nearest_neighbor')

goal_expe = Experiment.from_settings(s_goal)

# Evaluate competence on the default testcases at these iteration counts.
goal_expe.evaluate_at([1, 10, 20, 30, 100, 200, 300, 400],
                      s_goal.default_testcases)

goal_expe.run()

# Scatter-plot the reached sensory endpoints in green.
# NOTE(review): `axes` is not imported here — presumably from a pylab
# star-import in the surrounding notebook; confirm.
ax = axes()
data = goal_expe.log.scatter_plot(ax, (('sensori', [0, 1]), ), color='green')