def new_episode(self):
    """Reset the robot and return the descriptor for a fresh episode."""
    # Put the robot into its rest configuration before the episode starts.
    self.send_rest_command()
    warnings.warn('get environment from some place')
    # NOTE(review): environment id is hard-coded -- presumably a placeholder
    # until the environment can be obtained from the proper source.
    return EpisodeDesc(unique_timestamp_string(), 'lab', None)
def new_episode(self):
    """Begin a new episode: seed the state from the next image and
    reset the command bookkeeping. Returns an EpisodeDesc. """
    self._make_sure_inited()
    # Initialize the state from the next frame of the image stream.
    frame = self.images.next()  # TODO: reshape
    self.y = UncertainImage(frame)
    self.timestamp = time.time()
    self.commands = np.array([0], dtype='int')
    self.commands_source = 'rest'
    return EpisodeDesc(unique_timestamp_string(),
                       self.id_image_stream,
                       extra=None)
def get_simlog_filename_stream(self, id_robot, id_agent):
    """ Also creates a suitable id_stream.
        Returns id_stream, filename. """
    # Strip underscores so the timestamp is a single token in the id.
    ts = unique_timestamp_string().replace('_', '')
    # TODO: use phase?
    stream = '-'.join([id_robot, id_agent, ts])
    filename = self.get_simlog_filename(id_robot=id_robot,
                                        id_agent=id_agent,
                                        id_stream=stream)
    return stream, filename
def simulate_agent_robot(
    data_central,
    id_agent,
    id_robot,
    max_episode_len,
    num_episodes,
    cumulative,
    id_episodes=None,  # if None, just use the ID given by the world
    stateful=False,
    interval_print=None,
    write_extra=True,
):
    """ Simulates num_episodes of interaction between agent and robot,
        writing the observations (plus optional robot-state extras)
        to a fresh log stream.

        If not cumulative, returns the list of the episodes IDs simulated,
        otherwise it returns all episodes.
    """
    # Reseed the generator (otherwise multiprocessing will use the same)
    np.random.seed()

    if id_episodes is not None:
        if len(id_episodes) != num_episodes:
            raise ValueError("Expected correct number of IDs.")

    # Instance agent object
    config = data_central.get_bo_config()
    agent = config.agents.instance(id_agent)  # @UndefinedVariable
    # Instance robot object
    robot = config.robots.instance(id_robot)  # @UndefinedVariable

    boot_spec = robot.get_spec()

    # If --stateful is passed, we try to load a previous state.
    if stateful:
        db = data_central.get_agent_state_db()
        if db.has_state(id_agent=id_agent, id_robot=id_robot):
            logger.info("Using previous state.")
            db.reload_state_for_agent(id_agent=id_agent,
                                      id_robot=id_robot,
                                      agent=agent)
        else:
            logger.info("No previous state found.")
            agent.init(boot_spec)
    else:
        agent.init(boot_spec)

    ds = data_central.get_dir_structure()
    # Consistency fix: reuse the directory-structure helper instead of
    # duplicating the timestamp / id_stream / filename construction inline.
    id_stream, filename = ds.get_simlog_filename_stream(id_robot=id_robot,
                                                        id_agent=id_agent)

    logger.info("Creating stream %r\n in file %r" % (id_stream, filename))

    logs_format = LogsFormat.get_reader_for(filename)

    bk = Bookkeeping(
        data_central=data_central,
        id_robot=id_robot,
        num_episodes=num_episodes,
        cumulative=cumulative,
        interval_print=interval_print,
    )

    if bk.another_episode_todo():
        with logs_format.write_stream(filename=filename,
                                      id_stream=id_stream,
                                      boot_spec=boot_spec) as writer:
            while bk.another_episode_todo():
                if id_episodes is not None:
                    id_episode = id_episodes.pop(0)
                    logger.info("Simulating episode %s" % id_episode)
                else:
                    id_episode = None

                # NOTE(review): 100000 is the max_observations cap passed
                # positionally to run_simulation -- confirm against its
                # signature.
                for observations in run_simulation(
                        id_robot, robot, id_agent, agent,
                        100000, max_episode_len,
                        id_episode=id_episode):
                    bk.observations(observations)
                    if write_extra:
                        extra = dict(robot_state=robot.get_state())
                    else:
                        extra = {}
                    writer.push_observations(observations=observations,
                                             extra=extra)
                bk.episode_done()
        logger.info("Peacefully done all episodes")
    else:
        # logger.warn is a deprecated alias; use warning().
        logger.warning("No episodes to do?")

    logger.info("done")
    if cumulative:
        return bk.get_all_episodes()
    else:
        return bk.get_id_episodes()
def check_logs_writing(id_agent, agent, id_robot, robot):
    """ Writes a short simulated log to a temporary directory, reads it
        back through the log index, and checks that observations and the
        per-observation extras round-trip exactly.

        Returns early (skips) if the agent does not support the robot's
        spec. Raises on any mismatch.
    """
    try:
        agent.init(robot.get_spec())
    except UnsupportedSpec:
        return

    root = tempfile.mkdtemp()
    ds = DirectoryStructure(root)
    id_stream = unique_timestamp_string()
    filename = ds.get_simlog_filename(id_agent, id_robot, id_stream)

    written = []
    written_extra = []

    logs_format = LogsFormat.get_reader_for(filename)

    # (Removed leftover debug statement "print run_simulation", which only
    # printed the function object and is a syntax error under Python 3.)
    with logs_format.write_stream(filename=filename,
                                  id_stream=id_stream,
                                  boot_spec=robot.get_spec()) as writer:
        for observations in run_simulation(id_robot=id_robot, robot=robot,
                                           id_agent=id_agent, agent=agent,
                                           max_observations=3,
                                           max_time=1000,
                                           check_valid_values=True):
            # NOTE(review): np.random.randint(1) is always 0 -- intended?
            extra = {'random_number': np.random.randint(1)}
            writer.push_observations(observations, extra)
            written_extra.append(extra)
            written.append(observations)

    logdirs = ds.get_log_directories()
    index = LogIndex()
    for logdir in logdirs:
        index.index(logdir)

    # now use the cached version
    index = LogIndex()
    for logdir in logdirs:
        index.index(logdir)

    assert index.has_streams_for_robot(id_robot)
    streams = index.get_streams_for_robot(id_robot)
    assert len(streams) == 1
    stream = streams[0]
    assert isinstance(stream, BootStream)

    assert stream.get_spec() == robot.get_spec()

    read_back = []
    read_back_extra = []
    for observations2 in stream.read(read_extra=True):
        read_back_extra.append(observations2['extra'])
        read_back.append(observations2)

    if len(read_back) != len(written):
        raise Exception('Written %d, read back %d.' %
                        (len(written), len(read_back)))

    for i in range(len(read_back)):
        a = written[i]
        b = read_back[i]

        # Bug fix: compare over the union of both records' fields.
        # The original used "or", which returns only the first operand
        # whenever it is non-empty, silently ignoring b's fields.
        fields = set(a.dtype.names) | set(b.dtype.names)
        fields.remove('extra')
        for field in fields:
            assert_equal(a[field], b[field])

    for i in range(len(read_back)):
        a = written_extra[i]
        b = read_back_extra[i]
        assert_equal(a, b)

    shutil.rmtree(root)
def task_servonav(
    data_central,
    id_agent,
    id_robot,
    max_episode_len,
    num_episodes,
    fail_if_not_working,
    id_episodes=None,  # if None, just use the ID given by the world
    cumulative=False,
    interval_print=None,
    interval_write=10,  # write every 10 frames
    num_episodes_with_robot_state=0,
    resolution=1,
):
    """ Runs num_episodes of servo-navigation with the agent's servo
        interface and logs them to a new stream.

        Returns the list of the episodes IDs simulated.
    """
    # Reseed the generator (otherwise multiprocessing will use the same)
    np.random.seed()

    if id_episodes is not None and len(id_episodes) != num_episodes:
        raise ValueError("Expected correct number of IDs.")

    # Instance robot object
    robot = data_central.get_bo_config().robots.instance(id_robot)
    # TODO: check that this is a Vehicles simulation
    boot_spec = robot.get_spec()

    # Load the learned agent state and extract its servo interface.
    agent, _ = load_agent_state(data_central, id_agent, id_robot,
                                reset_state=False,
                                raise_if_no_state=True)
    # TODO: check servo
    servo_agent = agent.get_servo()
    id_agent_servo = "%s_servo" % id_agent

    ds = data_central.get_dir_structure()
    id_stream = "%s_%s_%s_servonav" % (id_robot, id_agent,
                                       unique_timestamp_string())
    filename = ds.get_simlog_filename(id_robot=id_robot,
                                      id_agent=id_agent,
                                      id_stream=id_stream)
    logger.info("Creating stream %r\n in file %r" % (id_stream, filename))
    logs_format = LogsFormat.get_reader_for(filename)

    bk = BookkeepingServo(
        data_central=data_central,
        id_robot=id_robot,
        id_agent=id_agent_servo,
        num_episodes=num_episodes,
        cumulative=cumulative,
        interval_print=interval_print,
    )

    # Nothing left to simulate.
    if not bk.another_episode_todo():
        return

    with logs_format.write_stream(filename=filename,
                                  id_stream=id_stream,
                                  boot_spec=boot_spec) as writer:
        episodes_done = 0
        while bk.another_episode_todo():
            episode = robot.new_episode()
            if id_episodes is None:
                id_episode = episode.id_episode
            else:
                id_episode = id_episodes.pop(0)

            # Only the first few episodes carry the full robot state.
            save_robot_state = episodes_done < num_episodes_with_robot_state

            servonav_episode(
                id_robot=id_robot,
                robot=robot,
                id_servo_agent=id_agent_servo,
                servo_agent=servo_agent,
                writer=writer,
                id_episode=id_episode,
                resolution=resolution,
                max_episode_len=max_episode_len,
                save_robot_state=save_robot_state,
                interval_write=interval_write,
                fail_if_not_working=fail_if_not_working,
                max_tries=10000,
            )

            bk.episode_done()
            episodes_done += 1
def new_episode(self):
    """Record the start time and return a minimal episode descriptor."""
    self.timestamp = time.time()
    # Environment id is not applicable for this robot.
    return EpisodeDesc(unique_timestamp_string(), 'n/a')
def new_episode(self):
    """Record the start time and describe a new episode."""
    self.timestamp = time.time()
    episode = EpisodeDesc(unique_timestamp_string(), 'n/a')  # XXX: add constant
    return episode
def task_servo(data_central, id_agent, id_robot,
               max_episode_len,
               num_episodes,
               displacement,
               id_episodes=None,  # if None, just use the ID given by the world
               cumulative=False,
               interval_print=None,
               num_episodes_with_robot_state=0):
    ''' Runs num_episodes of servoing with the agent's servo interface,
        logging them to a fresh stream.

        Returns the list of the episodes IDs simulated. '''
    # Reseed the generator (otherwise multiprocessing will use the same)
    np.random.seed()

    if id_episodes is not None and len(id_episodes) != num_episodes:
        raise ValueError('Expected correct number of IDs.')

    # Instance robot object
    robot = data_central.get_bo_config().robots.instance(id_robot)
    # TODO: check that this is a Vehicles simulation
    boot_spec = robot.get_spec()

    # Load the learned agent and initialize its servo interface.
    agent, _ = load_agent_state(data_central, id_agent, id_robot,
                                reset_state=False,
                                raise_if_no_state=True)
    servo_agent = agent.get_servo()
    servo_agent.init(boot_spec)

    id_agent_servo = '%s_servo' % id_agent

    ds = data_central.get_dir_structure()
    id_stream = '%s_%s_%s_servo' % (id_robot, id_agent,
                                    unique_timestamp_string())
    filename = ds.get_simlog_filename(id_robot=id_robot,
                                      id_agent=id_agent,
                                      id_stream=id_stream)
    logger.info('Creating stream %r\n in file %r' % (id_stream, filename))
    logs_format = LogsFormat.get_reader_for(filename)

    bk = BookkeepingServo(data_central=data_central,
                          id_robot=id_robot,
                          id_agent=id_agent_servo,
                          num_episodes=num_episodes,
                          cumulative=cumulative,
                          interval_print=interval_print)

    # Nothing left to simulate.
    if not bk.another_episode_todo():
        return

    with logs_format.write_stream(filename=filename,
                                  id_stream=id_stream,
                                  boot_spec=boot_spec) as writer:
        episodes_done = 0
        while bk.another_episode_todo():
            episode = robot.new_episode()
            if id_episodes is None:
                id_episode = episode.id_episode
            else:
                id_episode = id_episodes.pop(0)

            # Only the first few episodes carry the full robot state.
            save_robot_state = episodes_done < num_episodes_with_robot_state

            servoing_episode(id_robot=id_robot, robot=robot,
                             id_servo_agent=id_agent_servo,
                             servo_agent=servo_agent,
                             writer=writer,
                             id_episode=id_episode,
                             displacement=displacement,
                             max_episode_len=max_episode_len,
                             save_robot_state=save_robot_state,
                             max_tries=10000)

            bk.episode_done()
            episodes_done += 1