def define_jobs_context(self, context):
    """Define the conversion job for a single exploration log.

    Reads ids from the command-line options, fails early if the robot
    or the explog is unknown to the configuration, computes the output
    filename, and schedules do_convert_job2 through the context.
    """
    options = self.get_options()

    rs2b_config = get_rs2b_config()
    boot_config = get_boot_config()
    data_central = DataCentral(options.boot_root)

    id_robot = options.id_robot
    id_robot_res = options.id_robot_res
    id_explog = options.id_explog
    id_stream = '%s%s' % (options.id_episode_prefix, id_explog)
    id_episode = id_stream
    id_agent = None

    # Look the ids up so that an unknown robot/explog raises here,
    # before any job is scheduled.
    boot_config.robots[id_robot]
    rs2b_config.explogs[id_explog]

    dir_structure = data_central.get_dir_structure()
    filename = dir_structure.get_explog_filename(id_robot=id_robot_res,
                                                 id_agent=id_agent,
                                                 id_stream=id_stream)

    return context.comp_config(do_convert_job2,
                               id_robot=id_robot,
                               id_robot_res=id_robot_res,
                               id_explog=id_explog,
                               id_stream=id_stream,
                               id_episode=id_episode,
                               filename=filename)
def main(self):
    """Node entry point: read ROS parameters, load the learned agent,
    register the servo services, and enter the main loop via go()."""
    rospy.init_node('servo_demo', disable_signals=True)
    self.info('Started.')
    contracts.disable_all()

    # --- ROS parameters -------------------------------------------------
    boot_root = expand_environment(rospy.get_param('~boot_root'))
    config_dir = rospy.get_param('~config_dir')
    id_robot_learned = rospy.get_param('~id_robot_learn')

    self.info('loading %r' % config_dir)
    GlobalConfig.global_load_dir(config_dir)

    id_agent = rospy.get_param('~id_agent')
    self.id_robot = rospy.get_param('~id_robot')
    self.sleep = rospy.get_param('~sleep', 0.005)
    self.info('sleep: %s' % self.sleep)
    self.error_threshold = float(rospy.get_param('~error_threshold'))
    raise_if_no_state = rospy.get_param('~raise_if_no_state', True)

    # --- load the learned agent and its servo interface ------------------
    data_central = DataCentral(boot_root)
    self.agent, state = load_agent_state(data_central,
                                         id_agent,
                                         id_robot_learned,
                                         reset_state=False,
                                         raise_if_no_state=raise_if_no_state)
    self.info('Loaded state: %s' % state)
    self.servo_agent = self.agent.get_servo()

    bo_config = get_boot_config()
    self.robot = bo_config.robots.instance(self.id_robot)
    self.boot_spec = self.robot.get_spec()

    self.publish_info_init()

    # --- servoing state -------------------------------------------------
    self.y = None
    self.y_goal = None
    self.started_now = False
    self.stopped_now = False
    self.e0 = 1
    self.e = 1
    self.last_boot_data = None
    self.state = STATE_WAIT

    # --- services ---------------------------------------------------------
    self.info('Defining services')
    rospy.Service('set_goal', Empty, self.srv_set_goal)
    rospy.Service('start_servo', Empty, self.srv_start_servo)
    rospy.Service('stop_servo', Empty, self.srv_stop_servo)
    self.info('Finished initialization')

    self.count = 0
    self.go()
def read_all(self):
    """Yield LogItem(y0, y1, u, x0=None) for each consecutive pair of
    observations in the robot's logged streams.

    If self.shape is not None, both observations are resampled to that
    shape and clipped to [0, 1] in place.
    """
    data_central = DataCentral(self.boot_root)
    log_index = data_central.get_log_index()
    observations = log_index.read_all_robot_streams(id_robot=self.id_robot)

    for count, (bd1, bd2) in enumerate(pairwise(observations), start=1):
        # Progress report every 100 pairs (first one included).
        if count % 100 == 1:
            print('read %d' % count)

        y0 = bd1['observations']
        y1 = bd2['observations']
        u = bd1['commands']

        if self.shape is not None:
            y0 = scipy_image_resample(y0, self.shape, order=0)
            np.clip(y0, 0, 1, y0)
            y1 = scipy_image_resample(y1, self.shape, order=0)
            np.clip(y1, 0, 1, y1)

        yield LogItem(y0=y0, y1=y1, u=u, x0=None)
def check_logs_formats(id_agent, agent, id_robot, robot):  # @UnusedVariable
    """Round-trip test for every registered log format.

    Simulates one short episode for (id_agent, id_robot), then, for each
    format in LogsFormat.formats, writes the resulting stream to a file
    and reads it back, asserting that counter, timestamp, observations
    and commands all survive the round trip.

    Fix: the two bare ``except:`` clauses were narrowed to
    ``except Exception:`` so that KeyboardInterrupt/SystemExit are no
    longer intercepted; both handlers re-raise, so behavior on ordinary
    errors is unchanged.

    Raises:
        Exception: if the stream count, entry count, or any entry's
            content differs between written and read data.
    """
    with create_tmp_dir() as root:
        os.mkdir(os.path.join(root, 'config'))
        data_central = DataCentral(root)

        # Simulate two episodes
        # NO! there is a bug in bag reading; the messages are read
        # in timestamp order; and for now different episodes can
        # have overlapping timestamps
        try:
            simulate(data_central, id_agent=id_agent, id_robot=id_robot,
                     max_episode_len=2,
                     num_episodes=1,  # changed from 2 (see above)
                     cumulative=False,
                     id_episodes=None,
                     stateful=False,
                     interval_print=None,
                     write_extra=True)
        except UnsupportedSpec:
            # Agent/robot specs are incompatible; nothing to test here.
            return

        log_index = data_central.get_log_index()
        log_index.reindex()

        streams = log_index.get_streams_for(id_robot, id_agent)
        if len(streams) != 1:
            msg = 'Expected to find 1 stream, not %d' % len(streams)
            raise Exception(msg)
        stream_orig = streams[0]

        for logs_format, interface in LogsFormat.formats.items():
            try:
                dirname = os.path.join(root, logs_format)
                safe_makedirs(dirname)
                filename = os.path.join(dirname, 'example.%s' % logs_format)
                id_stream = 'example'

                # Write the simulated stream to a file in this format,
                # remembering exactly what was written.
                written = []
                with interface.write_stream(filename, id_stream,
                                            robot.get_spec()) as writer:
                    for observations in stream_orig.read():
                        logger.info('Writing %s:%s (%s)' %
                                    (observations['id_episode'],
                                     observations['counter'],
                                     observations['timestamp']))
                        writer.push_observations(observations)
                        written.append(observations)

                # Read the file back and compare entry by entry.
                count = 0
                for obs_read in interface.read_from_stream(filename,
                                                           id_stream):
                    logger.info('Reading %s:%s (%s)' %
                                (obs_read['id_episode'],
                                 obs_read['counter'],
                                 obs_read['timestamp']))
                    original = written[count]
                    try:
                        if obs_read['counter'] != original['counter']:
                            msg = ('Not even the counter is the same!'
                                   ' %s vs %s' %
                                   (obs_read['counter'],
                                    original['counter']))
                            raise Exception(msg)
                        assert_allclose(obs_read['timestamp'],
                                        original['timestamp'])
                        assert_allclose(obs_read['observations'],
                                        original['observations'])
                        assert_allclose(obs_read['commands'],
                                        original['commands'])
                    except Exception:
                        # Log the mismatching pair before re-raising.
                        logger.error('Error at count = %d' % count)
                        logger.error(' original: %s' % original)
                        logger.error(' obs_read: %s' % obs_read)
                        raise
                    count += 1

                if count != len(written):
                    msg = ('I wrote %d entries, but obtained %d.' %
                           (len(written), count))
                    raise Exception(msg)
            except Exception:
                logger.error('Could not pass tests for format %r.' %
                             logs_format)
                raise
def check_cmdline(id_agent, agent, id_robot, robot):  # @UnusedVariable
    """Exercise boot_olympics_manager from the command line for one
    agent/robot pair: simulate in every log format, learn, optionally
    servo/predict, and run the list-* commands."""
    try:
        agent.init(robot.get_spec())
    except UnsupportedSpec:
        return

    with create_tmp_dir() as root:
        os.mkdir(os.path.join(root, 'config'))  # XXX make it automatic
        data_central = DataCentral(root)
        log_index = data_central.get_log_index()

        def manager(*cmd):
            # Run boot_olympics_manager rooted at root, with contracts on.
            boot_olympics_manager(['-d', root, '--contracts'] + list(cmd))

        assert not log_index.has_streams_for_robot(id_robot)

        formats = LogsFormat.formats.keys()
        for logs_format in formats:
            # Two identical runs per format: 2 streams, 4 episodes each.
            for _ in range(2):
                manager('--logformat', logs_format,
                        'simulate', '-a', id_agent, '-r', id_robot,
                        '--num_episodes', '2', '--episode_len', '2')

        # The index does not see new streams until reindex() is called.
        assert not log_index.has_streams_for_robot(id_robot)
        log_index.reindex()
        assert log_index.has_streams_for_robot(id_robot)

        n = len(formats)
        assert_allclose(len(log_index.get_streams_for_robot(id_robot)),
                        2 * n)
        assert_allclose(len(log_index.get_streams_for(id_robot, id_agent)),
                        2 * n)
        assert_allclose(len(log_index.get_episodes_for_robot(id_robot)),
                        4 * n)
        assert_allclose(len(log_index.get_episodes_for_robot(id_robot,
                                                             id_agent)),
                        4 * n)

        manager('learn-log', '-a', id_agent, '-r', id_robot)

        try:
            agent.get_servo()
        except NotImplementedError:
            pass
        else:
            manager('servo', '-a', id_agent, '-r', id_robot,
                    '--num_episodes', '1', '--max_episode_len', '1')

        try:
            agent.get_predictor()
        except NotImplementedError:
            pass
        else:
            manager('predict', '-a', id_agent, '-r', id_robot)

        # TODO: publish
        # execute_command('batch') # TODO: test batch
        listing_commands = [
            ['list-logs'],
            ['list-logs', '-e'],
            ['list-logs', '-l'],
            ['list-logs', '-s'],
            ['list-logs', '-R'],
            ['list-agents'],
            ['list-agents', '-v'],
            ['list-robots'],
            ['list-robots', '-v'],
            ['list-states'],
            ['list-states', '-v'],
        ]
        for args in listing_commands:
            manager(*args)