Example #1
    def from_yaml(spec):
        try:
            if not isinstance(spec, dict):
                raise ValueError('Expected a dict, got %s' % spec)
            s = dict(**spec)
            id_stream = s.pop('id', None)
            desc = s.pop('desc', None)
            required = ['shape', 'format', 'range']
            for x in required:
                if x not in s:
                    raise ValueError('Missing entry %r.' % x)
            shape = s.pop('shape')
            check('list[>0](int,>0)', shape)  # XXX: slow
            format = s.pop('format')  # @ReservedAssignment
            range = s.pop('range')  # @ReservedAssignment
            extra = s.pop('extra', {})
            filtered = s.pop('filtered', None)
            default = s.pop('default', None)

            names = s.pop('names', None)  # TODO: do something useful with this
            if names:
                extra['names'] = names

            if s.keys():
                logger.warning('While reading\n%s\nextra keys detected: %s' % 
                               (spec, s.keys()))

            streamels = streamels_from_spec(shape, format, range, default)

            return StreamSpec(id_stream, streamels, extra, filtered, desc)
        except:
            logger.error('Error while parsing the StreamSpec:\n%s' % spec)
            raise
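
A minimal sketch of the kind of dict this parser accepts. Only 'shape',
'format', and 'range' are required (checked above); the particular format code
and range used here are illustrative assumptions, not values from the codebase.

    # Hypothetical input for StreamSpec.from_yaml().
    spec = {
        'id': 'observations',
        'desc': 'example sensor stream',
        'shape': [4],        # non-empty list of positive ints (see the check() call)
        'format': 'C',       # assumed format code
        'range': [0, 1],     # assumed range convention
    }
    stream_spec = StreamSpec.from_yaml(spec)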
Example #2
    def from_yaml(xo):
        try:
            if not isinstance(xo, dict):
                raise ValueError('Expected a dict, got %s' % xo)
            x = dict(**xo)  # make a copy

            check_contained('observations', x)
            check_contained('commands', x)
            observations = StreamSpec.from_yaml(x.pop('observations'))
            commands = StreamSpec.from_yaml(x.pop('commands'))
            extra = x.pop('extra', None)
            desc = x.pop('desc', None)
            id_robot = x.pop('id', None)

            if x.keys():
                logger.warning('While reading\n%s\nextra keys detected: %s' % 
                               (pformat(xo), x.keys()))

            return BootSpec(observations, commands,
                            id_robot=id_robot,
                            extra=extra,
                            desc=desc)
        except:
            logger.error('Error while parsing the BootSpec:\n%s' % xo)
            raise
Example #3
def index_robots(file2streams):
    ''' Groups the streams by robot, making sure the specs are compatible. 
        Returns dict: id_robot -> list of streams.
    '''
    robot2streams = defaultdict(list)
    robot2spec = {}
    for _, streams in file2streams.items():
        for stream in streams:
            id_robot = stream.get_id_robot()

            if id_robot not in robot2spec:
                robot2spec[id_robot] = stream.get_spec()
            else:
                # XXX:
                stream_spec = stream.get_spec()
                if str(stream_spec) != str(robot2spec[id_robot]):
                    msg = 'Warning! You got your logs mixed up. \n'
                    msg += ('Problem spec in:\n\t%s\nis\n\t%s\n' % 
                           (stream, stream_spec))
                    msg += ('and this is different from:\n\t%s\n'
                           'found in e.g.,:\n\t%s' % 
                           (robot2spec[id_robot], robot2streams[id_robot][0]))
                    msg += '\nI will skip this stream.'
                    logger.error(msg)
                    continue
            robot2streams[id_robot].append(stream)

    for robot in robot2streams:
        robot2streams[robot] = sorted(robot2streams[robot],
                                      key=lambda x: list(x.get_id_episodes())[0])

    return dict(robot2streams)
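
A hedged sketch of how this fits together: index_directory() (Example #7)
builds the filename -> streams map that index_robots() consumes.

    file2streams = index_directory('logs')
    robot2streams = index_robots(file2streams)
    for id_robot, streams in robot2streams.items():
        print('%s: %d streams' % (id_robot, len(streams)))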
Example #4
 def compmake_job(self, *args, **kwargs):
     """ Calls compmake's comp() function. """
     try:
         from compmake import comp
     except ImportError:
         logger.error('Compmake not installed')
         raise
     return comp(*args, **kwargs)
Example #5
def hdf_list_streams(filename):
    f = tables.openFile(filename)
    try:
        # TODO: check this has a valid format
        group = f.root.boot_olympics.streams
        ids = group._v_children.keys()

        streams = []
        for sid in ids:
            table = group._v_children[sid].boot_stream
            extra = group._v_children[sid].extra
            spec = spec_from_group(group._v_children[sid])
            id_episodes = set(np.unique(table[:]['id_episode']))

            summaries = [episode_summary(table, extra, id_episode)
                         for id_episode in id_episodes]

            topic = sid
            id_robot = table[0]['id_robot']

            filename = os.path.realpath(filename)

            stream = BootStream(id_robot=id_robot,
                                filename=filename,
                                topic=topic, spec=spec,
                                summaries=summaries)

            streams.append(stream)
        return streams
    except (ValueError, AttributeError):
        logger.error('Error while trying to index %r.' % filename)
        raise
    finally:
        f.close()
Example #6
def cmd_list_states(data_central, argv):
    '''Shows a summary of the states present in DB. '''
    parser = OptionParser(prog='list-states',
                          usage=cmd_list_states.short_usage)
    parser.disable_interspersed_args()
    parser.add_option("-v", dest='verbose', default=False, action='store_true',
                      help="Show more verbose output.")
    (options, args) = parser.parse_args(argv)

    check_no_spurious(args)
    db = data_central.get_agent_state_db()

    combinations = list(db.list_states())
    if not combinations:
        logger.info('No learning states saved in DB.')
    else:
        logger.info('Found %d combinations in DB.' % len(combinations))

    for id_agent, id_robot in combinations:
        logger.info('- Found state a: %-35s  r: %-25s' % (id_agent, id_robot))

        if options.verbose:
            try:
                state = db.get_state(id_robot=id_robot, id_agent=id_agent)
                logger.info('  # episodes: %s' % len(state.id_episodes))
                logger.info('      object: %s' % 
                            describe_value(state.agent_state))
            except Exception as e:
                logger.error('  (could not load state: %s) ' % e)

    if not options.verbose:
        logger.debug('Use -v for more information.')
Example #7
def index_directory(directory, ignore_cache=False, warn_if_longer=3):
    ''' Returns a hash filename -> list of streams. '''
    file2streams = {}
    # logger.debug('Indexing directory %r (ignore cache: %s).' % 
    #             (friendly_path(directory), ignore_cache))
    
    with warn_long_time(warn_if_longer, 'indexing directory %r' % 
                                        friendly_path(directory)):
        files = get_all_log_files(directory)
    
    # Shuffle the list so that multiple threads will index different files
    import random
    random.seed()
    random.shuffle(files)

    with warn_long_time(warn_if_longer, 'indexing %d files (use cache: %s)' % 
                        (len(files), not ignore_cache)):
        for filename in files:
            reader = LogsFormat.get_reader_for(filename)
            try:
                file2streams[filename] = \
                    reader.index_file_cached(filename, ignore_cache=ignore_cache)
                for stream in file2streams[filename]:
                    assert isinstance(stream, BootStream)
                if not file2streams[filename]:
                    logger.warning('No streams found in file %r.' % 
                                   friendly_path(filename))
            except Exception:  # XXX
                logger.error('Invalid data in file %r.' % friendly_path(filename))
                logger.error(traceback.format_exc())

    return file2streams
Example #8
def yaml_load(yaml_string):
    try:
        return load(yaml_string, Loader=Loader)
    except KeyboardInterrupt:
        raise
    except:
        logger.error('Could not deserialize YAML')
        dump_emergency_string(yaml_string)
        raise
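
A usage sketch: this assumes the module-level `from yaml import load, Loader`
that the function relies on, with a spec string like the dict in Example #1.

    spec = yaml_load('{id: y0, shape: [4], format: C, range: [0, 1]}')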
Example #9
def servo_stats_summary(data_central, id_agent, id_robot, id_episode):  # @UnusedVariable
    from geometry import (SE2, SE2_from_SE3, translation_from_SE2,
                          angle_from_SE2, SE3)

    log_index = data_central.get_log_index()
    errors = []
    timestamps = []
    poses = []
    dist_xy = []
    dist_th = []
    for observations in log_index.read_robot_episode(id_robot, id_episode,
                                                     read_extra=True):
        extra = observations['extra'].item()

        servoing = extra.get('servoing', None)
        if servoing is None:
            logger.error('Warning, no "servoing" in episode %r.' % 
                          id_episode)
            break

        obs0 = np.array(servoing['obs0'])
        pose0 = SE2_from_SE3(SE3.from_yaml(servoing['pose0']))
        pose1 = SE2_from_SE3(SE3.from_yaml(servoing['pose1']))
        poseK = SE2_from_SE3(SE3.from_yaml(servoing['poseK']))

        pose1 = SE2.multiply(SE2.inverse(pose0), pose1)
        poseK = SE2.multiply(SE2.inverse(pose0), poseK)
        poses.append(poseK)

        dist_xy.append(np.linalg.norm(translation_from_SE2(poseK)))
        dist_th.append(np.abs(angle_from_SE2(poseK)))

        # obs1 = np.array(servoing['obs1'])
        obsK = np.array(servoing['obsK'])

        err_L2 = np.linalg.norm(obs0 - obsK)

        errors.append(err_L2)
        timestamps.append(observations['timestamp'])
# last['time_from_episode_start'] = observations['time_from_episode_start']

    initial_distance = np.linalg.norm(translation_from_SE2(pose1))

    summary = {}
    summary['pose0'] = pose0
    summary['pose1'] = pose1
    summary['poses'] = poses
    summary['errors'] = errors
    summary['timestamps'] = timestamps
    summary['initial_translation'] = translation_from_SE2(pose1)
    summary['initial_distance'] = initial_distance
    summary['initial_rotation'] = angle_from_SE2(pose1)
    summary['dist_xy'] = dist_xy
    summary['dist_th'] = dist_th
    return summary
Example #10
 # Generator used as a context manager; assumes a module-level
 # `from contextlib import contextmanager`.
 @contextmanager
 def write_stream(self, filename, id_stream, boot_spec):
     writer = HDFLogWriter(filename, id_stream, boot_spec)
     try:
         yield writer
     except:
         logger.error("Got exception in the write_stream() context manager")
         writer.cleanup()
         raise
     else:
         writer.close()
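
write_stream() is meant to be used in a with-statement; the usage below is
taken verbatim from Example #15.

    with interface.write_stream(filename, id_stream, robot.get_spec()) as writer:
        for observations in stream_orig.read():
            writer.push_observations(observations)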
Example #11
 def close(self):
     if self.num == 0:
         logger.error('No data given for writing; deleting tmp file.')
         self.bag.close()
         os.unlink(self.tmp_filename)
     else:
         self.bag.close()
         if os.path.exists(self.filename):
             os.unlink(self.filename)
         os.rename(self.tmp_filename, self.filename)
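
close() follows the write-to-temp-then-rename pattern, so a crash mid-write
never replaces a good log with a truncated one. A minimal standalone sketch of
the same idea (names hypothetical):

    import os

    def atomic_replace(filename, data):
        # Write everything to a temporary file first...
        tmp = filename + '.tmp'
        with open(tmp, 'wb') as f:
            f.write(data)
        # ...then swap it into place, mirroring the unlink+rename above.
        if os.path.exists(filename):
            os.unlink(filename)
        os.rename(tmp, filename)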
Example #12
    def reload_state_for_agent(self, id_agent, id_robot, agent):
        state = self.get_state(id_agent, id_robot)

        logger.debug('State after learning %d episodes.' % 
                     len(state.id_episodes))
        try:
            agent.set_state(state.agent_state)
        except:
            logger.error('Could not set agent to previous state.')
            raise
        return state
Example #13
    def get(self, key):
        if not self.exists(key):
            raise Exception('Could not find key %r.' % key)
        
        filename = self.filename_for_key(key)
        try:
            with warn_long_time(self.warn_long_time, 'reading %r' % key):  
                return safe_pickle_load(filename)

        except Exception as e:
            msg = "Could not unpickle file %r: %s" % (filename, e)
            logger.error(msg)
            raise
Example #14
def save_report(data_central, report, filename, resources_dir=None,
                save_pickle=False, save_hdf=True,
                check_hdf_written_correctly=True):
    """ filename.html """
    
    report.text('report_date', isodate())  # TODO: add other stuff
    
    ds = data_central.get_dir_structure()
    
    report.to_html(filename, resources_dir=resources_dir)
    ds.file_is_done(filename)
    
    if save_pickle:
        pickle_name = os.path.splitext(filename)[0] + '.pickle'
        
        with warn_long_time_writing(pickle_name):
            safe_pickle_dump(report, pickle_name, protocol=2)

        ds.file_is_done(pickle_name)


    if save_hdf:
        hdf_name = os.path.splitext(filename)[0] + '.rr1.h5'
        
        with warn_long_time_writing(hdf_name):
            report.to_hdf(hdf_name)
        
        if check_hdf_written_correctly:
            with warn_long_time_reading(hdf_name, logger=logger):
                r2 = report_from_hdf(hdf_name)

            if report != r2:
                logger.error('Report read back from HDF does not match the original.')
                logger.error(report.format_tree())
                logger.error(r2.format_tree())
                raise Exception('HDF file %r was not written correctly.' % hdf_name)
        
        ds.file_is_done(hdf_name)
Example #15
def check_logs_formats(id_agent, agent, id_robot, robot):  # @UnusedVariable

    with create_tmp_dir() as root:
        os.mkdir(os.path.join(root, 'config'))
        data_central = DataCentral(root)

        # Simulate two episodes
        # NO! there is a bug in bag reading; the messages are read
        # in timestamp order; and for now different episodes can
        # have overlapping timestamps
        try:
            simulate(data_central, id_agent=id_agent, id_robot=id_robot,
                     max_episode_len=2,
                     num_episodes=1,  # changed from 2 (see above)
                     cumulative=False,
                     id_episodes=None,
                     stateful=False,
                     interval_print=None,
                     write_extra=True)
        except UnsupportedSpec:
            return

        log_index = data_central.get_log_index()
        log_index.reindex()

        streams = log_index.get_streams_for(id_robot, id_agent)
        if len(streams) != 1:
            msg = 'Expected to find 1 stream, not %d' % len(streams)
            raise Exception(msg)

        stream_orig = streams[0]

        for logs_format, interface in LogsFormat.formats.items():
            try:
                dirname = os.path.join(root, logs_format)
                safe_makedirs(dirname)
                filename = os.path.join(dirname, 'example.%s' % logs_format)
                written = []
                id_stream = 'example'
                with interface.write_stream(filename, id_stream,
                                            robot.get_spec()) as writer:
                    for observations in stream_orig.read():
                        logger.info('Writing %s:%s (%s)' % 
                              (observations['id_episode'],
                               observations['counter'],
                               observations['timestamp']))
                        writer.push_observations(observations)
                        written.append(observations)

                count = 0
                for obs_read in interface.read_from_stream(filename,
                                                           id_stream):
                    logger.info('Reading %s:%s (%s)' % 
                          (obs_read['id_episode'],
                           obs_read['counter'],
                           obs_read['timestamp']))

                    original = written[count]

                    try:
                        if obs_read['counter'] != original['counter']:
                            msg = ('Not even the counter is the same!'
                                   ' %s vs %s' % 
                                   (obs_read['counter'], original['counter']))
                            raise Exception(msg)

                        assert_allclose(obs_read['timestamp'],
                                        original['timestamp'])
                        assert_allclose(obs_read['observations'],
                                        original['observations'])
                        assert_allclose(obs_read['commands'],
                                        original['commands'])
                    except:
                        logger.error('Error at count = %d' % count)
                        logger.error('  original: %s' % original)
                        logger.error('  obs_read: %s' % obs_read)
                        raise
                    count += 1

                if count != len(written):
                    msg = ('I wrote %d entries, but obtained %d.' % 
                           (len(written), count))
                    raise Exception(msg)
            except:
                logger.error('Could not pass tests for format %r.'
                             % logs_format)
                raise
Example #16
def dump_emergency_string(s):
    # Write to a temporary file rather than a hard-coded path, so this
    # works on any machine.
    from tempfile import NamedTemporaryFile
    with NamedTemporaryFile(mode='w', suffix='.yaml', prefix='yaml_load-',
                            delete=False) as f:
        f.write(s)
        emergency = f.name
    logger.error('String written to %r.' % emergency)
Example #17
def check_conversions(stream_spec1, nuisance):
    # print('Checking %s / %s ' % (stream_spec1, nuisance))
    nuisance_inv = None
    try:
        try:
            stream_spec2 = nuisance.transform_spec(stream_spec1)
        except UnsupportedSpec as e:
            logger.info('Skipping %s/%s because incompatible: %s' % 
                        (stream_spec1, nuisance, e))
            return

        value1 = stream_spec1.get_random_value()
        stream_spec1.check_valid_value(value1)

        value2 = nuisance.transform_value(value1)
        stream_spec2.check_valid_value(value2)

        try:
            nuisance_inv = nuisance.inverse()
        except NuisanceNotInvertible as e:
            logger.info('Skipping some tests %s/%s because not invertible:'
                        ' %s' % (stream_spec1, nuisance, e))
            return

        try:
            stream_spec1b = nuisance_inv.transform_spec(stream_spec2)
        except UnsupportedSpec as e:
            msg = ('The inverse of the nuisance does not seem to be able '
                   'to handle the result:\n%s\n\n'
                   '  stream_spec1: %s\n'
                   '      nuisance: %s\n'
                   '  stream_spec2: %s\n'
                   '  nuisance_inv: %s\n' % 
                   (indent(str(e), '>'),
                    stream_spec1.to_yaml(), nuisance,
                    stream_spec2.to_yaml(), nuisance_inv))
            raise ValueError(msg)

        try:
            StreamSpec.check_same_spec(stream_spec1, stream_spec1b)
        except Exception as e:
            msg = ('The inverse of the nuisance does not recreate the '
                   'initial spec:\n%s\n\n'
                   '  stream_spec1: %s\n'
                   '      nuisance: %s\n'
                   '  stream_spec2: %s\n'
                   '  nuisance_inv: %s\n'
                   ' stream_spec1b: %s\n' % 
                   (indent(str(e), '>'),
                    stream_spec1.to_yaml(), nuisance,
                    stream_spec2.to_yaml(), nuisance_inv,
                    stream_spec1b.to_yaml()))
            raise ValueError(msg)

        value1b = nuisance_inv.transform_value(value2)
        stream_spec1.check_valid_value(value1b)

        # TODO: if exact
        assert_allclose(value1, value1b, rtol=1e-5)

    except:
        logger.error('Error while testing:')
        logger.error(' stream_spec:  %s ' % stream_spec1.to_yaml())
        logger.error(' nuisance:     %s' % nuisance)
        logger.error(' nuisance_inv: %s' % nuisance_inv)
        raise
Example #18
def batch_process_manager(data_central, which_sets, command=None):
    try:
        import compmake  # @UnusedImport
    except ImportError:
        logger.error('Compmake not installed; multiprocessor '
                     'processes not available.')
        raise

    from compmake import (comp_prefix, use_filesystem,
                          compmake_console, batch_command)

    batch_config = BatchConfigMaster()
    configs = data_central.get_dir_structure().get_config_directories()
    for config in configs:
        batch_config.load(config)

    sets_available = batch_config.sets.keys()

    # logger.info('Available: %r' % sets_available)
    # logger.info('Sets:      %r' % which_sets)
    which_sets_int = expand_string(which_sets, options=sets_available)

    if not which_sets_int:
        msg = 'Specified sets %r not found.' % which_sets
        msg += ' Available: %s' % sets_available
        raise UserError(msg)

    # logger.info('Expanded:  %r' % which_sets)

    for x in which_sets_int:
        if x not in sets_available:
            msg = 'Set %r not available.' % x
            raise UserError(msg)

    if len(which_sets_int) == 1:
        combid = which_sets[0]
    else:
        combid = '-'.join(which_sets)

    # Create the new root        
    root = data_central.root
    root_set = os.path.join(data_central.root, 'sets', combid)
    safe_makedirs(root_set)
    data_central_set = DataCentral(root_set)

    # add symbolic links to logs and config
    main_config = os.path.realpath(os.path.join(root, 'config'))
    set_config = os.path.join(root_set, 'config')
    safe_symlink(main_config, set_config) 

    safe_makedirs(os.path.join(root_set, 'logs'))
    safe_symlink(os.path.join(root, 'logs'),
                 os.path.join(root_set, 'logs', 'original'))

    storage = data_central_set.get_dir_structure().get_storage_dir()
    compmake_storage = os.path.join(storage, 'compmake')
    logger.debug('Using storage directory %r.' % friendly_path(compmake_storage))
    use_filesystem(compmake_storage)

    for id_set in which_sets_int:
        if len(which_sets_int) > 1:
            comp_prefix(id_set)

        try:
            spec = batch_config.sets[id_set]
            batch_set(data_central_set, id_set, spec)
        except ConfToolsException:
            msg = ('Bad configuration for the set %r with spec\n %s' % 
                   (id_set, pformat(spec)))
            logger.error(msg)
            raise

    if command:
        return batch_command(command)
    else:
        compmake_console()
        return 0
Example #19
def servonav_episode(
    id_robot,
    robot,
    id_servo_agent,
    servo_agent,
    writer,
    id_episode,
    max_episode_len,
    save_robot_state,
    interval_write=1,
    interval_print=5,
    resolution=0.5,  # grid resolution
    delta_t_threshold=0.2,  # when to switch
    MIN_PATH_LENGTH=8,
    MAX_TIME_FOR_SWITCH=20.0,
    fail_if_not_working=False,
    max_tries=10000,
):
    """
    
        :arg:displacement: Time in seconds to displace the robot.
    """
    from geometry import SE2_from_SE3, translation_from_SE2, angle_from_SE2, SE3

    stats_write = InAWhile(interval_print)

    # Access the vehicleSimulation interface
    vsim = get_vsim_from_robot(robot)

    for _ in xrange(max_tries):
        # iterate until we can do this correctly
        episode = robot.new_episode()
        locations = get_grid(robot=robot, vsim=vsim, resolution=resolution)

        if len(locations) < MIN_PATH_LENGTH:
            logger.info("Path too short, trying again.")
        else:
            break

    else:
        msg = "Could not find path in %d tries." % max_tries
        raise Exception(msg)

    locations_yaml = convert_to_yaml(locations)

    vsim.vehicle.set_pose(locations[0]["pose"])

    current_goal = 1
    current_goal_obs = locations[current_goal]["observations"]
    servo_agent.set_goal_observations(current_goal_obs)

    counter = 0
    time_last_switch = None

    num_written = 0
    for robot_observations, boot_observations in run_simulation_servonav(
        id_robot,
        robot,
        id_servo_agent,
        servo_agent,
        100000,
        max_episode_len,
        id_episode=id_episode,
        id_environment=episode.id_environment,
        raise_error_on_collision=fail_if_not_working,
    ):

        current_time = boot_observations["timestamp"].item()
        if time_last_switch is None:
            time_last_switch = current_time

        time_since_last_switch = float(current_time - time_last_switch)

        def obs_distance(obs1, obs2):
            return float(np.linalg.norm(obs1.flatten() - obs2.flatten()))

        curr_pose = robot_observations.robot_pose
        curr_obs = boot_observations["observations"]
        curr_goal = locations[current_goal]["observations"]
        prev_goal = locations[current_goal - 1]["observations"]
        curr_err = obs_distance(curr_goal, curr_obs)
        prev_err = obs_distance(prev_goal, curr_obs)
        current_goal_pose = locations[current_goal]["pose"]
        current_goal_obs = locations[current_goal]["observations"]

        delta = SE2_from_SE3(SE3.multiply(SE3.inverse(curr_pose), current_goal_pose))
        delta_t = np.linalg.norm(translation_from_SE2(delta))
        delta_th = np.abs(angle_from_SE2(delta))

        if stats_write.its_time():
            msg = "  deltaT: %.2fm  deltaTh: %.1fdeg" % (delta_t, np.rad2deg(delta_th))
            logger.debug(msg)

        # If at the final goal, require getting closer: use a tighter threshold.
        is_final_goal = current_goal == len(locations) - 1
        switch_threshold = delta_t_threshold * 0.3 if is_final_goal else delta_t_threshold

        # TODO: should we care also about delta_th?
        time_to_switch = (delta_t < switch_threshold) or (time_since_last_switch > MAX_TIME_FOR_SWITCH)
        # does not work: curr_err < SWITCH_THRESHOLD * prev_err:

        if time_to_switch:
            current_goal += 1
            logger.info("Switched to goal %d." % current_goal)

            time_last_switch = current_time
            if current_goal >= len(locations):
                # finished
                logger.info("Finished :-)")
                break

        threshold_lost_m = 3
        if delta_t > threshold_lost_m:
            msg = "Breaking because too far away."
            if not fail_if_not_working:
                logger.error(msg)
                break
            else:
                raise Exception(msg)

        servo_agent.set_goal_observations(current_goal_obs)

        extra = {}
        extra["servoing_base"] = dict(goal=curr_goal.tolist(), current=curr_obs.tolist())

        extra["servoing_poses"] = dict(goal=SE3.to_yaml(current_goal_pose), current=SE3.to_yaml(curr_pose))

        extra["servonav"] = dict(
            poseK=SE3.to_yaml(curr_pose),
            obsK=boot_observations["observations"].tolist(),
            pose1=SE3.to_yaml(current_goal_pose),
            locations=locations_yaml,
            current_goal=current_goal,
            curr_err=curr_err,
            prev_err=prev_err,
            time_last_switch=time_last_switch,
            time_since_last_switch=time_since_last_switch,
        )

        if counter % interval_write == 0:
            if save_robot_state:
                extra["robot_state"] = robot.get_state()

            writer.push_observations(observations=boot_observations, extra=extra)
            num_written += 1
        counter += 1

    if num_written == 0:
        msg = "This log was too short to be written (%d observations)" % counter
        raise Exception(msg)