import os

# The project-internal names used below (expand_environment,
# DirectoryStructure, get_boot_config, GlobalConfig, friendly_path, logger,
# LogIndex, LearningStateDB) are assumed to come from the surrounding package.


class DataCentral(object):
    def __init__(self, boot_root=None):
        boot_root = expand_environment(boot_root)
        # Important: store boot_root as given; this object can be
        # deserialized from somewhere else.
        self.boot_root = boot_root
        self.root = os.path.realpath(boot_root)
        self.dir_structure = DirectoryStructure(self.root)
        self.states_db = None
        self.log_index = None
        self.bo_config = None

    def get_boot_root(self):
        return self.root
        
    def __repr__(self):
        return 'DataCentral(root=%r)' % self.root

    def get_bo_config(self):
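        # Load the boot configuration lazily, on first request.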
        if self.bo_config is None:
            self.bo_config = get_boot_config()
            
            dirs = self.dir_structure.get_config_directories()
            for dirname in dirs:
                if not os.path.exists(dirname):
                    msg = ('The config dir %r does not exist.' %
                           friendly_path(dirname))
                    logger.warning(msg)
                else:
                    GlobalConfig.global_load_dir(dirname)
        return self.bo_config

    def get_log_index(self, ignore_cache=False):
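        # Built once and memoized; ignore_cache only has an effect on the
        # first call, before the index exists.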
        if self.log_index is None:
            self.log_index = LogIndex()
            log_directories = self.dir_structure.get_log_directories()
            for dirname in log_directories:
                self.log_index.index(dirname, ignore_cache=ignore_cache)
        return self.log_index

    def get_agent_state_db(self):
        if self.states_db is None:
            state_db_dir = self.dir_structure.get_state_db_directory()
            self.states_db = LearningStateDB(state_db_dir)
        return self.states_db

    def get_dir_structure(self):
        return self.dir_structure
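
# A minimal usage sketch for DataCentral (not part of the original source).
# The boot root below is hypothetical; the getters are the ones defined above.
def _datacentral_usage_demo():
    dc = DataCentral(boot_root='/tmp/boot_root')
    print(dc)                           # repr shows the resolved root
    dc.get_bo_config()                  # loads config dirs once, then memoized
    index = dc.get_log_index()          # scans log dirs, building the index
    states_db = dc.get_agent_state_db()
    return index, states_db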


from optparse import OptionParser

# 'usage' and the convert() helper are defined at module level in the
# original source.


def rosbag2h5(pargs):
    parser = OptionParser(usage=usage)
    parser.disable_interspersed_args()
    parser.add_option("-l", dest='log_directory',
                      default="~/.ros/log", # XXX: log
                      help="Log directory [%default].")

    (options, args) = parser.parse_args(pargs)

    if args:
        msg = 'Spurious arguments.'
        raise Exception(msg)

    index = LogIndex()
    index.index(os.path.expanduser(options.log_directory))

    logger.info('Found %s files.' % len(index.file2streams))
    for filename, streams in index.file2streams.items():
        if len(streams) != 1:
            msg = ('Cannot deal with %d streams per file %r.' %
                   (len(streams), filename))
            logger.error(msg)
            continue
        convert(streams[0])
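
# Example invocation (hypothetical log directory; a sketch, not part of the
# original source):
#
#     rosbag2h5(['-l', '/path/to/ros/logs'])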


import shutil
import tempfile

import numpy as np
from numpy.testing import assert_equal  # assumed source of assert_equal


def check_logs_writing(id_agent, agent, id_robot, robot):
    try:
        agent.init(robot.get_spec())
    except UnsupportedSpec:
        return

    root = tempfile.mkdtemp()
    ds = DirectoryStructure(root)
    id_stream = unique_timestamp_string()
    filename = ds.get_simlog_filename(id_agent, id_robot, id_stream)

    written = []
    written_extra = []

    logs_format = LogsFormat.get_reader_for(filename)
    with logs_format.write_stream(filename=filename,
                                  id_stream=id_stream,
                                  boot_spec=robot.get_spec()) as writer:
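        # Run a short simulation, pushing each observation to the writer
        # and keeping local copies to verify the round trip below.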
        for observations in run_simulation(id_robot=id_robot,
                                           robot=robot,
                                           id_agent=id_agent,
                                           agent=agent,
                                           max_observations=3, max_time=1000,
                                           check_valid_values=True):
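            # Note: np.random.randint(1) can only return 0, so the extra
            # payload written here is deterministic.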
            extra = {'random_number': np.random.randint(1)}
            writer.push_observations(observations, extra)
            written_extra.append(extra)
            written.append(observations)

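    # First pass: index each directory from scratch (this also writes the
    # per-directory index cache used below).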
    logdirs = ds.get_log_directories()
    index = LogIndex()
    for logdir in logdirs:
        index.index(logdir)

    # now use the cached version        
    index = LogIndex()
    for logdir in logdirs:
        index.index(logdir)

    assert index.has_streams_for_robot(id_robot)
    streams = index.get_streams_for_robot(id_robot)
    assert len(streams) == 1
    stream = streams[0]
    assert isinstance(stream, BootStream)

    assert stream.get_spec() == robot.get_spec()

    read_back = []
    read_back_extra = []
    for observations2 in stream.read(read_extra=True):
        read_back_extra.append(observations2['extra'])
        read_back.append(observations2)

    if len(read_back) != len(written):
        raise Exception('Written %d, read back %d.' % 
                        (len(written), len(read_back)))

    for i in range(len(read_back)):
        a = written[i]
        b = read_back[i]

        fields = set(a.dtype.names) | set(b.dtype.names)
        fields.remove('extra')
        for field in fields:
            assert_equal(a[field], b[field])

    for i in range(len(read_back)):
        a = written_extra[i]
        b = read_back_extra[i]
        assert_equal(a, b)

    shutil.rmtree(root)