def from_yaml(spec):
    """Build a StreamSpec from a YAML-derived dict.

    Required keys: 'shape', 'format', 'range'. Optional keys: 'id', 'desc',
    'extra', 'filtered', 'default', 'names'. Unknown keys are tolerated but
    reported with a warning. On any parsing error, the offending spec is
    logged and the exception re-raised.
    """
    try:
        if not isinstance(spec, dict):
            raise ValueError('Expected a dict, got %s' % spec)
        s = dict(**spec)  # work on a copy; pop() below mutates it
        id_stream = s.pop('id', None)
        desc = s.pop('desc', None)
        required = ['shape', 'format', 'range']
        for x in required:
            if x not in s:
                raise ValueError('Missing entry %r.' % x)
        shape = s.pop('shape')
        check('list[>0](int,>0)', shape)  # XXX: slow
        format = s.pop('format')  # @ReservedAssignment
        range = s.pop('range')  # @ReservedAssignment
        extra = s.pop('extra', {})
        filtered = s.pop('filtered', None)
        default = s.pop('default', None)
        names = s.pop('names', None)  # TODO: do something useful with this
        if names:
            extra['names'] = names
        # Anything left over was not consumed: warn, but do not fail.
        if s.keys():
            logger.warning('While reading\n%s\nextra keys detected: %s'
                           % ((spec), s.keys()))
        streamels = streamels_from_spec(shape, format, range, default)
        return StreamSpec(id_stream, streamels, extra, filtered, desc)
    except Exception:
        # Log the offending spec for context, then let the caller handle it.
        # (Was a bare "except:"; Exception avoids intercepting
        # KeyboardInterrupt/SystemExit just to log.)
        logger.error('Error while parsing the StreamSpec:\n%s' % spec)
        raise
def index_directory(directory, ignore_cache=False, warn_if_longer=3):
    ''' Returns a hash filename -> list of streams.

        Indexing errors for a single file are logged (with traceback) and
        that file is skipped, so one corrupt log does not abort the whole
        directory scan.
    '''
    file2streams = {}
    with warn_long_time(warn_if_longer, 'indexing directory %r'
                        % friendly_path(directory)):
        files = get_all_log_files(directory)

    # Shuffle the list so that multiple threads will index different files
    import random
    random.seed()
    random.shuffle(files)

    with warn_long_time(warn_if_longer, 'indexing %d files (use cache: %s)'
                        % (len(files), not ignore_cache)):
        for filename in files:
            reader = LogsFormat.get_reader_for(filename)
            try:
                file2streams[filename] = \
                    reader.index_file_cached(filename,
                                             ignore_cache=ignore_cache)
                for stream in file2streams[filename]:
                    assert isinstance(stream, BootStream)
                if not file2streams[filename]:
                    logger.warning('No streams found in file %r.'
                                   % friendly_path(filename))
            except Exception:
                # BUG FIX: this was "except None:", which is not a valid
                # exception class — any failure inside the try block raised
                # TypeError instead of reaching this handler.
                logger.error('Invalid data in file %r.'
                             % friendly_path(filename))
                logger.error(traceback.format_exc())
    return file2streams
def from_yaml(xo):
    """Build a BootSpec from a YAML-derived dict.

    Required keys: 'observations' and 'commands' (each itself a StreamSpec
    dict). Optional keys: 'extra', 'desc', 'id'. Unknown keys are tolerated
    but reported with a warning. On any parsing error, the offending spec
    is logged and the exception re-raised.
    """
    try:
        if not isinstance(xo, dict):
            raise ValueError('Expected a dict, got %s' % xo)
        x = dict(**xo)  # make a copy
        check_contained('observations', x)
        check_contained('commands', x)
        observations = StreamSpec.from_yaml(x.pop('observations'))
        commands = StreamSpec.from_yaml(x.pop('commands'))
        extra = x.pop('extra', None)
        desc = x.pop('desc', None)
        id_robot = x.pop('id', None)
        # Anything left over was not consumed: warn, but do not fail.
        if x.keys():
            logger.warning('While reading\n%s\nextra keys detected: %s'
                           % (pformat(xo), x.keys()))
        return BootSpec(observations, commands, id_robot=id_robot,
                        extra=extra, desc=desc)
    except Exception:
        # Log the offending spec for context, then let the caller handle it.
        # (Was a bare "except:"; Exception avoids intercepting
        # KeyboardInterrupt/SystemExit just to log.)
        logger.error('Error while parsing the BootSpec:\n%s' % xo)
        raise
def __init__(self, robot, obs_nuisance=None, cmd_nuisance=None):
    """Wrap a robot with chains of observation/command nuisances.

    :param robot: spec of the inner robot, instanced via the boot config.
    :param obs_nuisance: nuisance id, list of ids, or None (no nuisances)
                         applied to observations.
    :param cmd_nuisance: nuisance id, list of ids, or None (no nuisances)
                         applied to commands.
    :raises ValueError: if the instanced robot is not a RobotInterface.
    """
    # Avoid mutable default arguments ([] was shared across calls);
    # None means "no nuisances".
    if obs_nuisance is None:
        obs_nuisance = []
    if cmd_nuisance is None:
        cmd_nuisance = []

    self.inner_robot_name = robot

    boot_config = get_boot_config()
    id_robot, self.robot = boot_config.robots.instance_smarter(robot)

    if not isinstance(self.robot, RobotInterface):
        msg = 'Expected RobotInterface, got %s' % describe_type(self.robot)
        raise ValueError(msg)

    warnings.warn('handle the case better')

    self.desc = ('EquivRobot(%s,obs:%s,cmd:%s)'
                 % (id_robot, obs_nuisance, cmd_nuisance))

    # convert to (possibly empty) list of strings
    if isinstance(obs_nuisance, str):
        obs_nuisance = [obs_nuisance]
    if isinstance(cmd_nuisance, str):
        cmd_nuisance = [cmd_nuisance]

    # Instance each nuisance id through the boot config.
    def instance(y):
        return boot_config.nuisances.instance_smarter(y)[1]

    self.obs_nuisances = [instance(x) for x in obs_nuisance]
    self.cmd_nuisances = [instance(x) for x in cmd_nuisance]

    # No - we should not call inverse() before transform_spec()
    obs_spec = self.robot.get_spec().get_observations()
    for n in self.obs_nuisances:
        obs_spec = n.transform_spec(obs_spec)

    cmd_spec = self.robot.get_spec().get_commands()
    for n in self.cmd_nuisances:
        cmd_spec = n.transform_spec(cmd_spec)

    # We don't really need to compute this...
    try:
        self.cmd_nuisances_inv = [x.inverse() for x in self.cmd_nuisances]
        # now initialize in reverse
        cmd_spec_i = cmd_spec
        for n in reversed(self.cmd_nuisances_inv):
            cmd_spec_i = n.transform_spec(cmd_spec_i)
        StreamSpec.check_same_spec(cmd_spec_i,
                                   self.robot.get_spec().get_commands())
        # TODO: why do we do this for commands, not for observations?
    except Exception as e:
        logger.warning('It seems that this chain of nuisances is not '
                       'exact, but it could be OK to continue. '
                       ' The chain is %s; the error is:\n%s'
                       % (cmd_nuisance, indent(str(e).strip(), '> ')))

    self.spec = BootSpec(obs_spec=obs_spec, cmd_spec=cmd_spec)

    self.obs_nuisances_id = obs_nuisance
    self.cmd_nuisances_id = cmd_nuisance
def get_all_log_files(directory):
    ''' Returns all log files in the directory, for all registered
        extensions. '''
    extensions = LogsFormat.formats.keys()
    found = []
    for ext in extensions:
        found.extend(locate_files(directory, '*.%s' % ext))
    # Nothing matched any registered extension: warn (but still return []).
    if not found:
        msg = ('No log files found in %r (extensions: %s).'
               % (friendly_path(directory), extensions))
        logger.warning(msg)
    return found
def index_file_cached(self, filename, ignore_cache=False):
    """Index a log file, caching the result in '<filename>.index_cache'.

    The cache is used only when it exists, ignore_cache is False, and it is
    at least as recent as the log file itself (this resolves the original
    "TODO: mtime"). An unreadable cache is deleted (best-effort) and the
    file re-indexed.
    """
    cache = '%s.index_cache' % filename
    if os.path.exists(cache) and not ignore_cache:
        try:
            # Only trust a cache that is not older than the log file;
            # otherwise fall through and re-index.
            if os.path.getmtime(cache) >= os.path.getmtime(filename):
                return safe_pickle_load(cache)
        except Exception as e:
            msg = ('Could not unpickle cache %r, deleting.'
                   % friendly_path(cache))
            msg += '\n%s' % e
            logger.warning(msg)
            try:
                os.unlink(cache)
            except OSError:
                # Best-effort deletion; was a bare "except: pass".
                pass
    logger.debug('Indexing file %r' % friendly_path(filename))
    res = self.index_file(filename)
    for stream in res:
        assert isinstance(stream, BootStream)
    logger.debug('Now dumping file %r' % friendly_path(cache))
    with warn_long_time(1, 'dumping %r' % friendly_path(cache)):
        safe_pickle_dump(res, cache, protocol=2)
    return res
""" Functions for dealing with HDF logs. """ try: import tables except ImportError as e: boot_has_hdf = False hdf_error = e from bootstrapping_olympics import logger logger.warning("PyTables/HDF support not available (%s)." % e) else: boot_has_hdf = True hdf_error = None from .utils import * from .index import * from .read import * from .write import * from .interface import *
# Optional dependency guard: reprep provides report generation.
# If it is missing, record the failure instead of breaking the import,
# so callers can check boot_has_reprep / reprep_error at runtime.
try:
    import reprep
except ImportError as e:
    boot_has_reprep = False
    reprep_error = e  # kept so callers can report *why* RepRep is unavailable
    from bootstrapping_olympics import logger
    logger.warning('RepRep support not available (%s).' % e)
else:
    boot_has_reprep = True
    reprep_error = None
    # Re-export the publisher only when reprep imported cleanly.
    from .reprep_publisher import *
def boot_olympics_manager(arguments):
    """Command-line entry point for the boot_olympics_manager tool.

    Parses global options, configures numpy/contracts/directories, then
    dispatches to the subcommand named by the first positional argument.

    :param arguments: argv-style list (excluding the program name).
    :raises UserError: if no command is given or the command is unknown.
    """
    usage = substitute(usage_pattern, commands_list=commands_list,
                       cmd='boot_olympics_manager')
    parser = OptionParser(prog='boot_olympics_manager', usage=usage)
    parser.disable_interspersed_args()
    parser.add_option("-d", dest='boot_root', default=None,
                      help='Root directory with logs, config, etc. '
                           '[%default]')
    parser.add_option("-c", dest='extra_conf_dirs', action="append",
                      default=[], help='Adds an extra config dir.')
    parser.add_option("-l", dest='extra_log_dirs', action="append",
                      default=[],
                      help='Adds an extra directory storing logs.')
    parser.add_option("--contracts", default=False, action='store_true',
                      help="Activate PyContracts (disabled by default)")
    parser.add_option("--seterr", dest='seterr', default="warn",
                      help="Sets np.seterr. "
                           "Possible values: ignore, warn, raise, print, log")
    parser.add_option("--profile", default=False, action='store_true',
                      help="Use Python profiler")
    available = LogsFormat.formats.keys()
    parser.add_option("--logformat", dest='log_format',
                      default=BootOlympicsConstants.DEFAULT_LOG_FORMAT,
                      help="Choose format for writing logs in %s. [%%default]"
                           % str(available))
    (options, args) = parser.parse_args(arguments)

    if not args:
        msg = ('Please supply command. Available: %s'
               % ", ".join(Storage.commands.keys()))
        raise UserError(msg)

    cmd = args[0]
    cmd_options = args[1:]

    if cmd not in Storage.commands:
        msg = ('Unknown command %r. Available: %s.'
               % (cmd, ", ".join(Storage.commands.keys())))
        raise UserError(msg)

    np.seterr(all=options.seterr)
    # underflow is very common in all libraries (e.g. matplotlib),
    # so only warn on it regardless of the global --seterr setting
    np.seterr(under='warn')

    if not options.contracts:
        contracts.disable_all()

    if options.boot_root is None:
        options.boot_root = DirectoryStructure.DEFAULT_ROOT
        logger.info('Using %r as default root directory '
                    '(use -d <dir> to change)' % options.boot_root)

    data_central = DataCentral(options.boot_root)

    GlobalConfig.global_load_dir('default')  # need skins
    for dirname in options.extra_conf_dirs:
        GlobalConfig.global_load_dir(dirname)

    dir_structure = data_central.get_dir_structure()
    dir_structure.set_log_format(options.log_format)
    for dirname in options.extra_log_dirs:
        dir_structure.add_log_directory(dirname)

    # Dispatch target; wrapped in a closure so the profiler can run it by name.
    def go():
        return Storage.commands[cmd](data_central, cmd_options)

    if not options.profile:
        go()
    else:
        logger.warning('Note: the profiler does not work when using '
                       'parallel execution. (use "make" instead of '
                       '"parmake").')
        import cProfile
        cProfile.runctx('go()', globals(), locals(), 'bom_prof')
        import pstats
        p = pstats.Stats('bom_prof')
        p.sort_stats('cumulative').print_stats(30)
        p.sort_stats('time').print_stats(30)