def __init__(self, **kwargs):
    """
    See :class:`~lab.reports.Report` for inherited parameters.

    Runs can be filtered and modified with :py:class:`filters
    <.Report>`, e.g. to restrict the report to a subset of
    algorithms or to compute new attributes. Passing a list as
    *filter_algorithm* additionally fixes the order in which the
    algorithms appear in the report.

    >>> # Use a filter function to select algorithms.
    >>> def only_blind_and_lmcut(run):
    ...     return run['algorithm'] in ['blind', 'lmcut']
    >>> report = PlanningReport(filter=only_blind_and_lmcut)

    >>> # Use "filter_algorithm" to select and *order* algorithms.
    >>> r = PlanningReport(filter_algorithm=['lmcut', 'blind'])

    :py:class:`Filters <.Report>` can be very helpful so we
    recommend reading up on them to use their full potential.
    """
    # Canonicalize every requested attribute (predefined attributes
    # carry extra options such as aggregation functions).
    kwargs["attributes"] = [
        self._prepare_attribute(attribute)
        for attribute in tools.make_list(kwargs.get("attributes"))
    ]
    # A list given as *filter_algorithm* also fixes the column order.
    self.filter_algorithm = tools.make_list(kwargs.get("filter_algorithm"))
    Report.__init__(self, **kwargs)
def __init__(self, derived_properties=None, **kwargs):
    """
    See :py:class:`Report <lab.reports.Report>` for inherited parameters.

    *derived_properties* must be a function or a list of functions taking
    a single argument. This argument is a list of problem runs i.e. it
    contains one run-dictionary for each config in the experiment. The
    function is called for every problem in the suite. A function that
    computes the IPC score based on the results of the experiment is added
    automatically to the *derived_properties* list and serves as an
    example here:

    .. literalinclude:: ../downward/reports/__init__.py
       :pyobject: quality

    You can include only specific domains or configurations by using
    :py:class:`filters <.Report>`. If you provide a list for
    *filter_config* or *filter_config_nick*, it will be used to determine
    the order of configurations in the report. ::

        # Use a filter function.
        def only_blind_and_lmcut(run):
            return run['config'] in ['WORK-blind', 'WORK-lmcut']
        PlanningReport(filter=only_blind_and_lmcut)

        # Filter with a list and set the order of the configs.
        PlanningReport(filter_config=['WORK-lmcut', 'WORK-blind'])
        PlanningReport(filter_config_nick=['lmcut', 'blind'])

    Tip: When you append ``_relative`` to an attribute, you will get a
    table containing the attribute's values of each configuration relative
    to the leftmost column.
    """
    # Allow specifying a single property function or a list of them.
    if callable(derived_properties):
        derived_properties = [derived_properties]
    # Copy the list: *quality* is appended below and we must not
    # mutate a list object owned by the caller.
    self.derived_properties = list(derived_properties or [])
    # Set non-default options for some attributes.
    attributes = tools.make_list(kwargs.get('attributes') or [])
    kwargs['attributes'] = [self._prepare_attribute(attr)
                            for attr in attributes]
    self._handle_relative_attributes(kwargs['attributes'])
    # Remember the order of the configs if it is given as a keyword
    # argument filter.
    self.filter_config = tools.make_list(kwargs.get('filter_config') or [])
    self.filter_config_nick = tools.make_list(
        kwargs.get('filter_config_nick') or [])
    Report.__init__(self, **kwargs)
    # The IPC quality score is always computed.
    self.derived_properties.append(quality)
def __init__(self, **kwargs):
    """
    See :class:`~lab.reports.Report` for inherited parameters.

    Runs can be filtered and modified with :py:class:`filters
    <.Report>`, e.g. to restrict the report to a subset of
    algorithms or to compute new attributes. Passing a list as
    *filter_algorithm* additionally fixes the order in which the
    algorithms appear in the report.

    >>> # Use a filter function to select algorithms.
    >>> def only_blind_and_lmcut(run):
    ...     return run["algorithm"] in ["blind", "lmcut"]
    ...
    >>> report = PlanningReport(filter=only_blind_and_lmcut)

    >>> # Use "filter_algorithm" to select and *order* algorithms.
    >>> report = PlanningReport(filter_algorithm=["lmcut", "blind"])

    :py:class:`Filters <.Report>` can be very helpful so we
    recommend reading up on them to use their full potential.

    Subclasses can use the member variable ``problem_runs`` to access
    the experiment data. It is a dictionary mapping from tasks (i.e.,
    ``(domain, problem)`` pairs) to the runs for that task. Each run
    is a dictionary that maps from attribute names to values.

    >>> class MinRuntimePerTask(PlanningReport):
    ...     def get_text(self):
    ...         map = {}
    ...         for (domain, problem), runs in self.problem_runs.items():
    ...             times = [run.get("planner_time") for run in runs]
    ...             times = [t for t in times if t is not None]
    ...             map[(domain, problem)] = min(times) if times else None
    ...         return str(map)
    ...
    """
    # Canonicalize every requested attribute (predefined attributes
    # carry extra options such as aggregation functions).
    requested = tools.make_list(kwargs.get("attributes"))
    kwargs["attributes"] = [
        self._prepare_attribute(attribute) for attribute in requested
    ]
    # A list given as *filter_algorithm* also fixes the column order.
    self.filter_algorithm = tools.make_list(kwargs.get("filter_algorithm"))
    Report.__init__(self, **kwargs)
def _prepare_attribute(self, attr):
    """Resolve *attr* to a predefined Attribute instance if possible.

    Plain strings are looked up in ``self.ATTRIBUTES`` first by exact
    name, then by glob pattern (also matching the ``_relative``
    variant); otherwise the base-class handling applies.
    """
    if not isinstance(attr, Attribute):
        predefined = self.ATTRIBUTES.get(attr)
        if predefined is not None:
            return predefined
        for pattern in self.ATTRIBUTES.values():
            matches = (fnmatch(attr, pattern) or
                       fnmatch(attr, pattern + '_relative'))
            if matches:
                # Inherit the pattern's options under the new name.
                return pattern.copy(attr)
    return Report._prepare_attribute(self, attr)
def _prepare_attribute(self, attr):
    """Resolve *attr* to a predefined Attribute instance if possible.

    Plain strings are matched against ``self.PREDEFINED_ATTRIBUTES``
    first by exact name, then by glob pattern; otherwise the
    base-class handling applies.
    """
    if not isinstance(attr, Attribute):
        # Index the predefined attributes by their string names.
        by_name = {str(pre): pre for pre in self.PREDEFINED_ATTRIBUTES}
        if attr in by_name:
            return by_name[attr]
        for pattern in by_name.values():
            if fnmatch(attr, pattern):
                # Inherit the pattern's options under the new name.
                return pattern.copy(attr)
    return Report._prepare_attribute(self, attr)
def get_text(self):
    """Return the rendered report markup.

    As a side effect, log a warning for every run whose *error*
    property starts with "unexplained", followed by a summary count.
    """
    markup = Report.get_text(self)
    unexplained_errors = 0
    for run in self.runs.values():
        if run.get('error', '').startswith('unexplained'):
            # Lazy %-style args: the message is only formatted if the
            # warning is actually emitted.
            logging.warning(
                "Unexplained error in '%s': %s",
                run.get('run_dir'), run.get('error'))
            unexplained_errors += 1
    if unexplained_errors:
        logging.warning(
            'There were %s runs with unexplained errors.',
            unexplained_errors)
    return markup
def __init__(self, **kwargs):
    """Forward all keyword arguments to the Report constructor."""
    Report.__init__(self, **kwargs)
def _scan_data(self):
    """Collect planning-specific data, then run the generic base scan."""
    # The planning scan must happen first so that the data it gathers
    # is available when the base class processes the runs.
    self._scan_planning_data()
    Report._scan_data(self)
class PiReport(Report):
    """Write one "<time> <diff>" line per run, suitable for plotting."""

    def get_text(self):
        return '\n'.join(
            '%s %s' % (run['time'], run['diff'])
            for run in self.props.values())


exp = Experiment(EXPPATH)
exp.add_resource('PARSER', 'pi-parser-ext.py', 'pi-parser.py')
exp.add_resource('CALC', 'calculate.py', 'calculate.py')

# One run per iteration count.
for num_rounds in [1, 5, 10, 50, 100, 500, 1000, 5000, 10000]:
    run = exp.add_run()
    run.require_resource('PARSER')
    run.require_resource('CALC')
    run.add_command(
        'calc-pi', ['CALC', num_rounds], time_limit=10, mem_limit=1024)
    run.add_command('parse-pi', ['PARSER'])
    run.set_property('id', ['calc-%d' % num_rounds])


def good(run):
    """Filter predicate: keep runs whose 'diff' is at most 0.01."""
    return run['diff'] <= 0.01


exp.add_step(Step(
    'report',
    Report(format='html', attributes=['pi', 'diff'], filter=good),
    exp.eval_dir,
    os.path.join(exp.eval_dir, 'report.html')))
exp.add_step(Step(
    'plot',
    PiReport(),
    exp.eval_dir,
    os.path.join(exp.eval_dir, 'plot.dat')))

# Parse the command line and run the requested steps.
exp()
def _scan_data(self):
    """Collect planning data and derived properties, then run the base scan."""
    # The planning scan must happen first so the derived-property
    # functions see the gathered runs.
    self._scan_planning_data()
    # Compute derived properties (e.g. the IPC quality score) before
    # the base class processes the data.
    self._compute_derived_properties()
    Report._scan_data(self)
from lab.environments import LocalEnvironment
from lab.experiment import Step
from lab.reports import Report

# Experiment name and location; everything lives under /tmp.
EXPNAME = 'simple-exp'
EXPPATH = os.path.join('/tmp', EXPNAME)
ENV = LocalEnvironment()

# Create a new experiment.
exp = Experiment(path=EXPPATH, environment=ENV)
# Ship the parser script into the experiment directory.
exp.add_resource('SIMPLE_PARSER', 'simple-parser.py', 'simple-parser.py')
reportfile = os.path.join(exp.eval_dir, EXPNAME + '.html')

# Single run: list the current directory, then parse the output.
run = exp.add_run()
run.add_command('list-dir', ['ls', '-l'])
# Every run has to have an id in the form of a list.
run.set_property('id', ['current-dir'])
run.require_resource('SIMPLE_PARSER')
run.add_command('parse', ['SIMPLE_PARSER'])

# Make a default report over the parsed attributes.
exp.add_report(Report(attributes=['number_of_files', 'first_number']),
               outfile=reportfile)

# Compress the experiment directory.
exp.add_step(Step.zip_exp_dir(exp))

# Parse the commandline and run the specified steps.
exp()
def __init__(self, *args, **kwargs):
    """Delegate all positional and keyword arguments to Report."""
    Report.__init__(self, *args, **kwargs)