def __init__(self, **kwargs):
    """
    See :class:`~lab.reports.Report` for inherited parameters.

    Runs can be selected and modified with :py:class:`filters <.Report>`,
    e.g., to restrict the report to a subset of algorithms or to compute
    new attributes. Passing a list for *filter_algorithm* additionally
    fixes the order in which algorithms appear in the report.

    >>> # Use a filter function to select algorithms.
    >>> def only_blind_and_lmcut(run):
    ...     return run['algorithm'] in ['blind', 'lmcut']
    >>> report = PlanningReport(filter=only_blind_and_lmcut)

    >>> # Use "filter_algorithm" to select and *order* algorithms.
    >>> r = PlanningReport(filter_algorithm=['lmcut', 'blind'])

    :py:class:`Filters <.Report>` can be very helpful so we recommend
    reading up on them to use their full potential.
    """
    # Normalize the requested attributes and apply non-default settings
    # (e.g. min_wins, aggregation function) where appropriate.
    raw_attributes = tools.make_list(kwargs.get('attributes') or [])
    kwargs['attributes'] = [
        self._prepare_attribute(attribute) for attribute in raw_attributes
    ]

    # If "filter_algorithm" was passed, remember the given algorithm order.
    self.filter_algorithm = tools.make_list(
        kwargs.get('filter_algorithm', []))
    Report.__init__(self, **kwargs)
def __init__(self, derived_properties=None, **kwargs):
    """
    See :py:class:`Report <lab.reports.Report>` for inherited parameters.

    *derived_properties* must be a function or a list of functions taking
    a single argument. This argument is a list of problem runs i.e. it
    contains one run-dictionary for each config in the experiment. The
    function is called for every problem in the suite. A function that
    computes the IPC score based on the results of the experiment is added
    automatically to the *derived_properties* list and serves as an
    example here:

    .. literalinclude:: ../downward/reports/__init__.py
       :pyobject: quality

    You can include only specific domains or configurations by using
    :py:class:`filters <.Report>`. If you provide a list for
    *filter_config* or *filter_config_nick*, it will be used to determine
    the order of configurations in the report. ::

        # Use a filter function.
        def only_blind_and_lmcut(run):
            return run['config'] in ['WORK-blind', 'WORK-lmcut']
        PlanningReport(filter=only_blind_and_lmcut)

        # Filter with a list and set the order of the configs.
        PlanningReport(filter_config=['WORK-lmcut', 'WORK-blind'])
        PlanningReport(filter_config_nick=['lmcut', 'blind'])

    Tip: When you append ``_relative`` to an attribute, you will get a
    table containing the attribute's values of each configuration relative
    to the leftmost column.
    """
    # Allow specifying a single property function or a list of them.
    # Use the callable() builtin instead of probing for "__call__".
    if callable(derived_properties):
        derived_properties = [derived_properties]
    self.derived_properties = derived_properties or []

    # Normalize the requested attributes and apply non-default settings
    # for some of them.
    attributes = tools.make_list(kwargs.get('attributes') or [])
    kwargs['attributes'] = [self._prepare_attribute(attr)
                            for attr in attributes]
    self._handle_relative_attributes(kwargs['attributes'])

    # Remember the order of the configs if it is given as a keyword
    # argument filter.
    self.filter_config = tools.make_list(kwargs.get('filter_config') or [])
    self.filter_config_nick = tools.make_list(
        kwargs.get('filter_config_nick') or [])
    Report.__init__(self, **kwargs)
    # The IPC quality score is always computed as a derived property.
    self.derived_properties.append(quality)
def __init__(self, **kwargs):
    """
    See :class:`~lab.reports.Report` for inherited parameters.

    Runs can be selected and modified with :py:class:`filters <.Report>`,
    e.g., to restrict the report to a subset of algorithms or to compute
    new attributes. Passing a list for *filter_algorithm* additionally
    fixes the order in which algorithms appear in the report.

    >>> # Use a filter function to select algorithms.
    >>> def only_blind_and_lmcut(run):
    ...     return run["algorithm"] in ["blind", "lmcut"]
    ...
    >>> report = PlanningReport(filter=only_blind_and_lmcut)

    >>> # Use "filter_algorithm" to select and *order* algorithms.
    >>> report = PlanningReport(filter_algorithm=["lmcut", "blind"])

    :py:class:`Filters <.Report>` can be very helpful so we recommend
    reading up on them to use their full potential.

    Subclasses can use the member variable ``problem_runs`` to access the
    experiment data. It is a dictionary mapping from tasks (i.e.,
    ``(domain, problem)`` pairs) to the runs for that task. Each run is a
    dictionary that maps from attribute names to values.

    >>> class MinRuntimePerTask(PlanningReport):
    ...     def get_text(self):
    ...         map = {}
    ...         for (domain, problem), runs in self.problem_runs.items():
    ...             times = [run.get("planner_time") for run in runs]
    ...             times = [t for t in times if t is not None]
    ...             map[(domain, problem)] = min(times) if times else None
    ...         return str(map)
    ...
    """
    # Normalize the requested attributes and apply non-default settings
    # for some of them.
    requested = tools.make_list(kwargs.get("attributes"))
    kwargs["attributes"] = [
        self._prepare_attribute(attribute) for attribute in requested
    ]

    # If "filter_algorithm" was passed, remember the given algorithm order.
    self.filter_algorithm = tools.make_list(kwargs.get("filter_algorithm"))
    Report.__init__(self, **kwargs)
def __init__(
    self,
    name,
    absolute=False,
    min_wins=True,
    function=None,
    functions=None,
    scale=None,
    digits=2,
):
    """
    Use this class if your **custom** attribute needs a non-default value
    for:

    * *absolute*: if False, only include tasks for which all task runs
      have values in a per-domain table (e.g. ``coverage`` is absolute,
      whereas ``expansions`` is not, because we can't compare algorithms
      A and B for task X if B has no value for ``expansions``).
    * *min_wins*: set to True if a smaller value for this attribute is
      better, to False if a higher value is better and to None if values
      can't be compared. (E.g., *min_wins* is False for ``coverage``,
      but it is True for ``expansions``).
    * *function*: the function used to aggregate values of multiple runs
      for this attribute, for example, in domain reports. Defaults to
      :py:func:`sum`.
    * *functions*: deprecated. Pass a single *function* instead.
    * *scale*: default scaling. Can be one of "linear", "log" and
      "symlog". If *scale* is None (default), the reports will choose
      the scaling.
    * *digits*: number of digits after the decimal point.

    The ``downward`` package automatically uses appropriate settings for
    most attributes.

    >>> avg_h = Attribute("avg_h", min_wins=False)
    >>> abstraction_done = Attribute(
    ...     "abstraction_done", absolute=True, min_wins=False
    ... )
    """
    self.absolute = absolute
    self.min_wins = min_wins
    # Resolve the aggregation function from either *function* or the
    # deprecated *functions* parameter; fall back to sum().
    aggregator = get_aggregation_function(
        function, tools.make_list(functions))
    self.function = aggregator or sum
    self.scale = scale
    self.digits = digits
def __init__(self, attributes=None, format="html", filter=None, **kwargs):
    """
    Inherit from this or a child class to implement a custom report.
    Depending on the type of output you want to make, you will have to
    overwrite the :meth:`.write`, :meth:`.get_text` or
    :meth:`.get_markup` method.

    *attributes* is the list of attributes you want to include in your
    report. If omitted, use all numerical attributes. Globbing
    characters * and ? are allowed. Example:

    >>> report = Report(attributes=["coverage", "translator_*"])

    When a report is made, both the available and the selected
    attributes are printed on the commandline.

    *format* can be one of e.g. html, tex, wiki (MediaWiki), doku
    (DokuWiki), pmw (PmWiki), moin (MoinMoin) and txt (Plain text).
    Subclasses may allow additional formats.

    If given, *filter* must be a function or a list of functions that
    are passed a dictionary of a run's attribute keys and values.
    Filters must return True, False or a new dictionary. Depending on
    the returned value, the run is included or excluded from the
    report, or replaced by the new dictionary, respectively.

    Filters for properties can be given in shorter form without
    defining a function. To include only runs where attribute ``foo``
    has value v, use ``filter_foo=v``. To include only runs where
    attribute ``foo`` has value v1, v2 or v3, use
    ``filter_foo=[v1, v2, v3]``.

    Filters are applied sequentially, i.e., the first filter is applied
    to all runs before the second filter is executed. Filters given as
    ``filter_*`` kwargs are applied *after* all filters passed via the
    ``filter`` kwarg.

    Examples:

    Include only the "cost" attribute in a LaTeX report:

    >>> report = Report(attributes=["cost"], format="tex")

    Only include successful runs in the report:

    >>> report = Report(filter_coverage=1)

    Only include runs in the report where the initial h value is at
    most 100:

    >>> def low_init_h(run):
    ...     return run["initial_h_value"] <= 100
    ...
    >>> report = Report(filter=low_init_h)

    Only include runs from "blocks" and "barman" with a timeout:

    >>> report = Report(filter_domain=["blocks", "barman"], filter_search_timeout=1)

    Add a new attribute:

    >>> def add_expansions_per_time(run):
    ...     expansions = run.get("expansions")
    ...     time = run.get("search_time")
    ...     if expansions is not None and time:
    ...         run["expansions_per_time"] = expansions / time
    ...     return run
    ...
    >>> report = Report(
    ...     attributes=["expansions_per_time"], filter=[add_expansions_per_time]
    ... )

    Rename, filter and sort algorithms:

    >>> def rename_algorithms(run):
    ...     name = run["algorithm"]
    ...     paper_names = {"lama11": "LAMA 2011", "fdss_sat1": "FDSS 1"}
    ...     run["algorithm"] = paper_names[name]
    ...     return run
    ...
    >>> # We want LAMA 2011 to be the leftmost column.
    >>> # filter_* filters are evaluated last, so we use the updated
    >>> # algorithm names here.
    >>> algorithms = ["LAMA 2011", "FDSS 1"]
    >>> report = Report(filter=rename_algorithms, filter_algorithm=algorithms)
    """
    self.attributes = tools.make_list(attributes)
    # Accept all txt2tags targets plus the graphics/script formats that
    # the plotting reports produce.
    supported_formats = txt2tags.TARGETS + ["eps", "pdf", "pgf", "png", "py"]
    if format not in supported_formats:
        raise ValueError(f"invalid format: {format}")
    self.output_format = format
    self.toc = True
    # Combine the "filter" callables with any "filter_*" kwargs into a
    # single run filter (filter_* kwargs are applied last).
    self.run_filter = tools.RunFilter(filter, **kwargs)