Example #1
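# Context sketch (assumption): the two methods below come from Lab report
# classes and are not self-contained. They rely roughly on the following
# imports, which the original excerpt omits:
from collections import defaultdict
import itertools
import logging

from lab import tools
from lab.reports import CellFormatter, Table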
    def _get_warnings_text_and_table(self):
        """
        Return a :py:class:`Table <lab.reports.Table>` containing one line for
        each run where an unexplained error occurred.
        """
        if not self.ERROR_ATTRIBUTES:
            # Lab configures logging so that critical messages abort the program.
            logging.critical("The list of error attributes must not be empty.")

        table = Table(title="Unexplained errors")
        table.set_column_order(self.ERROR_ATTRIBUTES)

        # Note whether any run flagged "output-to-slurm.err" as an unexplained error.
        wrote_to_slurm_err = any(
            "output-to-slurm.err" in run.get("unexplained_errors", [])
            for run in self.runs.values()
        )

        # Add one table row per run with unexplained errors.
        for run in self.runs.values():
            error_message = tools.get_unexplained_errors_message(run)
            if error_message:
                logging.error(error_message)
                run_dir = run["run_dir"]
                for attr in self.ERROR_ATTRIBUTES:
                    value = run.get(attr, "?")
                    if attr == "unexplained_errors":
                        value = self._format_unexplained_errors(value)
                        # Use formatted value as-is.
                        table.cell_formatters[run_dir][attr] = CellFormatter()
                    table.add_cell(run_dir, attr, value)

        errors = []

        if wrote_to_slurm_err:
            # Derive the slurm.err path from the eval dir:
            # "<name>-eval" -> "<name>-grid-steps/slurm.err".
            src_dir = self.eval_dir.rstrip("/")[: -len("-eval")]
            slurm_err_file = src_dir + "-grid-steps/slurm.err"
            try:
                slurm_err_content = tools.get_slurm_err_content(src_dir)
            except OSError:
                slurm_err_content = (
                    "The slurm.err file was missing while creating the report."
                )
            else:
                slurm_err_content = tools.filter_slurm_err_content(slurm_err_content)

            logging.error("There was output to {slurm_err_file}.".format(**locals()))

            errors.append(
                f' Contents of {slurm_err_file} without "memory cg"'
                f" errors:\n```\n{slurm_err_content}\n```"
            )

        if table:
            errors.append(str(table))

        # Flag reports that mix runs from the infai_1 and infai_2 partitions.
        infai_1_nodes = {f"ase{i:02d}.cluster.bc2.ch" for i in range(1, 25)}
        infai_2_nodes = {f"ase{i:02d}.cluster.bc2.ch" for i in range(31, 55)}
        nodes = self._get_node_names()
        if nodes & infai_1_nodes and nodes & infai_2_nodes:
            errors.append("Report combines runs from infai_1 and infai_2 partitions.")

        return "\n".join(errors)

    def get_markup(self):
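        """
        Return a markup table that shows, for each ordered pair of
        algorithms, the number of tasks on which the first algorithm
        is better than the second for the compared attribute.
        """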
        num_tasks_better = defaultdict(int)
        num_tasks = 0
        for (domain, problem), runs in sorted(self.problem_runs.items()):
            # Skip tasks for which not every algorithm reports the attribute.
            if not all(self.attribute in run for run in runs):
                continue
            num_tasks += 1

            # Compare each pair of runs and record which algorithm is better.
            for run1, run2 in itertools.combinations(runs, 2):
                algo1 = run1["algorithm"]
                algo2 = run2["algorithm"]
                val1 = run1.get(self.attribute)
                val2 = run2.get(self.attribute)
                if val1 is not None and val2 is not None:
                    order = None
                    # For "min_wins" attributes, lower values are better.
                    if self.attribute.min_wins:
                        if val1 < val2:
                            order = (algo1, algo2)
                        elif val1 > val2:
                            order = (algo2, algo1)
                    else:
                        assert not self.attribute.min_wins
                        if val1 > val2:
                            order = (algo1, algo2)
                        elif val1 < val2:
                            order = (algo2, algo1)
                    if order is not None:
                        num_tasks_better[order] += 1

        def get_wins(algo1):
            # Count the algorithms that algo1 beats or ties in pairwise counts.
            num_wins = 0
            for algo2 in self.algorithms:
                if algo1 == algo2:
                    continue
                num_algo1_better = num_tasks_better[(algo1, algo2)]
                num_algo2_better = num_tasks_better[(algo2, algo1)]
                if num_algo1_better >= num_algo2_better:
                    num_wins += 1
            return num_wins

        algorithms = self.algorithms[:]
        if self.sort:
            algorithms.sort(key=get_wins)

        # Build the pairwise table; the higher count in each pair is bolded.
        comparison_table = Table()
        comparison_table.set_row_order(algorithms)
        comparison_table.set_column_order(algorithms)
        for algo1, algo2 in itertools.permutations(algorithms, 2):
            num_algo1_better = num_tasks_better[(algo1, algo2)]
            num_algo2_better = num_tasks_better[(algo2, algo1)]
            if num_algo1_better >= num_algo2_better:
                if self.output_format == "tex":
                    content = rf" ''\textbf{{{num_algo1_better}}}''"
                else:
                    content = f" ''<b>{num_algo1_better}</b>''"
            else:
                content = num_algo1_better
            comparison_table.add_cell(algo1, algo2, content)
        for algo in algorithms:
            comparison_table.add_cell(algo, algo, " ''--''")

        print("Number of tasks for which all algorithms report {}: {}".format(
            self.attribute, num_tasks))

        return str(comparison_table)
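
# Usage sketch (assumption): in Lab, a method like get_markup() above would
# typically live on a custom PlanningReport subclass. The class name, the
# "sort" option, and the attribute handling below are illustrative guesses,
# not part of the original excerpt.
from downward.reports import PlanningReport


class PairwiseComparisonReport(PlanningReport):
    def __init__(self, sort=True, **kwargs):
        super().__init__(**kwargs)
        self.sort = sort

    def get_markup(self):
        # Body as defined above; assumes self.attribute holds the single
        # Attribute instance being compared, e.g. self.attributes[0].
        ...


# The report could then be attached to an experiment:
# exp.add_report(PairwiseComparisonReport(attributes=["expansions"]),
#                outfile="pairwise-comparison.html")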