Example #1
    def get_markup(self):
        if not len(self.configs) == 1:
            logging.critical('The number of configs has to be one for taskwise reports.')
        table = Table()
        for (domain, problem), runs in self.problem_runs.items():
            assert len(runs) == 1, len(runs)
            run = runs[0]
            for attr in self.attributes:
                table.add_cell('%s:%s' % (domain, problem), attr, run.get(attr))
        return str(table)
Example #2
    def get_markup(self):
        if not len(self.configs) == 1:
            logging.critical(
                'The number of configs has to be one for taskwise reports.')
        table = Table()
        for (domain, problem), runs in self.problem_runs.items():
            assert len(runs) == 1, len(runs)
            run = runs[0]
            for attr in self.attributes:
                table.add_cell('%s:%s' % (domain, problem), attr,
                               run.get(attr))
        return str(table)
Example #3
    def _get_warnings_text_and_table(self):
        """
        Return a :py:class:`Table <lab.reports.Table>` containing one line for
        each run where an unexplained error occurred.
        """
        if not self.ERROR_ATTRIBUTES:
            logging.critical("The list of error attributes must not be empty.")

        table = Table(title="Unexplained errors")
        table.set_column_order(self.ERROR_ATTRIBUTES)

        wrote_to_slurm_err = any(
            "output-to-slurm.err" in run.get("unexplained_errors", [])
            for run in self.runs.values()
        )

        for run in self.runs.values():
            error_message = tools.get_unexplained_errors_message(run)
            if error_message:
                logging.error(error_message)
                run_dir = run["run_dir"]
                for attr in self.ERROR_ATTRIBUTES:
                    value = run.get(attr, "?")
                    if attr == "unexplained_errors":
                        value = self._format_unexplained_errors(value)
                        # Use formatted value as-is.
                        table.cell_formatters[run_dir][attr] = CellFormatter()
                    table.add_cell(run_dir, attr, value)

        errors = []

        if wrote_to_slurm_err:
            src_dir = self.eval_dir.rstrip("/")[: -len("-eval")]
            slurm_err_file = src_dir + "-grid-steps/slurm.err"
            try:
                slurm_err_content = tools.get_slurm_err_content(src_dir)
            except OSError:
                slurm_err_content = (
                    "The slurm.err file was missing while creating the report."
                )
            else:
                slurm_err_content = tools.filter_slurm_err_content(slurm_err_content)

            logging.error("There was output to {slurm_err_file}.".format(**locals()))

            errors.append(
                ' Contents of {slurm_err_file} without "memory cg"'
                " errors:\n```\n{slurm_err_content}\n```".format(**locals())
            )

        if table:
            errors.append(str(table))

        infai_1_nodes = {f"ase{i:02d}.cluster.bc2.ch" for i in range(1, 25)}
        infai_2_nodes = {f"ase{i:02d}.cluster.bc2.ch" for i in range(31, 55)}
        nodes = self._get_node_names()
        if nodes & infai_1_nodes and nodes & infai_2_nodes:
            errors.append("Report combines runs from infai_1 and infai_2 partitions.")

        return "\n".join(errors)
Example #4
    def get_markup(self):
        """
        Return `txt2tags <http://txt2tags.org/>`_ markup for the report.

        """
        table = Table()
        row_sort_module = RowSortModule(self._sort_spec)
        table.dynamic_data_modules.append(row_sort_module)
        for run_id, run in self.props.items():
            row = {}
            for key, value in run.items():
                if key not in self.attributes:
                    continue
                if isinstance(value, (list, tuple)):
                    key = '-'.join([str(item) for item in value])
                row[key] = value
            table.add_row(run_id, row)
        return str(table)
Example #5
    def get_markup(self):
        tables = []
        for (domain, problem) in self.problems:
            for config_nick in self.config_nicks:
                runs = [self.runs[(domain, problem, rev + "-" + config_nick)]
                        for rev in self.revision_nicks]

                if any(runs[0][self.regression_attribute] >
                       runs[i][self.regression_attribute]
                       for i in range(1, len(self.revision_nicks))):
                    print('"%s:%s",' % (domain, problem))
                    table = Table()
                    for rev, run in zip(self.revision_nicks, runs):
                        for attr in self.attributes:
                            table.add_cell(rev, attr, run.get(attr))
                    table_name = ":".join((domain, problem, config_nick))
                    tables.append((table_name, table))
        return "\n".join(name + "\n" + str(table) for name, table in tables)
Example #6
    def get_markup(self):
        tables = []
        for (domain, problem) in self.problems:
            for config_nick in self.config_nicks:
                runs = [
                    self.runs[(domain, problem, rev + "-" + config_nick)]
                    for rev in self.revision_nicks
                ]

                if any(runs[0][self.regression_attribute] > runs[i][
                        self.regression_attribute]
                       for i in range(1, len(self.revision_nicks))):
                    print('"%s:%s",' % (domain, problem))
                    table = Table()
                    for rev, run in zip(self.revision_nicks, runs):
                        for attr in self.attributes:
                            table.add_cell(rev, attr, run.get(attr))
                    table_name = ":".join((domain, problem, config_nick))
                    tables.append((table_name, table))
        return "\n".join(name + "\n" + str(table) for name, table in tables)
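A note on the condition in Examples #5 and #6: the any(...) check flags a task as soon as some later revision reports a smaller value for regression_attribute than the first revision. A minimal, self-contained sketch of that check, with revision names and values invented for illustration:

# Illustrative sketch, not taken from the repositories above.
revision_nicks = ["rev1", "rev2", "rev3"]
values = {"rev1": 10, "rev2": 10, "rev3": 8}
regressed = any(values[revision_nicks[0]] > values[revision_nicks[i]]
                for i in range(1, len(revision_nicks)))
print(regressed)  # True: rev3 reports a smaller value than rev1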
Example #7
    def get_markup(self):
        num_tasks_better = defaultdict(int)
        num_tasks = 0
        for (domain, problem), runs in sorted(self.problem_runs.items()):
            if all(self.attribute in run for run in runs):
                num_tasks += 1
            else:
                continue

            for run1, run2 in itertools.combinations(runs, 2):
                algo1 = run1["algorithm"]
                algo2 = run2["algorithm"]
                val1 = run1.get(self.attribute)
                val2 = run2.get(self.attribute)
                if val1 is not None and val2 is not None:
                    order = None
                    if self.attribute.min_wins:
                        if val1 < val2:
                            order = (algo1, algo2)
                        elif val1 > val2:
                            order = (algo2, algo1)
                    else:
                        assert not self.attribute.min_wins
                        if val1 > val2:
                            order = (algo1, algo2)
                        elif val1 < val2:
                            order = (algo2, algo1)
                    if order is not None:
                        num_tasks_better[order] += 1

        def get_wins(algo1):
            num_wins = 0
            for algo2 in self.algorithms:
                if algo1 == algo2:
                    continue
                num_algo1_better = num_tasks_better[(algo1, algo2)]
                num_algo2_better = num_tasks_better[(algo2, algo1)]
                if num_algo1_better >= num_algo2_better:
                    num_wins += 1
            return num_wins

        algorithms = self.algorithms[:]
        if self.sort:
            algorithms.sort(key=get_wins)

        comparison_table = Table()
        comparison_table.set_row_order(algorithms)
        comparison_table.set_column_order(algorithms)
        for algo1, algo2 in itertools.permutations(algorithms, 2):
            num_algo1_better = num_tasks_better[(algo1, algo2)]
            num_algo2_better = num_tasks_better[(algo2, algo1)]
            if num_algo1_better >= num_algo2_better:
                if self.output_format == "tex":
                    content = r" ''\textbf{{{}}}''".format(num_algo1_better)
                else:
                    content = r" ''<b>{}</b>''".format(num_algo1_better)
            else:
                content = num_algo1_better
            comparison_table.add_cell(algo1, algo2, content)
        for algo in algorithms:
            comparison_table.add_cell(algo, algo, " ''--''")

        print("Number of tasks for which all algorithms report {}: {}".format(
            self.attribute, num_tasks))

        return str(comparison_table)
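The pairwise win counting in Example #7 does not depend on the Lab classes. A minimal sketch of the same idea on plain dictionaries, with invented algorithm names and values, assuming a min-wins attribute such as expansions (smaller is better):

import itertools
from collections import defaultdict

# Invented per-task values for a min-wins attribute (smaller is better).
runs_per_task = {
    "gripper:prob01": {"blind": 30, "ff": 18, "lama": 12},
    "gripper:prob02": {"blind": 25, "ff": 7, "lama": 7},
}

num_tasks_better = defaultdict(int)
for task, values in runs_per_task.items():
    for algo1, algo2 in itertools.combinations(sorted(values), 2):
        # Record which algorithm of the pair is strictly better on this task.
        if values[algo1] < values[algo2]:
            num_tasks_better[(algo1, algo2)] += 1
        elif values[algo1] > values[algo2]:
            num_tasks_better[(algo2, algo1)] += 1

print(dict(num_tasks_better))
# {('ff', 'blind'): 2, ('lama', 'blind'): 2, ('lama', 'ff'): 1}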
Example #8
    def _get_table(self, domain, runs):
        table = Table(title=domain)
        for run in runs:
            for attr in self.attributes:
                table.add_cell(run['problem'], attr, run.get(attr))
        return table
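All of the examples above render their results through the same small Table interface from lab.reports: create a Table, fill it cell by cell, and convert it to a string. A hypothetical invocation of that interface (the run dictionaries, domain name, and attribute names are invented; Lab is assumed to be installed):

from lab.reports import Table

# Invented run data; real runs come from a Lab experiment's properties.
runs = [
    {"problem": "prob01", "coverage": 1, "expansions": 120},
    {"problem": "prob02", "coverage": 0, "expansions": None},
]

table = Table(title="gripper")
for run in runs:
    for attr in ("coverage", "expansions"):
        # Row key, column key, cell value: the pattern used in every example.
        table.add_cell(run["problem"], attr, run.get(attr))

print(table)  # str(table) yields the report markup, as in get_markup() above.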