Example #1
    # Module-level dependencies used below: logging, os, benchexec's result
    # module, and the Run class.
    def update_runsets(self):
        for runSet in self.benchmark.run_sets:
            # Divide the defined run into multiple runs if necessary.
            # Check the number of runs: if it is 0, the setup went wrong.
            if len(runSet.runs) > 0:
                old_run = runSet.runs[0]
                expected_result_filename = old_run.identifier
                if os.path.exists(expected_result_filename):
                    expected_dict = self._read_expected_result_json(
                        expected_result_filename)
                else:
                    logging.info(
                        "Could not identify expected result file. "
                        "Assuming all expected results are true."
                    )
                    expected_dict = {}
                # Reuse the properties of the template run for every
                # generated run.
                prop = old_run.properties
                runSet.runs = []
                for module_name in self.test_dict:
                    for test_name in self.test_dict[module_name]:
                        run = Run(f"{module_name}.{test_name}", "", "", "",
                                  runSet)
                        run.properties = prop
                        if prop:
                            if run.identifier in expected_dict:
                                run.expected_results[
                                    prop[0].filename] = result.ExpectedResult(
                                        expected_dict[run.identifier]
                                        in ("True", "true"),
                                        None,
                                    )
                            else:
                                # No entry in the expected-result file:
                                # default to an expected verdict of true.
                                run.expected_results[
                                    prop[0].filename] = result.ExpectedResult(
                                        True, None)
                        runSet.runs.append(run)
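A minimal sketch, assuming the layout implied by the lookup above, of what _read_expected_result_json returns: a JSON object mapping run identifiers to verdict strings such as "True"/"true". The module and test names here are hypothetical.

    import json

    # Hypothetical expected-result file contents; in the code above the file
    # is the one named by old_run.identifier.
    sample = '{"mymodule.test_foo": "true", "mymodule.test_bar": "False"}'
    expected_dict = json.loads(sample)
    assert expected_dict["mymodule.test_foo"] in ("True", "true")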
Example #2
    # Module-level dependencies used below: collections.abc, logging, os,
    # benchexec's result and util modules, Run, BenchExecException, and
    # load_task_definition_file.
    def create_run_from_task_definition(
            self, task_def_file, options, propertyfile, required_files_pattern):
        """Create a Run from a task definition in YAML format."""
        task_def = load_task_definition_file(task_def_file)

        def expand_patterns_from_tag(tag):
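            """Expand all file patterns listed under the given tag into paths."""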
            result = []
            patterns = task_def.get(tag, [])
            if isinstance(patterns, str) or not isinstance(
                    patterns, collections.abc.Iterable):
                # accept single string in addition to list of strings
                patterns = [patterns]
            for pattern in patterns:
                expanded = util.expand_filename_pattern(
                    str(pattern), os.path.dirname(task_def_file))
                if not expanded:
                    raise BenchExecException(
                        "Pattern '{}' in task-definition file {} did not match any paths."
                        .format(pattern, task_def_file))
                expanded.sort()
                result.extend(expanded)
            return result

        input_files = expand_patterns_from_tag("input_files")
        if not input_files:
            raise BenchExecException(
                "Task-definition file {} does not define any input files.".format(task_def_file))
        required_files = expand_patterns_from_tag("required_files")

        run = Run(
            task_def_file,
            input_files,
            options,
            self,
            propertyfile,
            required_files_pattern,
            required_files)

        # The propertyfile of a Run is fully determined only after the Run is
        # created, so we handle it and the expected results here.
        if not run.propertyfile:
            return run

        # TODO: support "property_name" attribute in yaml
        prop = result.Property.create(run.propertyfile, allow_unknown=True)
        run.properties = [prop]

        for prop_dict in task_def.get("properties", []):
            if not isinstance(prop_dict, dict) or "property_file" not in prop_dict:
                raise BenchExecException(
                    "Missing property file for property in task-definition file {}."
                    .format(task_def_file))
            expanded = util.expand_filename_pattern(
                prop_dict["property_file"], os.path.dirname(task_def_file))
            if len(expanded) != 1:
                raise BenchExecException(
                    "Property pattern '{}' in task-definition file {} does not refer to exactly one file."
                    .format(prop_dict["property_file"], task_def_file))

            # TODO We could reduce I/O by checking absolute paths and using os.path.samestat
            # with cached stat calls.
            if prop.filename == expanded[0] or os.path.samefile(prop.filename, expanded[0]):
                expected_result = prop_dict.get("expected_verdict")
                if expected_result is not None and not isinstance(expected_result, bool):
                    raise BenchExecException(
                        "Invalid expected result '{}' for property {} in task-definition file {}."
                        .format(expected_result, prop_dict["property_file"], task_def_file))
                run.expected_results[prop.filename] = \
                    result.ExpectedResult(expected_result, prop_dict.get("subproperty"))

        if not run.expected_results:
            logging.debug(
                "Ignoring run '%s' because it does not have the property from %s.",
                run.identifier, run.propertyfile)
            return None
        elif len(run.expected_results) > 1:
            raise BenchExecException(
                "Property '{}' specified multiple times in task-definition file {}."
                .format(prop.filename, task_def_file))
        else:
            return run
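For context, a minimal sketch (hypothetical file names; PyYAML for parsing) of a task-definition file this method can consume. The keys mirror exactly what the code reads: input_files, required_files, and properties entries with a property_file and an optional boolean expected_verdict. Note that a YAML parser turns expected_verdict: false into the Python bool False, which is what the isinstance(expected_result, bool) check above expects.

    # Parse a hypothetical task definition with PyYAML.
    import yaml

    TASK_DEF = """
    input_files: "program.c"
    required_files:
      - "harness/*.c"
    properties:
      - property_file: ../properties/unreach-call.prp
        expected_verdict: false
    """

    task_def = yaml.safe_load(TASK_DEF)
    # input_files may be a single string; the code wraps it into a list.
    assert task_def["input_files"] == "program.c"
    assert task_def["properties"][0]["expected_verdict"] is False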