Example #1
    def update_runsets(self):
        for runSet in self.benchmark.run_sets:
            # Divide the defined run into multiple runs if necessary.
            # Check the number of runs: if it is 0, the setup went wrong.
            if len(runSet.runs) > 0:
                old_run = runSet.runs[0]
                expected_result_filename = old_run.identifier
                if os.path.exists(expected_result_filename):
                    expected_dict = self._read_expected_result_json(
                        expected_result_filename)
                else:
                    logging.info(
                        "Could not identify expected result file. "
                        "Assuming all tests are true."
                    )
                    expected_dict = {}
                prop = old_run.properties
                runSet.runs = []
                for module_name in self.test_dict:
                    for test_name in self.test_dict[module_name]:
                        run = Run(f"{module_name}.{test_name}", "", "", "",
                                  runSet)
                        prop = [result.Property("/", False, "Yo")]
                        run.properties = prop
                        if run.identifier in expected_dict and len(prop) > 0:
                            run.expected_results[
                                prop[0].filename] = result.ExpectedResult(
                                    expected_dict[run.identifier] == "True"
                                    or expected_dict[run.identifier] == "true",
                                    None,
                                )
                        else:
                            run.expected_results[
                                prop[0].filename] = result.ExpectedResult(
                                    True, None)
                        runSet.runs.append(run)
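
The helper _read_expected_result_json and the shape of self.test_dict are not shown in the snippet above. A minimal sketch of what they might look like, assuming the JSON file simply maps "<module>.<test>" identifiers to the strings "True"/"true" or "False" (the helper body and the test_dict contents below are illustrative guesses, not the project's actual code):

import json

def _read_expected_result_json(expected_result_filename):
    # Assumed format: {"module.test": "True", ...}
    with open(expected_result_filename) as json_file:
        return json.load(json_file)

# Shape expected by the nested loop: module name -> list of test names,
# producing run identifiers such as "test_foo.test_bar".
test_dict = {
    "test_foo": ["test_bar", "test_baz"],
}
identifiers = [f"{m}.{t}" for m in test_dict for t in test_dict[m]]
print(identifiers)  # ['test_foo.test_bar', 'test_foo.test_baz']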
Example #2
    def create_run(self, info_result=RESULT_UNKNOWN):
        runSet = types.SimpleNamespace()
        runSet.log_folder = "."
        runSet.result_files_folder = "."
        runSet.options = []
        runSet.real_name = None
        runSet.propertytag = None
        runSet.benchmark = lambda: None
        runSet.benchmark.base_dir = "."
        runSet.benchmark.benchmark_file = "Test.xml"
        runSet.benchmark.columns = []
        runSet.benchmark.name = "Test"
        runSet.benchmark.instance = "Test"
        runSet.benchmark.rlimits = {}
        runSet.benchmark.tool = BaseTool()

        def determine_result(run):
            return info_result

        runSet.benchmark.tool.determine_result = determine_result

        run = Run(
            identifier="test.c",
            sourcefiles=["test.c"],
            task_options=None,
            fileOptions=[],
            runSet=runSet,
        )
        run._cmdline = ["dummy.bin", "test.c"]
        return run
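
Two interchangeable tricks for building the dummy objects appear in these examples: types.SimpleNamespace() and a bare "lambda: None", whose writable __dict__ also accepts arbitrary attributes (the snippet above uses both, a namespace for runSet and a lambda for benchmark). Assigning determine_result on the tool instance stores a plain function rather than a bound method, so the call receives exactly the arguments that are passed. A small self-contained illustration (FakeTool is a placeholder, not a BenchExec class):

import types

ns_dummy = types.SimpleNamespace()
ns_dummy.name = "Test"

lambda_dummy = lambda: None   # functions have a writable __dict__, too
lambda_dummy.name = "Test"

assert ns_dummy.name == lambda_dummy.name == "Test"

class FakeTool:
    pass

tool = FakeTool()
# Instance attribute, not a method: no implicit "self" is inserted on the call.
tool.determine_result = lambda run: "unknown"
print(tool.determine_result("dummy run"))  # -> unknown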
Example #3
    def create_run(self, info_result=RESULT_UNKNOWN):
        # lambdas are simple dummy objects
        runSet = lambda: None
        runSet.log_folder = '.'
        runSet.result_files_folder = '.'
        runSet.options = []
        runSet.real_name = None
        runSet.propertyfile = None
        runSet.benchmark = lambda: None
        runSet.benchmark.base_dir = '.'
        runSet.benchmark.benchmark_file = 'Test.xml'
        runSet.benchmark.columns = []
        runSet.benchmark.name = 'Test'
        runSet.benchmark.instance = 'Test'
        runSet.benchmark.rlimits = {}
        runSet.benchmark.tool = BaseTool()

        def determine_result(self,
                             returncode,
                             returnsignal,
                             output,
                             isTimeout=False):
            return info_result

        runSet.benchmark.tool.determine_result = determine_result

        return Run(sourcefiles=['test.c'], fileOptions=[], runSet=runSet)
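
Examples 3 and 4 stub determine_result with the older multi-argument signature (returncode, returnsignal, output, isTimeout), while Examples 1 and 2 use a single-argument form that takes only the run; which one applies presumably depends on the BenchExec version being tested. Because the stub is stored on the tool instance it stays unbound, so a direct call has to supply every parameter, including a placeholder for self; the stub ignores them all and returns the fixed info_result. A sketch of driving such a stub directly (RESULT_UNKNOWN is assumed to equal the string "unknown"):

RESULT_UNKNOWN = "unknown"  # assumed to match benchexec.result.RESULT_UNKNOWN

def determine_result(self, returncode, returnsignal, output, isTimeout=False):
    # Mirrors the stub above: all arguments are ignored, a fixed result is returned.
    return RESULT_UNKNOWN

# Stored as an instance attribute, the function stays unbound, so a direct call
# must pass every parameter explicitly; "self" is just a placeholder here.
print(determine_result(None, 0, 0, ["tool output"]))  # -> unknown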
Example #4
    def create_run(self, info_result=RESULT_UNKNOWN):
        runSet = types.SimpleNamespace()
        runSet.log_folder = "."
        runSet.result_files_folder = "."
        runSet.options = []
        runSet.real_name = None
        runSet.propertytag = None
        runSet.benchmark = lambda: None
        runSet.benchmark.base_dir = "."
        runSet.benchmark.benchmark_file = "Test.xml"
        runSet.benchmark.columns = []
        runSet.benchmark.name = "Test"
        runSet.benchmark.instance = "Test"
        runSet.benchmark.rlimits = {}
        runSet.benchmark.tool = BaseTool()

        def determine_result(self,
                             returncode,
                             returnsignal,
                             output,
                             isTimeout=False):
            return info_result

        runSet.benchmark.tool.determine_result = determine_result

        return Run(identifier="test.c",
                   sourcefiles=["test.c"],
                   fileOptions=[],
                   runSet=runSet)
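
All three create_run variants follow the same pattern: build a throwaway runSet/benchmark/tool chain, replace determine_result with a stub that returns info_result, and hand the dummy runSet to Run. A self-contained sketch of how a test might use such a helper; FakeRun below is a stand-in for BenchExec's Run class, whose real constructor and attributes are not reproduced here:

import types
import unittest

RESULT_UNKNOWN = "unknown"  # assumed to match benchexec.result.RESULT_UNKNOWN

class FakeRun:
    # Minimal stand-in for BenchExec's Run, only for this sketch.
    def __init__(self, identifier, sourcefiles, runSet):
        self.identifier = identifier
        self.sourcefiles = sourcefiles
        self.runSet = runSet

class CreateRunPatternTest(unittest.TestCase):
    def create_run(self, info_result=RESULT_UNKNOWN):
        runSet = types.SimpleNamespace()
        runSet.benchmark = types.SimpleNamespace()
        runSet.benchmark.tool = types.SimpleNamespace()
        # Stub: ignore all arguments and report the requested result.
        runSet.benchmark.tool.determine_result = lambda *args, **kwargs: info_result
        return FakeRun("test.c", ["test.c"], runSet)

    def test_stubbed_result(self):
        run = self.create_run(info_result="true")
        self.assertEqual("test.c", run.identifier)
        self.assertEqual("true", run.runSet.benchmark.tool.determine_result())

if __name__ == "__main__":
    unittest.main()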