Example #1
    def test_single_value(self):
        rr = m.RunResult(4.0, 1)
        rr2 = m.RunResult(2.0, 1)

        self.assertFalse(rr.is_multi_value())
        self.assertEqual(rr.get_average(), 4.0)
        self.assertEqual(rr.compute_overhead(rr2), 2.0)
Example #2
File: Runner.py Project: rmmilewi/pira
    def do_baseline_run(self,
                        target_config: TargetConfiguration) -> ms.RunResult:
        log.get_logger().log('LocalRunner::do_baseline_run')
        accu_runtime = .0

        if not target_config.has_args_for_invocation():
            log.get_logger().log(
                'LocalRunner::do_baseline_run: BEGIN not target_config.has_args_for_invocation()'
            )
            # This runner only takes into account the first argument string (if not already set)
            args = self._config.get_args(target_config.get_build(),
                                         target_config.get_target())
            log.get_logger().log('LocalRunner::do_baseline_run: args: ' +
                                 str(args))
            target_config.set_args_for_invocation(args[0])
            log.get_logger().log(
                'LocalRunner::do_baseline_run: END not target_config.has_args_for_invocation()'
            )

        # TODO Better evaluation of the obtained timings.
        for y in range(0, self._num_repetitions):
            log.get_logger().log(
                'LocalRunner::do_baseline_run: Running iteration ' + str(y),
                level='debug')
            accu_runtime += self.run(target_config, InstrumentConfig(), True)

        run_result = ms.RunResult(accu_runtime, self._num_repetitions)
        log.get_logger().log('[Vanilla][RUNTIME] Vanilla avg: ' +
                             str(run_result.get_average()) + '\n',
                             level='perf')

        return run_result
Example #3
    def test_multi_values(self):
        rr = m.RunResult()
        rr2 = m.RunResult()

        for v in range(1, 4):
            rr.add_values(float(v), 1)
            rr2.add_values(float(2 * v), 1)

        self.assertTrue(rr.is_multi_value())
        # Simple averages
        self.assertEqual(rr.get_average(0), 1)
        self.assertEqual(rr.get_average(1), 2)
        self.assertEqual(rr.get_average(2), 3)

        # Overheads
        self.assertEqual(rr.compute_overhead(rr2, 0), 0.5)
        self.assertEqual(rr.compute_overhead(rr2, 1), 0.5)
        self.assertEqual(rr.compute_overhead(rr2, 2), 0.5)
Example #4
File: ExporterTest.py Project: mority/pira
    def test_export(self):
        rre = E.RunResultExporter()
        rr = M.RunResult(1, 1)
        rr.add_values(2, 2)
        rre.add_row("test", rr)
        rre.export("test_file")
        with open("test_file", "r") as tf:
            data = tf.read()
            expected_data = '"Type of Run","Accumulated Runtime","Number of Runs","Accumulated Runtime","Number of Runs"\n"test","1","1","2","2"\n'
            self.assertEqual(data, expected_data)
        os.remove("test_file")
Example #5
File: Runner.py Project: rmmilewi/pira
    def do_baseline_run(self,
                        target_config: TargetConfiguration) -> ms.RunResult:
        log.get_logger().log('LocalScalingRunner::do_baseline_run')
        args = self._config.get_args(target_config.get_build(),
                                     target_config.get_target())
        run_result = ms.RunResult()
        for arg_cfg in args:
            target_config.set_args_for_invocation(arg_cfg)
            rr = super().do_baseline_run(target_config)
            run_result.add_from(rr)

        return run_result
Example #6
File: ExporterTest.py Project: mority/pira
    def test_add_row(self):
        rre = E.RunResultExporter()
        rr = M.RunResult(1, 1)
        rr.add_values(2, 2)
        rre.add_row("test", rr)
        self.assertEqual(len(rre.rows), 1)
        self.assertEqual(len(rre.rows[0]), 5)
        self.assertEqual(rre.rows[0][0], "test")
        self.assertEqual(rre.rows[0][1], 1)
        self.assertEqual(rre.rows[0][2], 1)
        self.assertEqual(rre.rows[0][3], 2)
        self.assertEqual(rre.rows[0][4], 2)
        self.assertEqual(rre.width, 5)
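
Taken together, the two ExporterTest cases (Examples #4 and #6) pin down what RunResultExporter has to do: flatten a RunResult into a row of (type, runtime, run count, runtime, run count, ...) values, track the widest row, and write a fully quoted CSV file. The following is a minimal sketch of a class that would satisfy both tests; the attribute names it reads from the RunResult argument (runtimes, nr_of_runs) are assumptions for illustration and need not match the project's actual implementation.

import csv


class RunResultExporter:

    def __init__(self):
        self.rows = []
        self.width = 0

    def add_row(self, type_of_run, run_result):
        # Flatten into: type, runtime_1, runs_1, runtime_2, runs_2, ...
        # Assumes the RunResult exposes its value pairs as two parallel lists.
        row = [type_of_run]
        for runtime, nr_runs in zip(run_result.runtimes, run_result.nr_of_runs):
            row.append(runtime)
            row.append(nr_runs)
        self.rows.append(row)
        self.width = max(self.width, len(row))

    def export(self, file_name):
        # The header repeats the column pair often enough to cover the widest row.
        header = ['Type of Run']
        for _ in range((self.width - 1) // 2):
            header.extend(['Accumulated Runtime', 'Number of Runs'])
        with open(file_name, 'w', newline='') as csv_file:
            writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL, lineterminator='\n')
            writer.writerow(header)
            writer.writerows(self.rows)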
Example #7
File: Runner.py Project: rmmilewi/pira
    def do_profile_run(self,
                       target_config: TargetConfiguration,
                       instr_iteration: int,
                       compile_time_filtering: bool = True) -> ms.RunResult:
        log.get_logger().log('LocalScalingRunner::do_profile_run')
        # We run as many experiments as we have input data configs
        # TODO: How to handle the model parameter <-> input parameter relation, do we care?
        args = self._config.get_args(target_config.get_build(),
                                     target_config.get_target())
        # TODO: How to handle multiple MeasurementResult items? We get a vector of these after this function.
        run_result = ms.RunResult()
        for arg_cfg in args:
            # Call the runner method with the correct arguments.
            target_config.set_args_for_invocation(arg_cfg)
            rr = super().do_profile_run(target_config, instr_iteration,
                                        compile_time_filtering)
            run_result.add_from(rr)

        # At this point we have all the data we need to construct an Extra-P model

        return run_result
Example #8
File: Runner.py Project: rmmilewi/pira
    def do_profile_run(self,
                       target_config: TargetConfiguration,
                       instr_iteration: int,
                       compile_time_filtering: bool = True) -> ms.RunResult:
        log.get_logger().log(
            'LocalRunner::do_profile_run: Received instrumentation file: ' +
            target_config.get_instr_file(),
            level='debug')
        scorep_helper = ms.ScorepSystemHelper(self._config)
        instrument_config = InstrumentConfig(True, instr_iteration)
        scorep_helper.set_up(target_config, instrument_config,
                             compile_time_filtering)
        runtime = .0

        if not target_config.has_args_for_invocation():
            # This runner only takes into account the first argument string (if not already set)
            args = self._config.get_args(target_config.get_build(),
                                         target_config.get_target())
            target_config.set_args_for_invocation(args[0])

        for y in range(0, self._num_repetitions):
            log.get_logger().log(
                'LocalRunner::do_profile_run: Running instrumentation iteration '
                + str(y),
                level='debug')
            runtime = runtime + self.run(target_config, instrument_config,
                                         compile_time_filtering)
            # Enable further processing of the resulting profile
            self._sink.process(scorep_helper.get_exp_dir(), target_config,
                               instrument_config)

        run_result = ms.RunResult(runtime, self._num_repetitions)
        log.get_logger().log('[Instrument][RUNTIME] $' + str(instr_iteration) +
                             '$ ' + str(run_result.get_average()),
                             level='perf')
        return run_result
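
Since do_baseline_run and do_profile_run both return RunResult objects, the overhead of an instrumented run over the vanilla baseline can be computed directly with compute_overhead, exactly as the unit tests exercise it. A tiny illustration follows; the runtimes are made up, the ms alias is the one used in the Runner.py excerpts (its import is not part of these excerpts), and the "instrumented vs. vanilla" interpretation is an assumption rather than code taken from pira.

# Hypothetical accumulated runtimes over 4 repetitions each (numbers are invented).
vanilla_rr = ms.RunResult(40.0, 4)       # baseline: 10.0 s average
instrumented_rr = ms.RunResult(52.0, 4)  # instrumented: 13.0 s average

# 13.0 / 10.0 -> the instrumented run is roughly 30% slower than the baseline.
overhead = instrumented_rr.compute_overhead(vanilla_rr)
print(overhead)  # 1.3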
Example #9
    def test_empty_init(self):
        rr = m.RunResult()

        self.assertFalse(rr.is_multi_value())
        self.assertRaises(RuntimeError, rr.get_average)
        self.assertRaises(RuntimeError, rr.compute_overhead, m.RunResult())
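
The three RunResultTest cases (Examples #1, #3, and #9), together with the add_from calls in the scaling runner, constrain RunResult's behaviour quite tightly. Below is a minimal sketch of a class that would satisfy those assertions; the internal layout and attribute names are assumptions inferred from the examples, not necessarily the project's actual implementation.

class RunResult:
    """Minimal sketch: accumulated runtime / repetition pairs with averages and overhead."""

    def __init__(self, accumulated_runtime=None, nr_of_runs=None):
        # Parallel lists, so one object can carry a single pair or many (multi-value).
        self.runtimes = []
        self.nr_of_runs = []
        if accumulated_runtime is not None and nr_of_runs is not None:
            self.add_values(accumulated_runtime, nr_of_runs)

    def add_values(self, accumulated_runtime, nr_of_runs):
        self.runtimes.append(accumulated_runtime)
        self.nr_of_runs.append(nr_of_runs)

    def add_from(self, other):
        # Merge another result's pairs (as LocalScalingRunner does per argument config).
        self.runtimes.extend(other.runtimes)
        self.nr_of_runs.extend(other.nr_of_runs)

    def is_multi_value(self):
        return len(self.runtimes) > 1

    def get_average(self, pos=0):
        if not self.runtimes:
            raise RuntimeError('RunResult: no values stored')
        return self.runtimes[pos] / self.nr_of_runs[pos]

    def compute_overhead(self, other, pos=0):
        # Ratio of this result's average to the other result's average at position pos.
        return self.get_average(pos) / other.get_average(pos)

This sketch is enough to reproduce the behaviour asserted above: single-value averages and overheads, per-position averages and overheads for multi-value results, and a RuntimeError when an empty result is queried.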