Example #1
    def test_single_value(self):
        rr = m.RunResult(4.0, 1)
        rr2 = m.RunResult(2.0, 1)

        self.assertFalse(rr.is_multi_value())
        self.assertEqual(rr.get_average(), 4.0)
        self.assertEqual(rr.compute_overhead(rr2), 2.0)
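
The assertions above pin down the single-value arithmetic: get_average() divides the accumulated runtime by the number of runs, and compute_overhead() is the ratio of the two averages (4.0 / 2.0 = 2.0). A minimal check of that reading, in plain Python rather than PIRA's actual RunResult:

# Hypothetical reading of the single-value arithmetic, inferred from the
# assertions above; not taken from PIRA's measurement module.
def average(accumulated_runtime, num_runs):
    return accumulated_runtime / num_runs

assert average(4.0, 1) == 4.0                      # rr.get_average()
assert average(4.0, 1) / average(2.0, 1) == 2.0    # rr.compute_overhead(rr2)
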
Example #2
File: Runner.py Project: rmmilewi/pira
    def do_baseline_run(self,
                        target_config: TargetConfiguration) -> ms.RunResult:
        log.get_logger().log('LocalRunner::do_baseline_run')
        accu_runtime = 0.0

        if not target_config.has_args_for_invocation():
            log.get_logger().log(
                'LocalRunner::do_baseline_run: BEGIN not target_config.has_args_for_invocation()'
            )
            # This runner only takes into account the first argument string (if not already set)
            args = self._config.get_args(target_config.get_build(),
                                         target_config.get_target())
            log.get_logger().log('LocalRunner::do_baseline_run: args: ' +
                                 str(args))
            target_config.set_args_for_invocation(args[0])
            log.get_logger().log(
                'LocalRunner::do_baseline_run: END not target_config.has_args_for_invocation()'
            )

        # TODO Better evaluation of the obtained timings.
        for y in range(0, self._num_repetitions):
            log.get_logger().log(
                'LocalRunner::do_baseline_run: Running iteration ' + str(y),
                level='debug')
            accu_runtime += self.run(target_config, InstrumentConfig(), True)

        run_result = ms.RunResult(accu_runtime, self._num_repetitions)
        log.get_logger().log('[Vanilla][RUNTIME] Vanilla avg: ' +
                             str(run_result.get_average()) + '\n',
                             level='perf')

        return run_result
Example #3
    def test_scorep_mh_init(self):
        s_mh = m.ScorepSystemHelper(PiraConfiguration())
        self.assertIn('.cubex', s_mh.known_files)
        self.assertDictEqual(s_mh.data, {})
        self.assertEqual('False', s_mh.cur_overwrite_exp_dir)
        self.assertEqual('', s_mh.cur_mem_size)
        self.assertEqual('', s_mh.cur_base_name)
        self.assertEqual('', s_mh.cur_filter_file)
        self.assertEqual('', s_mh.cur_exp_directory)
Example #4
    def test_multi_values(self):
        rr = m.RunResult()
        rr2 = m.RunResult()

        for v in range(1, 4):
            rr.add_values(float(v), 1)
            rr2.add_values(float(2 * v), 1)

        self.assertTrue(rr.is_multi_value())
        # Simple averages
        self.assertEqual(rr.get_average(0), 1)
        self.assertEqual(rr.get_average(1), 2)
        self.assertEqual(rr.get_average(2), 3)

        # Overheads
        self.assertEqual(rr.compute_overhead(rr2, 0), 0.5)
        self.assertEqual(rr.compute_overhead(rr2, 1), 0.5)
        self.assertEqual(rr.compute_overhead(rr2, 2), 0.5)
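
Examples #1, #4, and #15 (at the end of this list) together suggest that RunResult keeps parallel lists of accumulated runtimes and repetition counts, with indexed accessors for the multi-value case. The sketch below is consistent with all three tests; the class body is reconstructed, not PIRA's actual ms.RunResult:

# Minimal sketch of a RunResult-like container, reconstructed from the
# assertions in Examples #1, #4, and #15. Hypothetical: the real PIRA
# class may differ in details.
class RunResultSketch:
    def __init__(self, accumulated_runtime=None, num_runs=None):
        self._runtimes = []
        self._repetitions = []
        if accumulated_runtime is not None and num_runs is not None:
            self.add_values(accumulated_runtime, num_runs)

    def add_values(self, accumulated_runtime, num_runs):
        self._runtimes.append(accumulated_runtime)
        self._repetitions.append(num_runs)

    def is_multi_value(self):
        return len(self._runtimes) > 1

    def get_average(self, index=0):
        if not self._runtimes:
            raise RuntimeError('RunResultSketch: no values recorded')
        return self._runtimes[index] / self._repetitions[index]

    def compute_overhead(self, other, index=0):
        return self.get_average(index) / other.get_average(index)


# Reproduces the behavior asserted above and in Examples #1 and #15.
rr, rr2 = RunResultSketch(), RunResultSketch()
for v in range(1, 4):
    rr.add_values(float(v), 1)
    rr2.add_values(float(2 * v), 1)
assert rr.is_multi_value()
assert rr.get_average(2) == 3.0
assert rr.compute_overhead(rr2, 0) == 0.5
assert RunResultSketch(4.0, 1).compute_overhead(RunResultSketch(2.0, 1)) == 2.0
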
Example #5
    def test_scorep_mh_set_up_no_instr(self):
        s_mh = m.ScorepSystemHelper(self.cfg)
        self.instr_cfg._is_instrumentation_run = False
        s_mh.set_up(self.target_cfg, self.instr_cfg, True)

        self.assertDictEqual({}, s_mh.data)
        self.assertEqual('', s_mh.cur_mem_size)
        self.assertEqual('False', s_mh.cur_overwrite_exp_dir)
        self.assertEqual('', s_mh.cur_base_name)
        self.assertEqual('', s_mh.cur_exp_directory)
Example #6
    def test_scorep_mh_set_up_instr(self):
        s_mh = m.ScorepSystemHelper(self.cfg)
        s_mh.set_up(self.target_cfg, self.instr_cfg, True)

        self.assertIn('cube_dir', s_mh.data)
        self.assertEqual('500M', s_mh.cur_mem_size)
        self.assertEqual('True', s_mh.cur_overwrite_exp_dir)
        self.assertEqual('item01-flavor01-item01', s_mh.cur_base_name)
        self.assertEqual('/tmp/where/cube/files/are/item01-item01-flavor01-0',
                         s_mh.cur_exp_directory)
Example #7
    def test_scorep_mh_dir_invalid(self):
        s_mh = m.ScorepSystemHelper(self.cfg)
        s_mh.set_up(self.target_cfg, self.instr_cfg, True)

        self.assertEqual('/tmp/where/cube/files/are/item01-item01-flavor01-0',
                         s_mh.cur_exp_directory)
        self.assertRaises(m.MeasurementSystemException, s_mh.set_exp_dir,
                          '+/invalid/path/haha', 'item01-flavor01', 0)
        self.assertRaises(m.MeasurementSystemException, s_mh.set_exp_dir,
                          '/inv?alid/path/haha', 'item01-flavor01', 0)
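
The two assertRaises calls show that set_exp_dir rejects directory strings containing characters such as '+' or '?'. One plausible validation, sketched with a conservative whitelist regex (the actual check inside ScorepSystemHelper is not visible in these examples):

import re

# Hypothetical path validation in the spirit of set_exp_dir: accept only
# absolute paths built from a conservative character set.
_VALID_DIR = re.compile(r'^/[A-Za-z0-9_\-./]*$')

class MeasurementSystemException(Exception):
    pass

def check_exp_dir(directory):
    if not _VALID_DIR.match(directory):
        raise MeasurementSystemException('Invalid experiment directory: ' + directory)

check_exp_dir('/tmp/where/cube/files/are')    # passes
try:
    check_exp_dir('+/invalid/path/haha')      # '+' is rejected, as in the test
except MeasurementSystemException:
    pass
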
Example #8
    def test_get_no_instr_file_flags(self):
        s_mh = m.ScorepSystemHelper(self.cfg)
        s_mh.set_up(self.target_cfg, self.instr_cfg, False)
        instr_file = 'myFile.filt'
        ct_filter = False

        kw_dict = dff.BackendDefaults().get_default_kwargs()
        cc = kw_dict['CC']
        self.assertEqual('\"clang\"', cc)
        cpp = kw_dict['CXX']
        self.assertEqual('\"clang++\"', cpp)
Example #9
File: ExporterTest.py Project: mority/pira
    def test_export(self):
        rre = E.RunResultExporter()
        rr = M.RunResult(1, 1)
        rr.add_values(2, 2)
        rre.add_row("test", rr)
        rre.export("test_file")
        with open("test_file", "r") as tf:
            data = tf.read()
            expected_data = '"Type of Run","Accumulated Runtime","Number of Runs","Accumulated Runtime","Number of Runs"\n"test","1","1","2","2"\n'
            self.assertEqual(data, expected_data)
        os.remove("test_file")
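
The expected_data string fixes the CSV layout: every field is double-quoted, and the header repeats the 'Accumulated Runtime'/'Number of Runs' pair once per (runtime, runs) pair in the row. A sketch that reproduces exactly that output with the csv module (hypothetical; RunResultExporter's real implementation is not shown here):

import csv
import io

# Hypothetical exporter sketch matching the format asserted in test_export:
# all fields quoted, one header pair per (runtime, runs) value pair.
def export_rows(rows, width, stream):
    writer = csv.writer(stream, quoting=csv.QUOTE_ALL, lineterminator='\n')
    header = ['Type of Run']
    for _ in range((width - 1) // 2):
        header += ['Accumulated Runtime', 'Number of Runs']
    writer.writerow(header)
    for row in rows:
        writer.writerow(row)

buf = io.StringIO()
export_rows([['test', 1, 1, 2, 2]], 5, buf)
assert buf.getvalue() == ('"Type of Run","Accumulated Runtime","Number of Runs",'
                          '"Accumulated Runtime","Number of Runs"\n'
                          '"test","1","1","2","2"\n')
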
Example #10
File: Runner.py Project: rmmilewi/pira
    def do_baseline_run(self,
                        target_config: TargetConfiguration) -> ms.RunResult:
        log.get_logger().log('LocalScalingRunner::do_baseline_run')
        args = self._config.get_args(target_config.get_build(),
                                     target_config.get_target())
        run_result = ms.RunResult()
        for arg_cfg in args:
            target_config.set_args_for_invocation(arg_cfg)
            rr = super().do_baseline_run(target_config)
            run_result.add_from(rr)

        return run_result
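
Note the pattern here: LocalScalingRunner reuses the parent class's do_baseline_run once per argument configuration and merges the per-configuration results with add_from. Judging by Example #11, where a single RunResult holds two (runtime, repetitions) pairs after add_values, add_from presumably appends the child result's pairs, so the merged result becomes a multi-value RunResult with one entry per input configuration.
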
Example #11
File: ExporterTest.py Project: mority/pira
    def test_add_row(self):
        rre = E.RunResultExporter()
        rr = M.RunResult(1, 1)
        rr.add_values(2, 2)
        rre.add_row("test", rr)
        self.assertEqual(len(rre.rows), 1)
        self.assertEqual(len(rre.rows[0]), 5)
        self.assertEqual(rre.rows[0][0], "test")
        self.assertEqual(rre.rows[0][1], 1)
        self.assertEqual(rre.rows[0][2], 1)
        self.assertEqual(rre.rows[0][3], 2)
        self.assertEqual(rre.rows[0][4], 2)
        self.assertEqual(rre.width, 5)
Example #12
File: Runner.py Project: rmmilewi/pira
    def do_profile_run(self,
                       target_config: TargetConfiguration,
                       instr_iteration: int,
                       compile_time_filtering: bool = True) -> ms.RunResult:
        log.get_logger().log(
            'LocalRunner::do_profile_run: Received instrumentation file: ' +
            target_config.get_instr_file(),
            level='debug')
        scorep_helper = ms.ScorepSystemHelper(self._config)
        instrument_config = InstrumentConfig(True, instr_iteration)
        scorep_helper.set_up(target_config, instrument_config,
                             compile_time_filtering)
        runtime = 0.0

        if not target_config.has_args_for_invocation():
            # This runner only takes into account the first argument string (if not already set)
            args = self._config.get_args(target_config.get_build(),
                                         target_config.get_target())
            target_config.set_args_for_invocation(args[0])

        for y in range(0, self._num_repetitions):
            log.get_logger().log(
                'LocalRunner::do_profile_run: Running instrumentation iteration '
                + str(y),
                level='debug')
            runtime = runtime + self.run(target_config, instrument_config,
                                         compile_time_filtering)
            # Enable further processing of the resulting profile
            self._sink.process(scorep_helper.get_exp_dir(), target_config,
                               instrument_config)

        run_result = ms.RunResult(runtime, self._num_repetitions)
        log.get_logger().log('[Instrument][RUNTIME] $' + str(instr_iteration) +
                             '$ ' + str(run_result.get_average()),
                             level='perf')
        return run_result
Example #13
    def test_get_instr_file_flags(self):
        s_mh = m.ScorepSystemHelper(self.cfg)
        s_mh.set_up(self.target_cfg, self.instr_cfg, True)
        instr_file = 'myFile.filt'
        ct_filter = True

        cc = m.ScorepSystemHelper.get_scorep_compliant_CC_command(
            instr_file, ct_filter)
        self.assertEqual(
            '\"clang -finstrument-functions -finstrument-functions-whitelist-inputfile='
            + instr_file + '\"', cc)
        cpp = m.ScorepSystemHelper.get_scorep_compliant_CXX_command(
            instr_file, ct_filter)
        self.assertEqual(
            '\"clang++ -finstrument-functions -finstrument-functions-whitelist-inputfile='
            + instr_file + '\"', cpp)
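
Read together with Example #8, where the defaults are the bare '"clang"'/'"clang++"' commands, the builders evidently wrap the compiler invocation in literal double quotes and, for an instrumentation run, append the allowlist flags. A hypothetical reconstruction consistent with these assertions (the branching on ct_filter is an assumption, since only the True case is asserted here):

# Hypothetical reconstruction of the command builders from Examples #8
# and #13; not PIRA's actual code.
def scorep_compliant_command(compiler, instr_file, ct_filter):
    cmd = compiler
    if ct_filter:
        cmd += (' -finstrument-functions'
                ' -finstrument-functions-whitelist-inputfile=' + instr_file)
    return '"' + cmd + '"'

assert scorep_compliant_command('clang', 'myFile.filt', True) == \
    ('"clang -finstrument-functions'
     ' -finstrument-functions-whitelist-inputfile=myFile.filt"')
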
Example #14
File: Runner.py Project: rmmilewi/pira
    def do_profile_run(self,
                       target_config: TargetConfiguration,
                       instr_iteration: int,
                       compile_time_filtering: bool = True) -> ms.RunResult:
        log.get_logger().log('LocalScalingRunner::do_profile_run')
        # We run as many experiments as we have input data configs
        # TODO: How to handle the model parameter <-> input parameter relation, do we care?
        args = self._config.get_args(target_config.get_build(),
                                     target_config.get_target())
        # TODO: How to handle multiple MeasurementResult items? We get a vector of these after this function.
        run_result = ms.RunResult()
        for arg_cfg in args:
            # Call the runner method with the correct arguments.
            target_config.set_args_for_invocation(arg_cfg)
            rr = super().do_profile_run(target_config, instr_iteration,
                                        compile_time_filtering)
            run_result.add_from(rr)

        # At this point we have all the data we need to construct an Extra-P model

        return run_result
Example #15
    def test_empty_init(self):
        rr = m.RunResult()

        self.assertFalse(rr.is_multi_value())
        self.assertRaises(RuntimeError, rr.get_average)
        self.assertRaises(RuntimeError, rr.compute_overhead, m.RunResult())