Example #1
0
def execute_with_config(runner: Runner, analyzer: A, pira_iters: int, target_config: TargetConfiguration) -> None:
  """Run the PIRA measurement cycle for one target configuration.

  Builds and runs the vanilla target for a baseline measurement, then performs
  pira_iters iterations of: analyze -> (re)build instrumented -> profile run,
  logging whitelist size, runtime, overhead and per-iteration timing.

  :param runner: executes the baseline and profile runs
  :param analyzer: produces the instrumentation (whitelist) file each iteration
  :param pira_iters: number of instrumentation iterations to perform
  :param target_config: target/build configuration under measurement
  :raises RuntimeError: wraps any error raised during the run
  """
  try:
    log.get_logger().log('run_setup phase.', level='debug')
    instrument = False
    pira_iterations = pira_iters

    # Build without any instrumentation
    vanilla_builder = B(target_config, instrument)
    tracker = tt.TimeTracker()
    tracker.m_track('Vanilla Build', vanilla_builder, 'build')

    # Run without instrumentation for baseline
    log.get_logger().log('Running baseline measurements', level='info')
    vanilla_rr = runner.do_baseline_run(target_config)
    log.get_logger().log(
        'Pira::execute_with_config: RunResult: ' + str(vanilla_rr) + ' | avg: ' + str(vanilla_rr.get_average()),
        level='debug')
    instr_file = ''

    for x in range(pira_iterations):
      log.get_logger().log('Running instrumentation iteration ' + str(x), level='info')

      # Track the cost of this iteration only (analysis + build + run).
      iteration_tracker = tt.TimeTracker()

      # Analysis Phase: obtain the instrumentation (whitelist) file.
      instr_file = analyzer.analyze(target_config, x)
      log.get_logger().log('[WHITELIST] $' + str(x) + '$ ' + str(util.lines_in_file(instr_file)), level='perf')
      util.shell('stat ' + instr_file)

      # After baseline measurement is complete, do the instrumented build/run.
      # This is only necessary in every iteration when run in compile-time mode.
      # BUG FIX: use '==' instead of 'is' — identity comparison with an int
      # literal only works by accident of CPython's small-int cache.
      if x == 0 or target_config.is_compile_time_filtering():
        instrument = True
        instr_builder = B(target_config, instrument, instr_file)
        tracker.m_track('Instrument Build', instr_builder, 'build')

      # Run Phase
      log.get_logger().log('Running profiling measurements', level='info')
      instr_rr = runner.do_profile_run(target_config, x)

      # Compute overhead of instrumentation relative to the vanilla baseline.
      ovh_percentage = instr_rr.compute_overhead(vanilla_rr)
      log.get_logger().log('[RUNTIME] $' + str(x) + '$ ' + str(instr_rr.get_average()), level='perf')
      log.get_logger().log('[OVERHEAD] $' + str(x) + '$ ' + str(ovh_percentage), level='perf')

      iteration_tracker.stop()
      user_time, system_time = iteration_tracker.get_time()
      log.get_logger().log('[ITERTIME] $' + str(x) + '$ ' + str(user_time) + ', ' + str(system_time), level='perf')

  except Exception as e:
    log.get_logger().log(
        'Pira::execute_with_config: Problem during preparation of run.\nMessage:\n' + str(e), level='error')
    # Chain the original exception so its traceback is preserved.
    raise RuntimeError(str(e)) from e
Example #2
0
 def test_m_track(self):
     """m_track invokes the named method on the object and returns (result, times)."""
     dummy = Dummy(1)
     timer = tt.TimeTracker()
     result, elapsed = timer.m_track('obj invocation', dummy, 'func')
     self.assertGreater(elapsed[0], -1.0)
     self.assertGreater(elapsed[1], -1.0)
     self.assertEqual(result, 2)
     self.assertEqual(dummy.val, 2)
Example #3
0
def execute_with_config(runner: Runner, analyzer: A, pira_iters: int,
                        target_config: TargetConfiguration,
                        csv_config: CSVConfiguration) -> None:
    """Run the PIRA measurement cycle with optional CSV export of results.

    Builds and runs the vanilla target for a baseline, then performs
    pira_iters iterations of: analyze -> (re)build instrumented -> profile,
    logging whitelist size, runtime, overhead and per-iteration timing.
    In hybrid-filtering mode the instrumented rebuild happens every
    hybrid_filter_iters iterations; in compile-time mode every iteration.

    :param runner: executes the baseline and profile runs
    :param analyzer: produces the instrumentation (whitelist) file each iteration
    :param pira_iters: number of instrumentation iterations to perform
    :param target_config: target/build configuration under measurement
    :param csv_config: controls whether/where run results are exported as CSV
    :raises RuntimeError: wraps any error raised during the run
    """
    try:
        instrument = False
        pira_iterations = pira_iters
        hybrid_filtering = target_config.is_hybrid_filtering()
        compile_time_filtering = target_config.is_compile_time_filtering()
        hybrid_filter_iters = target_config.get_hybrid_filter_iters()

        rr_exporter = E.RunResultExporter()

        # Build without any instrumentation
        L.get_logger().log(
            'Building vanilla version for baseline measurements', level='info')
        vanilla_builder = BU(target_config, instrument)
        tracker = T.TimeTracker()
        tracker.m_track('Vanilla Build', vanilla_builder, 'build')

        # Run without instrumentation for baseline
        L.get_logger().log('Running baseline measurements', level='info')
        vanilla_rr = runner.do_baseline_run(target_config)
        L.get_logger().log('Pira::execute_with_config: RunResult: ' +
                           str(vanilla_rr) + ' | avg: ' +
                           str(vanilla_rr.get_average()),
                           level='debug')
        instr_file = ''

        if csv_config.should_export():
            rr_exporter.add_row('Vanilla', vanilla_rr)

        for x in range(pira_iterations):
            L.get_logger().log('Running instrumentation iteration ' + str(x),
                               level='info')

            # Track the cost of this iteration only (analysis + build + run).
            iteration_tracker = T.TimeTracker()

            # Analysis Phase: obtain the instrumentation (whitelist) file.
            instr_file = analyzer.analyze(target_config, x)
            L.get_logger().log('[WHITELIST] $' + str(x) + '$ ' +
                               str(U.lines_in_file(instr_file)),
                               level='perf')
            U.shell('stat ' + instr_file)

            # After baseline measurement is complete, do the instrumented build/run.
            # This is only necessary in every iteration when run in compile-time mode.
            # For hybrid-filtering this is done after the specified amount of iterations.
            # BUG FIX: compare ints with '==' instead of 'is' — identity checks on
            # int literals rely on CPython's small-int cache and are wrong in general.
            if (hybrid_filtering and x % hybrid_filter_iters == 0) \
                    or x == 0 or compile_time_filtering:
                instrument = True
                instr_builder = BU(target_config, instrument, instr_file)
                tracker.m_track('Instrument Build', instr_builder, 'build')

            # Run Phase
            L.get_logger().log('Running profiling measurements', level='info')
            instr_rr = runner.do_profile_run(target_config, x)

            if csv_config.should_export():
                rr_exporter.add_row('Instrumented ' + str(x), instr_rr)

            # Compute overhead of instrumentation relative to the vanilla baseline.
            ovh_percentage = instr_rr.compute_overhead(vanilla_rr)
            L.get_logger().log('[RUNTIME] $' + str(x) + '$ ' +
                               str(instr_rr.get_average()),
                               level='perf')
            L.get_logger().log('[OVERHEAD] $' + str(x) + '$ ' +
                               str(ovh_percentage),
                               level='perf')

            iteration_tracker.stop()
            user_time, system_time = iteration_tracker.get_time()
            L.get_logger().log('[ITERTIME] $' + str(x) + '$ ' +
                               str(user_time) + ', ' + str(system_time),
                               level='perf')

        if csv_config.should_export():
            file_name = target_config.get_target(
            ) + '_' + target_config.get_flavor() + '.csv'
            csv_file = os.path.join(csv_config.get_csv_dir(), file_name)
            try:
                U.make_dir(csv_config.get_csv_dir())
                rr_exporter.export(csv_file, csv_config.get_csv_dialect())
            except Exception as e:
                # CSV export failure is logged but does not abort the run.
                L.get_logger().log(
                    'Pira::execute_with_config: Problem writing CSV file\nMessage:\n'
                    + str(e),
                    level='error')

    except Exception as e:
        L.get_logger().log(
            'Pira::execute_with_config: Problem during preparation of run.\nMessage:\n'
            + str(e),
            level='error')
        # Chain the original exception so its traceback is preserved.
        raise RuntimeError(str(e)) from e
Example #4
0
 def test_f_track_arg(self):
     """f_track forwards the extra positional argument to the tracked function."""
     timer = tt.TimeTracker()
     result, elapsed = timer.f_track('invocation 2', func1, 2)
     self.assertGreater(elapsed[0], -1.0)
     self.assertGreater(elapsed[1], -1.0)
     self.assertEqual(result, 3)
Example #5
0
 def test_create(self):
     """A TimeTracker can be constructed without arguments."""
     _ = tt.TimeTracker()
Example #6
0
    def analyze_local(self, flavor: str, build: str, benchmark: str,
                      kwargs: dict, iterationNumber: int) -> str:
        """Run the analysis step for one benchmark/flavor locally.

        Loads the 'analyze' functor, optionally writes the PGIS config file
        (when the profile sink provides one), and either invokes the functor
        actively or, in passive mode, runs the analyzer command while rotating
        the instrumentation file between iterations.

        NOTE(review): the active path returns None despite the '-> str'
        annotation — confirm callers handle that.

        :param flavor: build flavor name
        :param build: build identifier used for functor/config lookup
        :param benchmark: benchmark identifier
        :param kwargs: extra functor arguments; 'analyzer_dir' is injected here
        :param iterationNumber: current PIRA iteration (0 = initial analysis)
        :return: path to the generated instrumentation file (passive mode)
        :raises Exception: if the passive analysis fails
        """
        fm = F.FunctorManager()
        analyze_functor = fm.get_or_load_functor(build, benchmark, flavor,
                                                 'analyze')
        analyzer_dir = self.config.get_analyzer_dir(build, benchmark)
        kwargs['analyzer_dir'] = analyzer_dir

        # The invoke args can be retrieved from the configuration object.
        # Since the invoke args are iterable, we can create all necessary argument tuples here.

        # We construct a json file that contains the necessary information to be parsed by the
        # PGIS tool. That way, we can make it easily traceable and debug from manual inspection.
        # This will be the new standard way of pushing information to PGIS.
        pgis_cfg_file = None
        if self._profile_sink.has_config_output():
            pgis_cfg_file = self._profile_sink.output_config(
                benchmark, analyzer_dir)

        if analyze_functor.get_method()['active']:
            analyze_functor.active(benchmark, **kwargs)

        else:
            L.get_logger().log('Analyzer::analyze_local: Using passive mode')
            try:
                exp_dir = self.config.get_analyzer_exp_dir(build, benchmark)
                isdirectory_good = U.check_provided_directory(analyzer_dir)
                command = analyze_functor.passive(benchmark, **kwargs)

                L.get_logger().log('Analyzer::analyze_local: Command = ' +
                                   command)

                benchmark_name = self.config.get_benchmark_name(benchmark)

                # BUG FIX: instr_files / prev_instr_file were previously only
                # assigned when isdirectory_good held, which produced an opaque
                # UnboundLocalError further down for an invalid analyzer
                # directory. Fail early with a clear message instead; the
                # except-clause below still converts it to the same Exception
                # type callers already handle.
                if not isdirectory_good:
                    raise RuntimeError(
                        'Analyzer::analyze_local: bad analyzer directory: ' +
                        str(analyzer_dir))

                U.change_cwd(analyzer_dir)
                L.get_logger().log('Analyzer::analyzer_local: Flavor = ' +
                                   flavor + ' | benchmark_name = ' +
                                   benchmark_name)
                instr_files = U.build_instr_file_path(
                    analyzer_dir, flavor, benchmark_name)
                L.get_logger().log(
                    'Analyzer::analyzer_local: instrumentation file = ' +
                    instr_files)
                prev_instr_file = U.build_previous_instr_file_path(
                    analyzer_dir, flavor, benchmark_name)

                tracker = T.TimeTracker()

                # TODO: Alternate between expansion and pure filtering.

                if iterationNumber > 0 and U.is_file(instr_files):
                    # Subsequent iteration: keep the previous whitelist around
                    # and re-run the analyzer with instrumentation context.
                    L.get_logger().log(
                        'Analyzer::analyze_local: instr_file available')
                    U.rename(instr_files, prev_instr_file)
                    tracker.m_track('Analysis', U, 'run_analyzer_command',
                                    command, analyzer_dir, flavor,
                                    benchmark_name, exp_dir, iterationNumber,
                                    pgis_cfg_file)
                    L.get_logger().log(
                        'Analyzer::analyze_local: command finished',
                        level='debug')
                else:
                    # First iteration (or no whitelist yet): run the
                    # no-instrumentation analyzer command.
                    tracker.m_track('Initial analysis', U,
                                    'run_analyzer_command_noInstr', command,
                                    analyzer_dir, flavor, benchmark_name)

                self.tear_down(build, exp_dir)
                return instr_files

            except Exception as e:
                L.get_logger().log(str(e), level='error')
                # Chain the original exception so its traceback is preserved.
                raise Exception('Problem in Analyzer') from e
Example #7
0
 def test_f_track(self):
   """f_track runs the given function and returns (result, times)."""
   timer = T.TimeTracker()
   result, elapsed = timer.f_track('invocation', func)
   self.assertGreater(elapsed[0], -1.0)
   self.assertGreater(elapsed[1], -1.0)
   self.assertEqual(result, 0)