def execute_with_config(runner: Runner, analyzer: A, pira_iters: int, target_config: TargetConfiguration) -> None:
  """Run the PIRA pipeline: one vanilla baseline build/run, then pira_iters
  analyze / (re)instrument / profile iterations.

  :param runner: executes the baseline and profile runs
  :param analyzer: produces the instrumentation whitelist file per iteration
  :param pira_iters: number of instrumentation iterations to perform
  :param target_config: build/run configuration of the target
  :raises RuntimeError: when any phase of the pipeline fails
  """
  try:
    log.get_logger().log('run_setup phase.', level='debug')
    instrument = False
    pira_iterations = pira_iters

    # Build without any instrumentation
    vanilla_builder = B(target_config, instrument)
    tracker = tt.TimeTracker()
    tracker.m_track('Vanilla Build', vanilla_builder, 'build')

    # Run without instrumentation for baseline
    log.get_logger().log('Running baseline measurements', level='info')
    vanilla_rr = runner.do_baseline_run(target_config)
    log.get_logger().log(
        'Pira::execute_with_config: RunResult: ' + str(vanilla_rr) + ' | avg: ' + str(vanilla_rr.get_average()),
        level='debug')

    instr_file = ''

    for x in range(0, pira_iterations):
      log.get_logger().log('Running instrumentation iteration ' + str(x), level='info')

      # Only run the pgoe to get the functions name
      iteration_tracker = tt.TimeTracker()

      # Analysis Phase
      instr_file = analyzer.analyze(target_config, x)
      log.get_logger().log('[WHITELIST] $' + str(x) + '$ ' + str(util.lines_in_file(instr_file)), level='perf')
      util.shell('stat ' + instr_file)

      # After baseline measurement is complete, do the instrumented build/run.
      # This is only necessary in every iteration when run in compile-time mode.
      # FIX: use '==' instead of 'is' — identity comparison with an int literal
      # is implementation-dependent and raises a SyntaxWarning on CPython >= 3.8.
      if x == 0 or target_config.is_compile_time_filtering():
        instrument = True
        instr_builder = B(target_config, instrument, instr_file)
        tracker.m_track('Instrument Build', instr_builder, 'build')

      # Run Phase
      log.get_logger().log('Running profiling measurements', level='info')
      instr_rr = runner.do_profile_run(target_config, x)

      # Compute overhead of instrumentation
      ovh_percentage = instr_rr.compute_overhead(vanilla_rr)
      log.get_logger().log('[RUNTIME] $' + str(x) + '$ ' + str(instr_rr.get_average()), level='perf')
      log.get_logger().log('[OVERHEAD] $' + str(x) + '$ ' + str(ovh_percentage), level='perf')

      iteration_tracker.stop()
      user_time, system_time = iteration_tracker.get_time()
      log.get_logger().log('[ITERTIME] $' + str(x) + '$ ' + str(user_time) + ', ' + str(system_time), level='perf')

  except Exception as e:
    log.get_logger().log(
        'Pira::execute_with_config: Problem during preparation of run.\nMessage:\n' + str(e), level='error')
    # FIX: chain the original exception so the traceback is not lost.
    raise RuntimeError(str(e)) from e
def build_flavors(self, kwargs) -> None:
  """Build the configured target in its flavor, either instrumented
  (PIRA build) or plain (base build).

  :param kwargs: unused — it is overwritten below with functor kwargs derived
                 from the target configuration; kept for caller compatibility.
  :raises BuilderException: when the instrumentation prerequisite check fails
  """
  L.get_logger().log(
      'Builder::build_flavors: Building for ' + self.target_config.get_target() + ' in ' +
      self.target_config.get_flavor(),
      level='debug')
  build = self.target_config.get_build()
  benchmark = self.target_config.get_target()
  flavor = self.target_config.get_flavor()
  f_man = F.FunctorManager()  # Returns the currently loaded FM
  clean_functor = f_man.get_or_load_functor(build, benchmark, flavor, 'clean')
  kwargs = {}

  if self.build_instr:
    L.get_logger().log('Builder::build_flavors: Instrumentation', level='debug')
    try:
      self.check_build_prerequisites()
      # FIX: typo 'successfull' -> 'successful'
      L.get_logger().log('Builder::build_flavors: Prerequisite check successful.')
    except Exception as e:
      # FIX: chain the cause so the original traceback is preserved.
      raise BuilderException('Precheck failed.\n' + str(e)) from e

    if not self.target_config.is_compile_time_filtering():
      L.get_logger().log('Builder::build_flavors: Runtime filtering enabled.')
      self.target_config.set_instr_file(self.instrumentation_file)

    build_functor = f_man.get_or_load_functor(build, benchmark, flavor, 'build')
    kwargs = self.construct_pira_instr_kwargs()
    ScorepSystemHelper.prepare_MPI_filtering(self.instrumentation_file)
  else:
    L.get_logger().log('Builder::build_flavors: No instrumentation', level='debug')
    build_functor = f_man.get_or_load_functor(build, benchmark, flavor, 'basebuild')
    kwargs = self.construct_pira_kwargs()

  if build_functor.get_method()['active']:
    # FIX: this is the ACTIVE branch; the original message wrongly said 'passive'.
    L.get_logger().log('Builder::build_flavors: Running the active functor.', level='debug')
    build_functor.active(benchmark, **kwargs)
  else:
    try:
      L.get_logger().log('Builder::build_flavors: Running the passive functor.', level='debug')
      # The build command uses CC and CXX to pass flags that are needed by
      # PIRA for the given toolchain.
      build_command = build_functor.passive(benchmark, **kwargs)
      clean_command = clean_functor.passive(benchmark, **kwargs)
      L.get_logger().log(
          'Builder::build_flavors: Clean in ' + benchmark + '\n Using ' + clean_command, level='debug')
      U.shell(clean_command)
      L.get_logger().log('Builder::build_flavors: Building: ' + build_command, level='debug')
      U.shell(build_command)
    except Exception as e:
      # NOTE(review): failures here are logged but NOT re-raised — presumably a
      # deliberate best-effort build; confirm this is intended.
      L.get_logger().log('Builder::build_flavors: ' + str(e), level='error')
def test_shell_invoc(self):
  """A non-timed shell invocation yields the command's stdout and -1.0 as time."""
  cmd = 'echo "Hello World!"'
  captured, elapsed = u.shell(cmd, time_invoc=False)
  self.assertEqual(captured, 'Hello World!\n')
  self.assertEqual(elapsed, -1.0)  # XXX This is already a little fishy!
def check_build_prerequisites(cls) -> None:
  """Ensure the Score-P init source file is present in the cwd and compile it.

  Copies the bundled resource when missing; raises when it still cannot be
  found afterwards.

  :raises MeasurementSystemException: if scorep.init.c is unavailable
  """
  init_src = 'scorep.init.c'
  L.get_logger().log(
      'ScorepMeasurementSystem::check_build_prerequisites: global home dir: ' + U.get_home_dir())
  bundled_resource = U.get_home_dir() + '/resources/scorep.init.c'
  if not U.is_file(init_src):
    U.copy_file(bundled_resource, U.get_cwd() + '/' + init_src)

  # In case something goes wrong with copying
  if not U.is_file(init_src):
    raise MeasurementSystemException(
        'ScorepMeasurementSystem::check_build_prerequisites: Missing ' + init_src)
  U.shell('gcc -c ' + init_src)
def test_shell_dry_run(self):
  """A dry-run shell call logs the command, returns empty output and time 1.0."""
  cmd = 'echo "Hello world!"'
  output, elapsed = u.shell(cmd, dry=True)
  last_logged = log.get_logger().get_last_msg()
  self.assertEqual(last_logged, '[debug] Utility::shell: DRY RUN SHELL CALL: ' + cmd)
  self.assertEqual(elapsed, 1.0)
  self.assertEqual(output, '')
def run(self, target_config: TargetConfiguration, instrument_config: InstrumentConfig,
        compile_time_filtering: bool) -> float:
  """ Implements the actual invocation """
  # NOTE(review): instrument_config and compile_time_filtering are accepted but
  # never read in this method — possibly used by overrides; confirm.
  functor_manager = fm.FunctorManager()
  run_functor = functor_manager.get_or_load_functor(
      target_config.get_build(), target_config.get_target(), target_config.get_flavor(), 'run')
  # Backend defaults supply the base kwargs; LD_PRELOAD injects the MPI wrapper.
  default_provider = defaults.BackendDefaults()
  kwargs = default_provider.get_default_kwargs()
  kwargs['util'] = util
  kwargs['LD_PRELOAD'] = default_provider.get_MPI_wrap_LD_PRELOAD()
  runtime = .0
  if run_functor.get_method()['active']:
    # Active functor performs the run itself; no measurable runtime, so 1.0 is
    # used as a sentinel value.
    run_functor.active(target_config.get_target(), **kwargs)
    log.get_logger().log(
        'For the active functor we can barely measure runtime', level='warn')
    runtime = 1.0
  # NOTE(review): execution falls through to the passive invocation below even
  # when the active branch ran — confirm whether an early return is intended.
  try:
    # Run from the target's working directory with the configured arguments.
    util.change_cwd(target_config.get_place())
    invoke_arguments = target_config.get_args_for_invocation()
    kwargs['args'] = invoke_arguments
    if invoke_arguments is not None:
      log.get_logger().log('LocalBaseRunner::run: (args) ' + str(invoke_arguments))
    # Passive functor returns the shell command string; time its execution.
    command = run_functor.passive(target_config.get_target(), **kwargs)
    _, runtime = util.shell(command, time_invoc=True)
    log.get_logger().log(
        'LocalBaseRunner::run::passive_invocation -> Returned runtime: ' + str(runtime), level='debug')
  except Exception as e:
    log.get_logger().log('LocalBaseRunner::run Exception\n' + str(e), level='error')
    raise RuntimeError('LocalBaseRunner::run caught exception. ' + str(e))
  # TODO: Insert the data into the database
  return runtime
def prepare_MPI_filtering(cls, filter_file: str) -> None:
  """Generate and compile an MPI wrapper shared object that filters out every
  MPI function NOT present in the whitelist file.

  :param filter_file: path to the whitelist of functions to keep instrumented
  """
  # Find which MPI functions to filter
  # Get all MPI functions (our filter_file is a WHITELIST)
  default_provider = D.BackendDefaults()
  mpi_funcs_dump = os.path.join(default_provider.instance.get_pira_dir(), 'mpi_funcs.dump')
  U.shell('wrap.py -d > ' + mpi_funcs_dump)
  all_MPI_functions_decls = U.read_file(mpi_funcs_dump).split('\n')
  all_MPI_functions = []
  for fd in all_MPI_functions_decls:
    # FIX: skip blank/malformed lines. Previously fd.find(...) returned -1 for
    # them and empty strings were appended to the function list.
    if '(' not in fd:
      continue
    # Extract the function name between the return type and the parameter list.
    name = fd[fd.find(' '):fd.find('(')]
    all_MPI_functions.append(name.strip())

  file_content = U.read_file(filter_file).split('\n')
  # We always want to measure MPI_Init and MPI_Finalize
  file_content.append('MPI_Init')
  file_content.append('MPI_Finalize')
  for l in file_content:
    if l.find('MPI_') > -1:
      L.get_logger().log('ScorepSystemHelper::prepare_MPI_filtering: Remove ' + l)
      # prevent double removal
      if l in all_MPI_functions:
        all_MPI_functions.remove(l)

  # Generate the .c file using the mpi wrap.py script
  L.get_logger().log('ScorepSystemHelper::prepare_MPI_filtering: About to filter ' +
                     str(len(all_MPI_functions)) + ' MPI functions')
  wrap_script = '{{fn PIRA_Filter'
  for mpi_func in all_MPI_functions:
    wrap_script += ' ' + mpi_func
  wrap_script += '}}\n{{callfn}}\n{{endfn}}'

  # FIX: reuse the BackendDefaults instance created above instead of
  # constructing a second one; also removed the unused MPI_functions_to_filter.
  wrap_file = default_provider.get_wrap_w_file()
  if U.check_file(wrap_file):
    U.remove_file(wrap_file)
  U.write_file(wrap_file, wrap_script)

  wrap_c_path = default_provider.get_wrap_c_file()
  wrap_command = 'wrap.py -o ' + wrap_c_path + ' ' + wrap_file
  U.shell(wrap_command)
  # Compile it to .so file
  compile_mpi_wrapper_command = 'mpicc -shared -fPIC -o ' + default_provider.get_wrap_so_file() + ' ' + wrap_c_path
  U.shell(compile_mpi_wrapper_command)
def execute_with_config(runner: Runner, analyzer: A, pira_iters: int, target_config: TargetConfiguration,
                        csv_config: CSVConfiguration) -> None:
  """Run the PIRA pipeline with optional CSV export: one vanilla baseline
  build/run, then pira_iters analyze / (re)instrument / profile iterations.

  :param runner: executes the baseline and profile runs
  :param analyzer: produces the instrumentation whitelist file per iteration
  :param pira_iters: number of instrumentation iterations to perform
  :param target_config: build/run configuration of the target
  :param csv_config: controls whether/where run results are exported as CSV
  :raises RuntimeError: when any phase of the pipeline fails
  """
  try:
    instrument = False
    pira_iterations = pira_iters
    hybrid_filtering = target_config.is_hybrid_filtering()
    compile_time_filtering = target_config.is_compile_time_filtering()
    hybrid_filter_iters = target_config.get_hybrid_filter_iters()
    rr_exporter = E.RunResultExporter()

    # Build without any instrumentation
    L.get_logger().log('Building vanilla version for baseline measurements', level='info')
    vanilla_builder = BU(target_config, instrument)
    tracker = T.TimeTracker()
    tracker.m_track('Vanilla Build', vanilla_builder, 'build')

    # Run without instrumentation for baseline
    L.get_logger().log('Running baseline measurements', level='info')
    vanilla_rr = runner.do_baseline_run(target_config)
    L.get_logger().log('Pira::execute_with_config: RunResult: ' + str(vanilla_rr) + ' | avg: ' +
                       str(vanilla_rr.get_average()),
                       level='debug')

    instr_file = ''

    if (csv_config.should_export()):
      rr_exporter.add_row('Vanilla', vanilla_rr)

    for x in range(0, pira_iterations):
      L.get_logger().log('Running instrumentation iteration ' + str(x), level='info')

      # Only run the pgoe to get the functions name
      iteration_tracker = T.TimeTracker()

      # Analysis Phase
      instr_file = analyzer.analyze(target_config, x)
      L.get_logger().log('[WHITELIST] $' + str(x) + '$ ' + str(U.lines_in_file(instr_file)), level='perf')
      U.shell('stat ' + instr_file)

      # After baseline measurement is complete, do the instrumented build/run.
      # This is only necessary in every iteration when run in compile-time mode.
      # For hybrid-filtering this is done after the specified amount of iterations.
      # FIX: compare ints with '==' rather than 'is' — identity comparison with
      # int values is implementation-dependent and warns on CPython >= 3.8.
      if (hybrid_filtering and (x % hybrid_filter_iters == 0)) or x == 0 or compile_time_filtering:
        instrument = True
        instr_builder = BU(target_config, instrument, instr_file)
        tracker.m_track('Instrument Build', instr_builder, 'build')

      # Run Phase
      L.get_logger().log('Running profiling measurements', level='info')
      instr_rr = runner.do_profile_run(target_config, x)

      if (csv_config.should_export()):
        rr_exporter.add_row('Instrumented ' + str(x), instr_rr)

      # Compute overhead of instrumentation
      ovh_percentage = instr_rr.compute_overhead(vanilla_rr)
      L.get_logger().log('[RUNTIME] $' + str(x) + '$ ' + str(instr_rr.get_average()), level='perf')
      L.get_logger().log('[OVERHEAD] $' + str(x) + '$ ' + str(ovh_percentage), level='perf')

      iteration_tracker.stop()
      user_time, system_time = iteration_tracker.get_time()
      L.get_logger().log('[ITERTIME] $' + str(x) + '$ ' + str(user_time) + ', ' + str(system_time), level='perf')

    if (csv_config.should_export()):
      file_name = target_config.get_target() + '_' + target_config.get_flavor() + '.csv'
      csv_file = os.path.join(csv_config.get_csv_dir(), file_name)
      try:
        U.make_dir(csv_config.get_csv_dir())
        rr_exporter.export(csv_file, csv_config.get_csv_dialect())
      except Exception as e:
        # CSV export failure is non-fatal: log it and continue.
        L.get_logger().log(
            'Pira::execute_with_config: Problem writing CSV file\nMessage:\n' + str(e), level='error')

  except Exception as e:
    L.get_logger().log(
        'Pira::execute_with_config: Problem during preparation of run.\nMessage:\n' + str(e), level='error')
    # FIX: chain the original exception so the traceback is not lost.
    raise RuntimeError(str(e)) from e