def tear_down(self, old_dir, exp_dir):
  """Leave the experiment directory by restoring the previous CWD.

  Any failure while changing directory is logged, not raised.
  """
  if U.check_provided_directory(exp_dir):
    try:
      U.change_cwd(old_dir)
    except Exception as e:
      L.get_logger().log(str(e), level='error')
def _get_callable(self, obj, name):
  """Return the attribute `name` of `obj`, logging and re-raising on failure.

  :obj: object to look the attribute up on
  :name: attribute name to retrieve
  :raises: whatever getattr raises (typically AttributeError)
  """
  try:
    return getattr(obj, name)
  except Exception as e:
    L.get_logger().log('No such attribute', level='error')
    # FIX: bare 'raise' re-raises with the original traceback intact;
    # 'raise e' would restart the traceback at this line.
    raise
def get_args_for_invocation(self) -> str:
  """Return the stored invocation arguments; warn when they are unset."""
  args = self._args_for_invocation
  if args is None:
    L.get_logger().log(
        'TargetConfiguration::get_args_for_invocation: args are None.', level='warn')
  return args
def create_item_from_json(self, item_key: str, item_tree):
  """Build a PiraItem from the JSON subtree stored under `item_key`.

  Falls back to the bundled PGIS install when the 'analyzer' entry is empty.
  :item_key: str: name of the item in the tree
  :item_tree: dict-like JSON subtree holding the item's settings
  :returns: a fully populated PiraItem
  """
  pira_item = PiraItem(item_key)
  analyzer_dir = item_tree[item_key]['analyzer']
  # FIX: compare with '==' instead of 'is' -- identity comparison against a
  # str literal only works by accident of interning, so the empty-string
  # fallback could silently be skipped.
  if analyzer_dir == '':
    analyzer_dir = U.get_base_dir(__file__) + '/../extern/install/pgis/bin'
    L.get_logger().log('Analyzer: using analyzer default: ' + analyzer_dir, level='debug')
  cubes_dir = item_tree[item_key]['cubes']
  flavors = item_tree[item_key]['flavors']
  functors_base_path = item_tree[item_key]['functors']
  mode = item_tree[item_key]['mode']
  run_opts = self.get_parameter(item_tree, item_key)
  run_options = ArgumentMapperFactory.get_mapper(run_opts)
  pira_item.set_analyzer_dir(analyzer_dir)
  pira_item.set_cubes_dir(cubes_dir)
  pira_item.set_flavors(flavors)
  pira_item.set_functors_base_path(functors_base_path)
  pira_item.set_mode(mode)
  pira_item.set_run_options(run_options)
  return pira_item
def parse_from_json(self, json_tree) -> None:
  """Populate the configuration from a simplified JSON tree."""
  # The top-level directory table is optional; fall back to an empty map.
  try:
    directories = U.json_to_canonic(json_tree[_DIRS])
  except Exception as e:
    L.get_logger().log('SimplifiedConfigurationLoader::parse_from_json: ' + str(e))
    directories = {}

  for tld_build in json_tree[_BUILDS]:
    # Each top-level build entry, e.g. %astar and alike.
    directory_for_item = U.json_to_canonic(tld_build)
    if self.is_escaped(directory_for_item):
      # Escaped names alias into the directories table (strip the escape char).
      directory_for_item = directories[directory_for_item[1:]]

    item_tree = U.json_to_canonic(json_tree[_BUILDS][tld_build][_ITEMS])
    for item_key in item_tree:
      L.get_logger().log('SimplifiedConfigurationLoader::parse_from_json: ' + str(item_key))
      self._config.add_item(directory_for_item, self.create_item_from_json(item_key, item_tree))
      self._config._empty = False
def read_batch_queued_job():
  """Return the stripped lines of the queued-job state file.

  Logs an error and exits the process when the file does not exist.
  """
  if not check_queued_job():
    log.get_logger().log('Batch system queued file does not exist. Exit.', level='error')
    exit(1)
  # FIX: open via context manager -- the original left the file handle
  # unclosed (relying on GC).
  with open(queued_job_filename) as f:
    return [line.rstrip('\n') for line in f]
def construct_pira_instr_kwargs(self) -> typing.Dict:
  """Assemble the keyword arguments used for an instrumented PIRA build.

  Only valid in instrumentation mode; raises BuilderException otherwise.
  """
  L.get_logger().log('Builder::construct_pira_instr_keywords', level='debug')
  if not self.build_instr:
    raise BuilderException('Should not construct instrument kwargs in non-instrumentation mode.')

  cc_cmd = ScorepSystemHelper.get_scorep_compliant_CC_command(self.instrumentation_file, self._compile_time_filtering)
  cxx_cmd = ScorepSystemHelper.get_scorep_compliant_CXX_command(self.instrumentation_file, self._compile_time_filtering)
  c_libs = ScorepSystemHelper.get_scorep_needed_libs_c()
  cxx_libs = ScorepSystemHelper.get_scorep_needed_libs_cxx()
  defaults_provider = D.BackendDefaults()
  kwargs = {
      'CC': cc_cmd,
      'CXX': cxx_cmd,
      'CLFLAGS': c_libs,
      'CXXLFLAGS': cxx_libs,
      'PIRANAME': defaults_provider.get_default_exe_name(),
      'NUMPROCS': defaults_provider.get_default_number_of_processes(),
      'filter-file': self.instrumentation_file
  }
  L.get_logger().log('Builder::construct_pira_instr_keywords Returning.', level='debug')
  return kwargs
def run_analyzer_command(command: str, analyzer_dir: str, flavor: str, benchmark_name: str,
                         exp_dir: str, iterationNumber: int, pgis_cfg_file: str) -> None:
  """Invoke the PGIS analyzer on the call-graph and Cube profile data.

  With no Extra-P config (`pgis_cfg_file is None`) the PIRA 1 analysis is
  run on a single cube file; otherwise the Extra-P based analysis is used,
  optionally exporting performance models / runtime-only data depending on
  the module-level flags.
  """
  global export_performance_models
  global export_runtime_only
  export_str = ' '
  if export_performance_models:
    export_str += ' --export'
  if export_runtime_only:
    export_str += ' --runtime-only'
  ipcg_file = get_ipcg_file_name(analyzer_dir, benchmark_name, flavor)
  cubex_dir = get_cube_file_path(exp_dir, flavor, iterationNumber - 1)
  cubex_file = get_cubex_file(cubex_dir, benchmark_name, flavor)
  # PIRA version 1 runner, i.e., only consider raw runtime of single run
  if pgis_cfg_file is None:
    L.get_logger().log('Utility::run_analyzer_command: using PIRA 1 Analyzer', level='info')
    sh_cmd = command + ' --scorep-out ' + ipcg_file + ' -c ' + cubex_file
    L.get_logger().log('Utility::run_analyzer_command: INSTR: Run cmd: ' + sh_cmd)
    out, _ = shell(sh_cmd)
    L.get_logger().log('Utility::run_analyzer_command: Output of analyzer:\n' + out, level='debug')
    return
  # FIX: removed the dead local 'extrap_cfg_file' and the commented-out
  # command variant that referenced it.
  sh_cmd = command + export_str + ' --scorep-out --extrap ' + pgis_cfg_file + ' ' + ipcg_file
  L.get_logger().log('Utility::run_analyzer_command: INSTR: Run cmd: ' + sh_cmd)
  out, _ = shell(sh_cmd)
  L.get_logger().log('Utility::run_analyzer_command: Output of analyzer:\n' + out, level='debug')
def get_or_load_functor(self, build: str, item: str, flavor: str, func: str):
  '''
  We use the wholename, i.e. fully qualified path to the functor, as the key
  in our functor cache.

  :func: one of 'basebuild', 'build', 'clean', 'analyze', 'run'
  :raises Exception: for any other value of func
  '''
  # FIX: use '==' instead of 'is' -- identity comparison against str
  # literals depends on interning and can silently select the wrong branch.
  if func == 'basebuild':
    path, name, wnm = self.get_builder(build, item, flavor, True)
  elif func == 'build':
    path, name, wnm = self.get_builder(build, item, flavor)
  elif func == 'clean':
    path, name, wnm = self.get_cleaner(build, item, flavor)
  elif func == 'analyze':
    path, name, wnm = self.get_analyzer(build, item, flavor)
  elif func == 'run':
    path, name, wnm = self.get_runner(build, item, flavor)
  else:
    raise Exception('No such option available to load functor for. Value = ' + func)
  try:
    _ = self.functor_cache[name]
  except KeyError:
    self.functor_cache[name] = U.load_functor(path, name)
  L.get_logger().log(
      'FunctorManager::get_or_load: The retrieved ' + func + ' functor: ' +
      str(self.functor_cache[name]),
      level='debug')
  return self.functor_cache[name]
def f_track(self, sec_name, function, *args):
  """Time a free-function call and log the duration under `sec_name`.

  Returns a tuple of (function result, timing tuple).
  """
  self._start()
  result = function(*args)
  self.stop()
  elapsed = self.get_time()
  L.get_logger().log(sec_name + ' took %.3f seconds' % elapsed[0], level='perf')
  return (result, elapsed)
def show_pira_invoc_info(cmdline_args) -> None:
  """Log the filtering mode and configuration path PIRA was invoked with."""
  invoc_cfg = process_args_for_invoc(cmdline_args)
  filtering = 'runtime filtering'
  if invoc_cfg.is_compile_time_filtering():
    filtering = 'compile-time filtering'
  log.get_logger().log(
      'Pira::main: Running PIRA in ' + filtering + ' with configuration\n ' +
      str(invoc_cfg.get_path_to_cfg()),
      level='info')
def lines_in_file(file_name: str) -> int:
  """Count newline-separated lines in `file_name`; 0 when the file is absent."""
  if not is_file(file_name):
    L.get_logger().log(
        'Utility::lines_in_file: No file ' + file_name + ' to read. Return 0 lines',
        level='debug')
    return 0
  return len(read_file(file_name).split('\n'))
def m_track(self, sec_name, obj, method_name, *args):
  """Time a bound-method call and log the duration under `sec_name`.

  Returns a tuple of (method result, timing tuple).
  """
  bound = self._get_callable(obj, method_name)
  self._start()
  result = bound(*args)
  self.stop()
  elapsed = self.get_time()
  L.get_logger().log(sec_name + ' took %.3f seconds' % elapsed[0], level='perf')
  return (result, elapsed)
def set_up(self) -> None:
  """Switch the CWD to the build directory, remembering where we came from.

  Sets the error flag and raises when the directory is not usable.
  """
  L.get_logger().log('Builder::set_up for ' + self.directory)
  if not U.check_provided_directory(self.directory):
    self.error = True
    raise Exception('Builder::set_up: Could not change to directory')
  self.old_cwd = U.get_cwd()
  U.change_cwd(self.directory)
def set_filter_file(self, file_name: str) -> None:
  """Register `file_name` as the Score-P runtime filter file.

  Validates the name, stores it, and exports it via SCOREP_FILTERING_FILE.
  """
  L.get_logger().log(
      'ScorepMeasurementSystem::set_filter_file: File for runtime filtering = ' + file_name)
  if not U.is_valid_file_name(file_name):
    raise MeasurementSystemException('Score-P filter file not valid.')
  self.cur_filter_file = file_name
  U.set_env('SCOREP_FILTERING_FILE', file_name)
def get_average(self, pos: int = 0) -> float:
  """Return the mean accumulated runtime at index `pos`.

  :raises RuntimeError: when no iterations have been recorded.
  """
  if self._nr_of_iterations == 0 or self._nr_of_iterations == []:
    L.get_logger().log('Calculating average based on 0 repetitions - assuming 1', level='warn')
    # FIX: removed the unreachable 'self._nr_of_iterations = 1' that
    # followed this raise; it could never execute.
    raise RuntimeError('Calculating average based on 0 repetitions impossible.')
  return self._accumulated_runtime[pos] / self._nr_of_iterations[pos]
def build(self) -> None:
  """Drive the set_up -> build_detail -> tear_down sequence.

  BuilderExceptions are logged; a set error flag escalates to a hard failure.
  """
  try:
    for step in (self.set_up, self.build_detail, self.tear_down):
      step()
  except BuilderException as e:
    L.get_logger().log('Builder::build: Caught exception ' + str(e), level='warn')
  if self.error:
    raise Exception('Severe Problem in Builder::build')
def construct_pira_kwargs(self) -> typing.Dict:
  """Return the default backend kwargs with linker flags cleared."""
  log.get_logger().log('Builder::construct_pira_keywords', level='debug')
  kwargs = defaults.BackendDefaults().get_default_kwargs()
  kwargs.update({'CLFLAGS': '', 'CXXLFLAGS': ''})
  log.get_logger().log('Builder::construct_pira_keywords Returning.', level='debug')
  return kwargs
def get_data_elem(self, key: str):
  """Return the value stored for `key`; log and return '' when absent.

  :key: str: lookup key into the helper's data map
  """
  # FIX: dropped the redundant '.keys()' call and the dead try/except --
  # a membership-guarded lookup cannot raise KeyError.
  if key in self.data:
    return self.data[key]
  L.get_logger().log('Key ' + key + ' was not found in ScorepSystemHelper')
  return ''
def __init__(self, configuration: PiraConfiguration, sink, num_repetitions: int = 5):
  # Construct the scaling runner and delegate to the base-class constructor.
  #
  # NOTE(review): the guard only rejects negative values, while the error
  # message demands at least 3 repetitions for Extra-P modelling, and the
  # warn message reads like a leftover debugging reminder -- confirm the
  # intended minimum before relying on this check.
  if num_repetitions < 0:
    log.get_logger().log(
        'REMEMBER TO REMOVE IN LocalScalingRunner::__init__', level='warn')
    raise RuntimeError(
        'At least 3 repetitions are required for Extra-P modelling.')
  super().__init__(configuration, sink, num_repetitions)
def do_baseline_run(self, target_config: TargetConfiguration) -> ms.RunResult:
  """Execute the baseline once per argument mapping and merge the results."""
  log.get_logger().log('LocalScalingRunner::do_baseline_run')
  arg_mappings = self._config.get_args(target_config.get_build(), target_config.get_target())
  accumulated = ms.RunResult()
  for arg_cfg in arg_mappings:
    target_config.set_args_for_invocation(arg_cfg)
    accumulated.add_from(super().do_baseline_run(target_config))
  return accumulated
def construct_pira_kwargs(self) -> typing.Dict:
  """Return non-instrumentation build kwargs; only valid in vanilla mode."""
  L.get_logger().log('Builder::construct_pira_keywords', level='debug')
  if self.build_instr:
    raise BuilderException('Should not construct non-instrument kwargs in instrumentation mode.')
  kwargs = D.BackendDefaults().get_default_kwargs()
  kwargs.update({'CLFLAGS': '', 'CXXLFLAGS': ''})
  L.get_logger().log('Builder::construct_pira_keywords Returning.', level='debug')
  return kwargs
def check_and_prepare(self, experiment_dir: str, target_config: TargetConfiguration,
                      instr_config: InstrumentConfig) -> str:
  """Prepare the Extra-P experiment directory and return the cube file name.

  An already-existing experiment directory is renamed out of the way before
  a fresh one is created. Raises ProfileSinkException when the generated
  directory name is unusable or the expected .cubex file is missing.
  """
  cur_ep_dir = self.get_extrap_dir_name(
      target_config, instr_config.get_instrumentation_iteration())
  if not u.is_valid_file_name(cur_ep_dir):
    # Bad directory name: log here and fall through to the final raise.
    log.get_logger().log(
        'ExtrapProfileSink::check_and_prepare: Generated directory name no good. Abort\n' + cur_ep_dir,
        level='error')
  else:
    if u.check_provided_directory(cur_ep_dir):
      # Preserve data from a previous run by renaming its directory.
      new_dir_name = cur_ep_dir + '_' + u.generate_random_string()
      log.get_logger().log(
          'ExtrapProfileSink::check_and_prepare: Moving old experiment directory to: ' + new_dir_name,
          level='info')
      u.rename(cur_ep_dir, new_dir_name)
    u.create_directory(cur_ep_dir)
  # NOTE(review): the cube file is probed even when the directory name was
  # rejected above -- presumably intentional (the raise still fires unless
  # the cube exists), but worth confirming.
  cubex_name = experiment_dir + '/' + target_config.get_flavor(
  ) + '-' + target_config.get_target() + '.cubex'
  log.get_logger().log(cubex_name)
  if not u.is_file(cubex_name):
    log.get_logger().log(
        'ExtrapProfileSink::check_and_prepare: Returned experiment cube name is no file: ' + cubex_name)
  else:
    return cubex_name
  raise ProfileSinkException(
      'ExtrapProfileSink: Could not create target directory or Cube dir bad.')
def shell_for_submitter(command: str, silent: bool = True, dry: bool = False):
  """Run `command` through a shell for the batch submitter and return its output.

  :command: str: shell command line to run
  :dry: bool: when True, only log the command and return ''
  :raises Exception: when the command fails (except the grep-miss special case)
  """
  if dry:
    L.get_logger().log('Utility::shell_for_submitter: SHELL CALL: ' + command, level='debug')
    return ''
  try:
    out = subprocess.check_output(command, shell=True)
    return out
  except subprocess.CalledProcessError as e:
    # grep exits with 1 on "no match", which is not an error for us.
    # FIX: the old check 'if command.find('grep ')' was truthy for -1
    # (substring absent), so nearly every exit-code-1 failure was silently
    # treated as a grep miss. Use a membership test instead.
    if e.returncode == 1 and 'grep ' in command:
      return ''
    L.get_logger().log('Utility.shell: Caught Exception ' + str(e), level='error')
    raise Exception('Utility::shell_for_submitter: Running command ' + command + ' did not succeed')
def prepare_MPI_filtering(cls, filter_file: str) -> None:
  """Generate and compile an MPI wrapper library that filters out every MPI
  function NOT listed in `filter_file` (the filter file is a whitelist).
  """
  # Find which MPI functions to filter
  # Get all MPI functions (our filter_file is a WHITELIST)
  default_provider = D.BackendDefaults()
  mpi_funcs_dump = os.path.join(default_provider.instance.get_pira_dir(), 'mpi_funcs.dump')
  # Dump all known MPI function declarations via the wrap.py tool.
  U.shell('wrap.py -d > ' + mpi_funcs_dump)
  all_MPI_functions_decls = U.read_file(mpi_funcs_dump).split('\n')
  all_MPI_functions = []
  for fd in all_MPI_functions_decls:
    # Extract the function name: the text between the first space (after the
    # return type) and the opening parenthesis.
    # NOTE(review): assumes each line looks like '<type> MPI_Xxx(...)' -- confirm
    # against the actual wrap.py -d output format.
    name = fd[fd.find(' '):fd.find('(')]
    all_MPI_functions.append(name.strip())
  # NOTE(review): MPI_functions_to_filter appears to be unused below.
  MPI_functions_to_filter = []
  file_content = U.read_file(filter_file).split('\n')
  # We always want to measure MPI_Init and MPI_Finalize
  file_content.append('MPI_Init')
  file_content.append('MPI_Finalize')
  for l in file_content:
    if l.find('MPI_') > -1:
      L.get_logger().log(
          'ScorepSystemHelper::prepare_MPI_filtering: Remove ' + l)
      # prevent double removal
      if l in all_MPI_functions:
        all_MPI_functions.remove(l)
  # Generate the .c file using the mpi wrap.py script
  L.get_logger().log(
      'ScorepSystemHelper::prepare_MPI_filtering: About to filter ' +
      str(len(all_MPI_functions)) + ' MPI functions')
  # Build the wrap.py input: one {{fn}} block naming every function to filter.
  wrap_script = '{{fn PIRA_Filter'
  for mpi_func in all_MPI_functions:
    wrap_script += ' ' + mpi_func
  wrap_script += '}}\n{{callfn}}\n{{endfn}}'
  default_provider = D.BackendDefaults()
  wrap_file = default_provider.get_wrap_w_file()
  if U.check_file(wrap_file):
    U.remove_file(wrap_file)
  U.write_file(wrap_file, wrap_script)
  wrap_c_path = default_provider.get_wrap_c_file()
  wrap_command = 'wrap.py -o ' + wrap_c_path + ' ' + wrap_file
  U.shell(wrap_command)
  # Compile it to .so file
  compile_mpi_wrapper_command = 'mpicc -shared -fPIC -o ' + default_provider.get_wrap_so_file(
  ) + ' ' + wrap_c_path
  U.shell(compile_mpi_wrapper_command)
def check_build_prerequisites(cls) -> None:
  """Ensure scorep.init.c is present in the CWD and compiled to an object file."""
  scorep_init_file_name = 'scorep.init.c'
  L.get_logger().log(
      'ScorepMeasurementSystem::check_build_prerequisites: global home dir: ' + U.get_home_dir())
  pira_scorep_resource = U.get_home_dir() + '/resources/scorep.init.c'
  if not U.is_file(scorep_init_file_name):
    U.copy_file(pira_scorep_resource, U.get_cwd() + '/' + scorep_init_file_name)
  # Guard against a failed copy before invoking the compiler.
  if not U.is_file(scorep_init_file_name):
    raise MeasurementSystemException(
        'ScorepMeasurementSystem::check_build_prerequisites: Missing ' + scorep_init_file_name)
  U.shell('gcc -c ' + scorep_init_file_name)
def get_scorep_compliant_CC_command(cls, instr_file: str,
                                    compile_time_filter: bool = True) -> str:
  """
  Returns instrumentation flags for the C compiler.

  :instr_file: str: The file name to use for filtering
  :compile_time_filter: bool: Should compile-time filtering be used (default)
  """
  L.get_logger().log('ScorepSystemHelper::get_scorep_compliant_CC_command: ', level='debug')
  compiler = D.BackendDefaults().get_default_c_compiler_name()
  flags = cls.get_instrumentation_flags(instr_file, compile_time_filter)
  return '\"' + compiler + ' ' + flags + '\"'
def load_conf(self, config_file: str) -> PiraConfiguration:
  """Load, construct, and cache a PiraConfiguration from a JSON file.

  Configuration errors terminate the program; any other failure is printed
  and results in an implicit None return (best-effort, kept as-is).
  """
  if config_file in self.config_cache:
    return self.config_cache[config_file]
  try:
    file_content = U.read_file(config_file)
    json_tree = json.loads(file_content)
    configuration = self.construct_from_json(json_tree)
    self.config_cache[config_file] = configuration
    return configuration
  except PiraConfigurationErrorException as e:
    L.get_logger().log(str(e), level='error')
    sys.exit()
  except Exception as e:
    # FIX: spelling in diagnostic message ('occured' -> 'occurred').
    print('Exception occurred ' + str(e))
def test_shell_dry_run(self):
  """A dry-run shell call must only log the command and perform no work."""
  command = 'echo "Hello world!"'
  out, t = u.shell(command, dry=True)
  self.assertEqual(log.get_logger().get_last_msg(),
                   '[debug] Utility::shell: DRY RUN SHELL CALL: ' + command)
  self.assertEqual(t, 1.0)
  self.assertEqual(out, '')
def create_batch_queued_temp_file(job_id, benchmark_name, iterationNumber, DBIntVal,
                                  DBCubeFilePath, itemID, build, benchmark, flavor):
  """Persist the queued-job state to the temp file, one field per line.

  Logs an error and exits the process when the file cannot be written.
  """
  try:
    with open(queued_job_filename, 'w') as myfile:
      fields = [str(job_id), benchmark_name, str(iterationNumber), str(DBIntVal),
                DBCubeFilePath, itemID, build, benchmark, flavor]
      myfile.write('\n'.join(fields) + '\n')
      # FIX: removed the redundant explicit close(); the context manager owns it.
  except Exception:
    # FIX: bare 'except:' also swallowed SystemExit/KeyboardInterrupt.
    log.get_logger().log('Unable to create batch system temporary file. Exit.', level='error')
    exit(1)