def create_plots(*, plots, xlabel, ylabel, save_path):
    legends = "["
    xlist = "["
    ylist = "["
    for plot in plots:
        legends += "'" + plot['legend'] + "'" + ','
        xlist += create_string_array(plot['points_x']) + ','
        ylist += create_string_array(plot['points_y']) + ','
    legends += ']'
    xlist += ']'
    ylist += ']'

    output = f'''
import pylab

legends = {legends}
xlist = {xlist}
ylist = {ylist}

for (x, y, l) in zip(xlist, ylist, legends):
    pylab.plot(x, y, label=l)

pylab.axhline(0, color='red', marker='o', linestyle='dashed')
pylab.xlabel("{xlabel}")
pylab.ylabel("{ylabel}")
pylab.legend()
pylab.tight_layout()
pylab.show()
'''

    with open(save_path, 'w') as fh:
        fh.write(output)

    LOG.info(f'Run {save_path} to look at the {xlabel} x {ylabel} plot.')
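# A minimal usage sketch (not part of the tool): the loop above implies that
# |plots| is a list of dicts with 'legend', 'points_x' and 'points_y' keys.
# All names and values here are illustrative only.
#
#   example_plots = [
#       {'legend': 'old_version', 'points_x': [0, 1, 2], 'points_y': [10.0, 12.5, 11.2]},
#       {'legend': 'new_version', 'points_x': [0, 1, 2], 'points_y': [9.8, 11.9, 10.7]},
#   ]
#   create_plots(plots=example_plots, xlabel='Route number',
#                ylabel='Max memory (MB)', save_path='/tmp/plots.py')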
def main():
    config_ini = ConfigINI('etc/heap_comparison_benchmark.ini')
    old_versions = utils.load_run_config_ini(config_ini=config_ini,
                                             path=["OLD_VERSION", "Params"])
    new_versions = utils.load_run_config_ini(config_ini=config_ini,
                                             path=["NEW_VERSION", "Params"])
    all_versions = []
    all_versions.extend(old_versions)
    all_versions.extend(new_versions)

    omim = Omim()
    try:
        # Run routes_builder_tool for each route separately and save the
        # maximum memory usage for all versions.
        data_from_heap_comparison = run_heap_comparison(omim=omim,
                                                        config_ini=config_ini,
                                                        versions=all_versions)
        LOG.info("run_heap_comparison() Done.")

        utils.log_with_stars("Run run_results_comparison()")
        # Compare each version from |old_versions| to each version from
        # |new_versions| and dump useful statistics to the log: maximum memory
        # usage (MB), median, average, min, etc.
        run_results_comparison(
            config_ini=config_ini,
            old_versions=old_versions,
            new_versions=new_versions,
            data_from_heap_comparison=data_from_heap_comparison)
    except Exception as e:
        LOG.error(f'Error in run_heap_comparison(): {e}')
        omim.checkout_to_init_state()
        sys.exit()

    omim.checkout_to_init_state()
def run(self, *, binary, binary_cache_suffix=None, args, env=None,
        output_file=None, log_error_code=True):
    binary_path = self._get_cached_binary_name(
        binary=binary, binary_cache_suffix=binary_cache_suffix)
    if not os.path.exists(binary_path):
        raise Exception(f'Cannot find {binary_path}, did you call build()?')

    args_string = ""
    for arg, value in args.items():
        if value:
            args_string += f' --{arg}={value}'
        else:
            args_string += f' --{arg}'

    cmd = binary_path + args_string
    code, _ = self._run_system_unsafe(cmd=cmd, env=env,
                                      output_file=output_file)
    if log_error_code:
        LOG.info(f'Finish with exit code: {code}')
def run_routing_quality_tool(*, omim, config_ini, old_versions, new_versions,
                             data_from_routes_builder_tool):
    args = {}
    is_benchmark = config_ini.read_value_by_path(path=['TOOL', 'Benchmark'])
    if is_benchmark:
        args['benchmark_stat'] = ''

    results_base_dir = config_ini.read_value_by_path(
        path=['PATHS', 'ResultsSaveDir'])
    for old_version in old_versions:
        for new_version in new_versions:
            utils.log_with_stars(
                f'Compare {old_version["name"]} VS {new_version["name"]}')

            old_branch_hash = utils.get_branch_hash_name(
                branch=old_version['branch'], hash=old_version['hash'])
            old_version_task = data_from_routes_builder_tool[old_branch_hash]

            new_branch_hash = utils.get_branch_hash_name(
                branch=new_version['branch'], hash=new_version['hash'])
            new_version_task = data_from_routes_builder_tool[new_branch_hash]

            args['mapsme_results'] = new_version_task['dump_path']
            args['mapsme_old_results'] = old_version_task['dump_path']
            args['save_results'] = os.path.join(
                results_base_dir, old_branch_hash + '__vs__' + new_branch_hash)

            omim.run(binary='routing_quality_tool', args=args)
            LOG.info(os.linesep + os.linesep)
def log_with_stars(string):
    LOG.info('')
    LOG.info('')
    stars_number = 96
    LOG.info('*' * stars_number)
    first_stars = '*' * ((stars_number - len(string)) // 2)
    result = first_stars + ' ' + string + ' ' + '*' * (
        stars_number - len(first_stars) - len(string) - 2)
    LOG.info(result)
    LOG.info('*' * stars_number)
def checkout(self, *, branch, hash=None):
    branch_hash_name = get_branch_hash_name(branch=branch, hash=hash)
    LOG.info(f'Do checkout to: {branch_hash_name}')

    repo = Repo(self.omim_path)
    repo.remote('origin').fetch()
    repo.git.checkout(branch)
    if hash:
        repo.git.reset('--hard', hash)
    else:
        hash = repo.head.object.hexsha

    LOG.info(f'Checkout to: {branch} {hash} done')
    self.branch = branch
    self.hash = hash
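# get_branch_hash_name() is used throughout these scripts but not defined in
# this section. A minimal sketch of what its call sites imply; the exact
# identifier format (the separator in particular) is an assumption, not the
# original implementation:
def get_branch_hash_name(*, branch, hash=None):
    # Build a unique "branch_hash" identifier; fall back to the branch name
    # alone when no hash is pinned.
    return f'{branch}_{hash}' if hash else branch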
def run_routes_builder_tool(*, omim, config_ini, versions):
    is_benchmark = config_ini.read_value_by_path(path=['TOOL', 'Benchmark'])
    args = {
        'resources_path': omim.data_path,
        'routes_file': config_ini.read_value_by_path(
            path=['PATHS', 'RoutesFile']),
        'threads': 1 if is_benchmark else utils.cpu_count(),
        'timeout': config_ini.read_value_by_path(path=['TOOL', 'Timeout']),
        'launches_number': config_ini.read_value_by_path(
            path=['TOOL', 'LaunchesNumber']) if is_benchmark else 1,
        'vehicle_type': utils.get_vehicle_type(config_ini=config_ini)
    }

    data_from_routes_builder_tool = dict()
    for version in versions:
        name = version['name']
        branch = version['branch']
        hash = version['hash']
        branch_hash = utils.get_branch_hash_name(branch=branch, hash=hash)

        LOG.info(f'Get: {name} {branch_hash}')
        omim.checkout(branch=branch, hash=hash)
        omim.build(aim='routes_builder_tool',
                   binary_cache_suffix='cpu' if is_benchmark else '')

        args['dump_path'] = omim.get_or_create_unique_dir_path(
            prefix_path=config_ini.read_value_by_path(
                path=['PATHS', 'ResultsSaveDir']))
        args['data_path'] = version['mwm_path']
        data_from_routes_builder_tool[branch_hash] = args.copy()

        utils.log_with_stars('CPP Logs')
        omim.run(binary='routes_builder_tool',
                 binary_cache_suffix='cpu' if is_benchmark else '',
                 args=args)
        LOG.info(os.linesep + os.linesep)

    return data_from_routes_builder_tool
def create_distribution_script(*, values, title, save_path):
    values_string = create_string_array(values)
    output = f'''
import numpy as np
import matplotlib.pyplot as plt

a = np.hstack({values_string})
plt.hist(a, bins='auto')  # arguments are passed to np.histogram
plt.title("{title}")
plt.show()'''

    with open(save_path, 'w') as fh:
        fh.write(output)

    LOG.info(f'Run {save_path} to look at the {title} distribution.')
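# create_string_array() is called by both script generators above but is not
# defined in this section. A minimal sketch of the behavior the call sites
# suggest: serialize a list of numbers into a Python list literal for the
# generated scripts. This is an assumption, not the original implementation.
def create_string_array(values):
    return '[' + ', '.join(str(value) for value in values) + ']'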
def main():
    config_ini = ConfigINI('etc/comparison.ini')
    old_versions = utils.load_run_config_ini(config_ini=config_ini,
                                             path=["OLD_VERSION", "Params"])
    new_versions = utils.load_run_config_ini(config_ini=config_ini,
                                             path=["NEW_VERSION", "Params"])
    all_versions = []
    all_versions.extend(old_versions)
    all_versions.extend(new_versions)

    omim = Omim()
    try:
        # Run all versions of routing sequentially and dump the results to a
        # directory based on ResultsSaveDir from the config_ini file.
        data_from_routes_builder_tool = run_routes_builder_tool(
            omim=omim, config_ini=config_ini, versions=all_versions)
    except Exception as e:
        LOG.error(f'Error in run_routes_builder_tool(): {e}')
        omim.checkout_to_init_state()
        sys.exit()

    LOG.info("run_routes_builder_tool() Done.")

    omim.checkout(branch='master')
    omim.build(aim='routing_quality_tool')
    try:
        # Run routing_quality_tool, which compares the results of
        # routes_builder_tool.
        run_routing_quality_tool(
            omim=omim, config_ini=config_ini, old_versions=old_versions,
            new_versions=new_versions,
            data_from_routes_builder_tool=data_from_routes_builder_tool)
    except Exception as e:
        LOG.error(f'Error in run_routing_quality_tool(): {e}')
        omim.checkout_to_init_state()
        sys.exit()

    omim.checkout_to_init_state()
def build(self, *, aim, binary_cache_suffix=None, cmake_options=""):
    os.chdir(self.build_dir)
    binary_path = self._get_cached_binary_name(
        binary=aim, binary_cache_suffix=binary_cache_suffix)
    if os.path.exists(binary_path):
        LOG.info(f'Found cached binary: {binary_path}')
        return

    branch_hash = get_branch_hash_name(branch=self.branch, hash=self.hash)
    output_prefix = os.path.join(self.build_dir, branch_hash + '_')

    cmake_cmd = f'{self.cmake_cmd} {self.omim_path} {cmake_options}'
    self._run_system(cmd=cmake_cmd,
                     output_file=output_prefix + 'cmake_run.log',
                     log_cmd=True)

    make_cmd = f'make -j{self.cpu_count} {aim}'
    self._run_system(cmd=make_cmd,
                     output_file=output_prefix + 'make_run.log',
                     log_cmd=True)

    LOG.info(f'Build {aim} done')
    self._run_system(cmd=f'cp {aim} {binary_path}')
def _run_system_unsafe(self, *, cmd, env=None, output_file=None,
                       log_cmd=False):
    env_params = ""
    if env is None:
        env = dict()
    else:
        env_params = "env "
    for key, value in env.items():
        env_params += f'{key}={value} '

    if output_file is None:
        output = ""
    else:
        output = f' > {output_file} 2>&1'

    full_cmd = env_params + cmd + output
    if log_cmd:
        LOG.info(f'Run: {full_cmd}')

    # Callers unpack the result as |code, _|, so the element order matters:
    # return an ordered tuple of (exit status, composed command line).
    return os.system(full_cmd), full_cmd
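# Illustrative sketch of what _run_system_unsafe() composes and returns; the
# command, env and paths below are hypothetical values, not from the tool:
#
#   code, full_cmd = self._run_system_unsafe(
#       cmd='./routes_builder_tool --threads=8',
#       env={'LOG_LEVEL': 'DEBUG'},
#       output_file='/tmp/run.log')
#   # full_cmd == 'env LOG_LEVEL=DEBUG ./routes_builder_tool --threads=8 > /tmp/run.log 2>&1'
#   # code is the raw os.system() exit status.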
def run_heap_comparison(*, omim, config_ini, versions):
    routes_file = config_ini.read_value_by_path(path=['PATHS', 'RoutesFile'])
    data_from_heap_comparison = dict()
    for version in versions:
        name = version['name']
        branch = version['branch']
        hash = version['hash']
        branch_hash = utils.get_branch_hash_name(branch=branch, hash=hash)

        version_dump_path = get_version_dump_path(config_ini=config_ini,
                                                  version=version)
        heapprof_dump_path = get_version_heapprof_dump_path(
            config_ini=config_ini, version=version)
        if not os.path.exists(version_dump_path):
            os.mkdir(version_dump_path)
        if not os.path.exists(heapprof_dump_path):
            os.mkdir(heapprof_dump_path)

        LOG.info(f'Get: {name} {branch_hash}')
        omim.checkout(branch=branch, hash=hash)
        omim.build(aim='routes_builder_tool', binary_cache_suffix='heap',
                   cmake_options="-DUSE_HEAPPROF=ON")

        LOG.info(f'Start building routes from file: {routes_file}')
        pool_args = []
        with open(routes_file) as routes_file_fh:
            for route_id, line in enumerate(routes_file_fh):
                args_tuple = (omim, version, config_ini, route_id, line)
                pool_args.append(args_tuple)

        with Pool(omim.cpu_count) as p:
            version_result = p.starmap(run_routes_builder_tool_one_route,
                                       pool_args)

        results = dict()
        for result in version_result:
            results[result['id']] = result['max_mb_usage']

        data_from_heap_comparison[branch_hash] = {
            'version': version,
            'results': results
        }
        LOG.info(os.linesep + os.linesep)

    return data_from_heap_comparison
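# Shape of the mapping returned by run_heap_comparison(), for reference
# (keys and numbers below are illustrative only):
#
#   {
#       'master_abc123': {
#           'version': {...},                 # the input version dict
#           'results': {0: 512.3, 1: 498.7}   # route_id -> max memory, MB
#       }
#   }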
def compare_two_versions(*, config_ini, old_version_data, new_version_data):
    old_version = old_version_data['version']
    new_version = new_version_data['version']
    old_version_name = old_version['name']
    new_version_name = new_version['name']

    results_save_dir = config_ini.read_value_by_path(
        path=['PATHS', 'ResultsSaveDir'])
    results_path_prefix = os.path.join(
        results_save_dir, f'{old_version_name}__{new_version_name}')

    utils.log_with_stars(f'Compare {old_version_name} VS {new_version_name}')

    diff = []
    old_version_results = old_version_data['results']
    new_version_results = new_version_data['results']
    for route_id, old_max_mb in old_version_results.items():
        if route_id not in new_version_results:
            LOG.info(
                f'Cannot find route {route_id} in {new_version_name} results.')
            continue

        new_max_mb = new_version_results[route_id]
        diff_mb = new_max_mb - old_max_mb
        diff_percent = round((new_max_mb - old_max_mb) / old_max_mb * 100.0, 2)
        diff.append({'mb': diff_mb, 'percent': diff_percent})

    diff.sort(key=lambda item: item['mb'])

    min_mb = get_by_func_and_key_in_diff(diff=diff, key='mb', func=min)
    median_mb = get_median_by_key_in_diff(diff=diff, key='mb')
    max_mb = get_by_func_and_key_in_diff(diff=diff, key='mb', func=max)

    min_percent = get_by_func_and_key_in_diff(diff=diff, key='percent',
                                              func=min)
    median_percent = get_median_by_key_in_diff(diff=diff, key='percent')
    max_percent = get_by_func_and_key_in_diff(diff=diff, key='percent',
                                              func=max)

    # The diff values are computed as new minus old.
    LOG.info(f'The following semantics is used: '
             f'{new_version_name} - {old_version_name}')
    LOG.info(f'min({min_mb}Mb), median({median_mb}Mb), max({max_mb}Mb)')
    LOG.info(
        f'min({min_percent}%), median({median_percent}%), max({max_percent}%)')

    diff_mb_script = f'{results_path_prefix}__diff_mb.py'
    diff_mb = list(map(lambda item: item['mb'], diff))
    graph_scripts.create_distribution_script(values=diff_mb,
                                             title='Difference MB',
                                             save_path=diff_mb_script)

    plots_script = f'{results_path_prefix}__mb_percents.py'
    plots = create_diff_mb_percents_plots(diff)
    graph_scripts.create_plots(plots=plots, xlabel='Route number',
                               ylabel='conventional units',
                               save_path=plots_script)
    LOG.info(os.linesep + os.linesep)
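# get_by_func_and_key_in_diff() and get_median_by_key_in_diff() are used above
# but not defined in this section. Minimal sketches of the behavior their call
# sites imply; these are assumptions, not the original implementations:
def get_by_func_and_key_in_diff(*, diff, key, func):
    # Apply |func| (e.g. min or max) to one field of the diff records.
    return func(item[key] for item in diff)


def get_median_by_key_in_diff(*, diff, key):
    # |diff| is sorted by 'mb' before these helpers are called, so for the
    # median we sort by the requested key explicitly.
    values = sorted(item[key] for item in diff)
    middle = len(values) // 2
    if len(values) % 2 == 1:
        return values[middle]
    return round((values[middle - 1] + values[middle]) / 2, 2)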
def tearDown(self):
    LOG.info("------ Test: %s, TearDown", self.id())
def setUp(self):
    os.environ['LOG_LEVEL'] = 'DEBUG'
    LOG.info("====== Test: %s, SetUp", self.id())
def setUp(self):
    os.environ['LOG_LEVEL'] = 'DEBUG'
    self.loop = asyncio.get_event_loop()
    LOG.info("====== Test: %s, SetUp", self.id())