def test_aicpu_parser_file_not_exist(self):
    """Test the Aicpu parser when the source data file does not exist."""
    profiling_dir = os.path.realpath(os.path.join(self.profiling_dir, 'data'))
    data = DataPreProcessParser(profiling_dir, self.output_file)
    data.execute()
    shutil.rmtree(self.output_path)
def test_aicpu_parser(self):
    """Test the class of Aicpu Parser."""
    data = DataPreProcessParser(self.profiling_dir, self.output_file)
    data.execute()
    expect_result = get_result(self.expect_file)
    result = get_result(self.output_file)
    shutil.rmtree(self.output_path)
    assert expect_result == result
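# The test above relies on a get_result() helper that is not shown in this excerpt.
# A minimal sketch of what such a helper might look like (an assumption, not the
# repository's actual implementation): it reads a result file and returns its rows so
# that the parsed output can be compared with the expected file line by line.
def get_result(file_path):
    """Hypothetical helper: read a plain-text result file and return its rows."""
    result = []
    with open(file_path, 'r') as handle:  # assumes a line-oriented text file
        for line in handle:
            result.append(line.strip().split())
    return result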
def _ascend_analyse(self):
    """Collect and analyse ascend performance data."""
    release()
    job_id = self._get_profiling_job_id()
    logger.info("Profiling: job id is %s ", job_id)

    source_path = os.path.join(self._output_path, job_id)
    # parse hwts.log.data.45.dev file, and get task profiling data
    hwts_output_filename = self._hwts_output_filename_target + self._dev_id + ".txt"
    hwts_output_filename = os.path.join(self._output_path, hwts_output_filename)
    source_path = validate_and_normalize_path(source_path)
    hwts_output_filename = validate_and_normalize_path(hwts_output_filename)
    hwtslog_parser = HWTSLogParser(source_path, hwts_output_filename)
    hwtslog_parser.execute()

    # parse Framework file, and get the relation of op and tasks
    framework_parser = FrameworkParser(job_id, self._dev_id, self._output_path)
    framework_parser.parse()
    op_task_dict = framework_parser.to_task_id_full_op_name_dict()
    if not op_task_dict:
        logger.error("Profiling: fail to parse framework files.")
        return

    # get op compute time from hwts data and framework data, write output_op_compute_time.txt
    opcompute_output_filename = self._opcompute_output_filename_target + self._dev_id + ".txt"
    opcompute_output_filename = os.path.join(self._output_path, opcompute_output_filename)
    opcompute_output_filename = validate_and_normalize_path(opcompute_output_filename)
    optime_parser = OPComputeTimeParser(
        hwts_output_filename, opcompute_output_filename,
        op_task_dict, self._output_path, self._dev_id)
    optime_parser.execute()

    # parse DATA_PREPROCESS.dev.AICPU file, write output_data_preprocess_aicpu_x.txt
    output_data_preprocess_aicpu = self._aicpu_op_output_filename_target + self._dev_id + ".txt"
    output_data_preprocess_aicpu = os.path.join(self._output_path, output_data_preprocess_aicpu)
    output_data_preprocess_aicpu = validate_and_normalize_path(output_data_preprocess_aicpu)
    aicpu_data_parser = DataPreProcessParser(source_path, output_data_preprocess_aicpu)
    aicpu_data_parser.execute()

    # Parsing minddata AICPU profiling
    MinddataParser.execute(source_path, self._output_path, self._dev_id)

    # parse minddata pipeline operator and queue
    try:
        pipeline_parser = MinddataPipelineParser(self._output_path, self._dev_id, self._output_path)
        pipeline_parser.parse()
    except ProfilerException as err:
        logger.warning(err.message)

    # analyse op compute time info
    try:
        self._analyser_op_info()
    except ProfilerException as err:
        logger.warning(err.message)

    # analyse step trace info
    points = None
    try:
        points = self._analyse_step_trace(source_path, framework_parser)
    except ProfilerException as err:
        logger.warning(err.message)

    # analyse timeline info
    try:
        self._analyse_timeline(aicpu_data_parser, optime_parser, source_path)
    except (ProfilerIOException, ProfilerFileNotFoundException, RuntimeError) as err:
        logger.warning('Fail to write timeline data: %s', err)

    # analyse memory usage info
    try:
        self._analyse_memory_usage(points)
    except (ProfilerIOException, ProfilerFileNotFoundException, ProfilerRawFileException) as err:
        logger.warning(err.message)

    os.environ['PROFILING_MODE'] = str("false")
    context.set_context(enable_profiling=False)
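# _ascend_analyse() factors the Ascend-specific parsing out of the public analyse()
# entry point. A minimal sketch of how analyse() could dispatch to it by device target
# (an assumption about the surrounding structure, not code taken from the repository):
#
#     def analyse(self):
#         if self._device_target == "GPU":
#             self._gpu_analyse()      # hypothetical GPU counterpart
#         elif self._device_target == "Ascend":
#             self._ascend_analyse()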
def analyse(self):
    """
    Collect and analyse performance data, called after training or during training.

    Examples:
        >>> from mindspore.profiler import Profiler
        >>> from mindspore import context
        >>> context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
        >>>                     device_id=int(os.environ["DEVICE_ID"]))
        >>> profiler = Profiler()
        >>> model = Model()
        >>> model.train()
        >>> profiler.analyse()
    """
    if self._device_target and self._device_target == "GPU":
        if context.get_auto_parallel_context('device_num') > 1 and self._dev_id != get_rank():
            self._dev_id = get_rank()
            logger.error('Please check the Profiler object initialized after set_auto_parallel_context() '
                         'and init(). Profiler should be initialized after these calls.')
        self._gpu_profiler.stop()
        timeline_generator = self._generate_timeline()

        # parse minddata pipeline operator and queue for GPU
        try:
            pipeline_parser = MinddataPipelineParser(self._output_path, self._dev_id, self._output_path)
            pipeline_parser.parse()
        except ProfilerException as err:
            logger.warning(err.message)

        # analyse step trace info
        try:
            self._analyse_step_trace(
                is_training_mode_flag=timeline_generator.check_op_name('Gradients'))
        except ProfilerException as err:
            logger.warning(err.message)
        os.environ['PROFILING_MODE'] = str("false")

    elif self._device_target and self._device_target == "Ascend":
        release()
        job_id = self._get_profiling_job_id()
        logger.info("Profiling: job id is %s ", job_id)

        source_path = os.path.join(self._output_path, job_id)
        # parse hwts.log.data.45.dev file, and get task profiling data
        hwts_output_filename = self._hwts_output_filename_target + self._dev_id + ".txt"
        hwts_output_filename = os.path.join(self._output_path, hwts_output_filename)
        source_path = validate_and_normalize_path(source_path)
        hwts_output_filename = validate_and_normalize_path(hwts_output_filename)
        hwtslog_parser = HWTSLogParser(source_path, hwts_output_filename)
        _ = hwtslog_parser.execute()

        # parse Framework file, and get the relation of op and tasks
        framework_parser = FrameworkParser(job_id, self._dev_id, self._output_path)
        framework_parser.parse()
        op_task_dict = framework_parser.to_task_id_full_op_name_dict()
        if not op_task_dict:
            logger.error("Profiling: fail to parse framework files.")
            return

        # get op compute time from hwts data and framework data, write output_op_compute_time.txt
        opcompute_output_filename = self._opcompute_output_filename_target + self._dev_id + ".txt"
        opcompute_output_filename = os.path.join(self._output_path, opcompute_output_filename)
        opcompute_output_filename = validate_and_normalize_path(opcompute_output_filename)
        optime_parser = OPComputeTimeParser(
            hwts_output_filename, opcompute_output_filename,
            op_task_dict, self._output_path, self._dev_id)
        optime_parser.execute()

        # parse DATA_PREPROCESS.dev.AICPU file, write output_data_preprocess_aicpu_x.txt
        output_data_preprocess_aicpu = self._aicpu_op_output_filename_target + self._dev_id + ".txt"
        output_data_preprocess_aicpu = os.path.join(self._output_path, output_data_preprocess_aicpu)
        output_data_preprocess_aicpu = validate_and_normalize_path(output_data_preprocess_aicpu)
        aicpu_data_parser = DataPreProcessParser(source_path, output_data_preprocess_aicpu)
        aicpu_data_parser.execute()

        # Parsing minddata AICPU profiling
        MinddataParser.execute(source_path, self._output_path, self._dev_id)

        # parse minddata pipeline operator and queue
        try:
            pipeline_parser = MinddataPipelineParser(self._output_path, self._dev_id, self._output_path)
            pipeline_parser.parse()
        except ProfilerException as err:
            logger.warning(err.message)

        # analyse op compute time info
        try:
            self._analyser_op_info()
        except ProfilerException as err:
            logger.warning(err.message)

        # analyse step trace info
        try:
            self._analyse_step_trace(source_path, framework_parser)
        except ProfilerException as err:
            logger.warning(err.message)

        # analyse timeline info
        try:
            self._analyse_timeline(aicpu_data_parser, optime_parser)
        except (ProfilerIOException, ProfilerFileNotFoundException, RuntimeError) as err:
            logger.warning('Fail to write timeline data: %s', err)

        os.environ['PROFILING_MODE'] = str("false")
        context.set_context(enable_profiling=False)
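# The GPU branch of analyse() logs an error when the Profiler is constructed before
# set_auto_parallel_context() and init(). A minimal sketch of the intended call order
# on the GPU path, derived from that warning and the docstring example above
# (an illustration, not documented usage; run_gpu_profiling and device_num are assumptions):
def run_gpu_profiling():
    """Hypothetical example: construct the Profiler only after the parallel context and init()."""
    from mindspore import context
    from mindspore.communication.management import init
    from mindspore.profiler import Profiler

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    context.set_auto_parallel_context(device_num=8)  # assumed multi-device setup
    init()                                           # initialize communication first
    profiler = Profiler(output_path='./data')        # then construct the Profiler
    # ... build and train the model here, as in the docstring example ...
    profiler.analyse()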
def analyse(self):
    """
    Collect and analyse performance data, called after training or during training.

    Examples:
        >>> from mindspore.profiler import Profiler
        >>> from mindspore import context
        >>> context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
        >>>                     device_id=int(os.environ["DEVICE_ID"]))
        >>> profiler = Profiler(subgraph='all', is_detail=True, is_show_op_path=False,
        >>>                     output_path='./data')
        >>> model = Model()
        >>> model.train()
        >>> profiler.analyse()
    """
    release()
    job_id = self._get_profiling_job_id()
    logger.info("Profiling: job id is %s ", job_id)

    source_path = os.path.join(PROFILING_LOG_BASE_PATH, job_id)
    # parse hwts.log.data.45.dev file, and get task profiling data
    hwts_output_filename = self._hwts_output_filename_target + self._dev_id + ".txt"
    hwts_output_filename = os.path.join(self._output_path, hwts_output_filename)
    hwtslog_parser = HWTSLogParser(source_path, hwts_output_filename)
    result = hwtslog_parser.execute()
    if not result:
        logger.error("Profiling: fail to parse hwts log file.")
        return

    # parse Framework file, and get the relation of op and tasks
    framework_parser = FrameworkParser(job_id, self._dev_id, self._output_path)
    framework_parser.parse()
    op_task_dict = framework_parser.to_task_id_full_op_name_dict()
    if not op_task_dict:
        logger.error("Profiling: fail to parse framework files.")
        return

    # get op compute time from hwts data and framework data, write output_op_compute_time.txt
    opcompute_output_filename = self._opcompute_output_filename_target + self._dev_id + ".txt"
    opcompute_output_filename = os.path.join(self._output_path, opcompute_output_filename)
    optime_parser = OPComputeTimeParser(
        hwts_output_filename, opcompute_output_filename,
        op_task_dict, self._output_path, self._dev_id)
    optime_parser.execute()

    # parse DATA_PREPROCESS.dev.AICPU file, write output_data_preprocess_aicpu_x.txt
    output_data_preprocess_aicpu = self._aicpu_op_output_filename_target + self._dev_id + ".txt"
    output_data_preprocess_aicpu = os.path.join(self._output_path, output_data_preprocess_aicpu)
    aicpu_data_parser = DataPreProcessParser(source_path, output_data_preprocess_aicpu)
    aicpu_data_parser.execute()

    # Parsing minddata AICPU profiling
    MinddataParser.execute(source_path, self._output_path, self._dev_id)

    # parse minddata pipeline operator and queue
    try:
        pipeline_parser = MinddataPipelineParser(self._output_path, self._dev_id, self._output_path)
        pipeline_parser.parse()
    except ProfilerException as err:
        logger.warning(err.message)

    # analyse op compute time info
    try:
        self._analyser_op_info()
    except ProfilerException as err:
        logger.warning(err.message)

    # analyse step trace info
    try:
        self._analyse_step_trace(source_path, framework_parser)
    except ProfilerException as err:
        logger.warning(err.message)

    # analyse timeline info
    try:
        self._analyse_timeline(aicpu_data_parser, optime_parser)
    except (ProfilerIOException, ProfilerFileNotFoundException, RuntimeError) as err:
        logger.warning('Fail to write timeline data: %s', err)