def _evaluate_analyze_config(self, args, yaml_content):
    """Parse CLI args/YAML through the 'analyze' subcommand and return its config.

    Parameters
    ----------
    args : list of str
        Simulated command-line arguments.
    yaml_content : str
        Simulated YAML config file content.

    Returns
    -------
    ConfigCommandAnalyze
        The populated analyze config after CLI parsing.
    """
    mock_config = MockConfig(args, yaml_content)
    mock_config.start()
    # Ensure the mock is always torn down, even if CLI parsing raises;
    # otherwise a failed test leaves the patched argv/config active for
    # every subsequent test in the run.
    try:
        config = ConfigCommandAnalyze()
        cli = CLI()
        cli.add_subcommand(
            cmd='analyze',
            help='Collect and sort profiling results and generate data and '
            'summaries.',
            config=config)
        cli.parse()
    finally:
        mock_config.stop()
    return config
def _evaluate_profile_config(self, args, yaml_content):
    """Parse CLI args/YAML through the 'profile' subcommand and return its config.

    Parameters
    ----------
    args : list of str
        Simulated command-line arguments.
    yaml_content : str
        Simulated YAML config file content.

    Returns
    -------
    ConfigCommandProfile
        The populated profile config after CLI parsing.
    """
    mock_config = MockConfig(args, yaml_content)
    mock_config.start()
    # try/finally guarantees the mock is stopped even when cli.parse()
    # raises, preventing the patch from leaking into later tests.
    try:
        config = ConfigCommandProfile()
        cli = CLI()
        cli.add_subcommand(
            cmd='profile',
            help='Run model inference profiling based on specified CLI or '
            'config options.',
            config=config)
        cli.parse()
    finally:
        mock_config.stop()
    return config
def _evaluate_config(self, args, yaml_content):
    """Parse CLI args/YAML with both 'analyze' and 'report' subcommands registered.

    Parameters
    ----------
    args : list of str
        Simulated command-line arguments.
    yaml_content : str
        Simulated YAML config file content.

    Returns
    -------
    ConfigCommandAnalyze or ConfigCommandReport
        The analyze config if it was populated (its ``export_path`` is
        truthy after parsing), otherwise the report config.
    """
    mock_config = MockConfig(args, yaml_content)
    mock_config.start()
    # Stop the mock unconditionally so a parse failure cannot leave the
    # patched environment active for subsequent tests.
    try:
        config_analyze = ConfigCommandAnalyze()
        config_report = ConfigCommandReport()
        cli = CLI()
        cli.add_subcommand(
            cmd="analyze",
            help=
            "Collect and sort profiling results and generate data and summaries.",
            config=config_analyze)
        cli.add_subcommand(
            cmd='report',
            help='Generate detailed reports for a single config',
            config=config_report)
        cli.parse()
    finally:
        mock_config.stop()
    # Only the subcommand actually invoked gets populated; export_path is
    # used as the marker that 'analyze' was the one parsed.
    ret = config_analyze if config_analyze.export_path else config_report
    return ret