    def _init_managers(self,
                       models="test_model",
                       num_configs_per_model=10,
                       mode='online',
                       subcommand='analyze'):
        args = ["model-analyzer", subcommand, "-f", "path-to-config-file"]
        if subcommand == 'analyze':
            args.extend(["--analysis-models", models])
        else:
            args.extend(["--report-model-configs", models])

        yaml_content = convert_to_bytes("""
            num_configs_per_model: """ + str(num_configs_per_model) + """
            client_protocol: grpc
            export_path: /test/export/path
            constraints:
              perf_latency_p99:
                max: 100
        """)
        config = self._evaluate_config(args, yaml_content)
        state_manager = AnalyzerStateManager(config=config, server=None)
        gpu_info = {
            'gpu_uuid': {
                'name': 'fake_gpu_name',
                'total_memory': 1024000000
            }
        }
        self.result_manager = ResultManager(config=config,
                                            state_manager=state_manager)
        self.report_manager = ReportManager(mode=mode,
                                            config=config,
                                            gpu_info=gpu_info,
                                            result_manager=self.result_manager)

    def _test_model_manager(self, yaml_content, expected_ranges):
        """
        Test helper that passes the given yaml_content into the model manager,
        runs the model, and confirms that the results match the full cartesian
        product of the lists in the input list of dicts expected_ranges.
        """

        # Use a mock model config; otherwise a TritonModelAnalyzerException is
        # thrown when the model config is read from disk
        self.mock_model_config = MockModelConfig(self._model_config_protobuf)
        self.mock_model_config.start()
        config = self._evaluate_config(self._args, yaml_content)

        state_manager = AnalyzerStateManager(config, MagicMock())
        metrics_manager = MetricsManagerSubclass(config, MagicMock(),
                                                 MagicMock(), MagicMock(),
                                                 MagicMock(), state_manager)
        model_manager = ModelManager(config,
                                     MagicMock(), MagicMock(), metrics_manager,
                                     MagicMock(), state_manager)

        model_manager.run_model(config.profile_models[0])
        self.mock_model_config.stop()

        self._check_results(model_manager, expected_ranges)

    def test_create_inference_table_with_backend_parameters(self):
        args = ['model-analyzer', 'analyze', '-f', 'config.yml']
        yaml_content = convert_to_bytes("""
            analysis_models: analysis_models
            inference_output_fields: model_name,batch_size,backend_parameter/parameter_1,backend_parameter/parameter_2
        """)
        config = self._evaluate_config(args, yaml_content)
        state_manager = AnalyzerStateManager(config=config, server=None)
        result_manager = ResultManager(config=config,
                                       state_manager=state_manager)

        result_manager._create_inference_table()
        self.assertEqual(result_manager._inference_output_fields, [
            'model_name', 'batch_size', 'backend_parameter/parameter_1',
            'backend_parameter/parameter_2'
        ])

    def test_get_analyze_command_help_string(self):
        """
        Tests that the member function returning the analyze command help string
        works correctly.
        """

        args = [
            'model-analyzer', 'profile', '--model-repository', '/tmp',
            '--profile-models', 'model1', '--config-file',
            '/tmp/my_config.yml', '--checkpoint-directory',
            '/tmp/my_checkpoints'
        ]
        config = self._evaluate_profile_config(args, '')
        state_manager = AnalyzerStateManager(config, None)
        analyzer = Analyzer(config, None, state_manager)
        self.assertEqual(
            analyzer._get_analyze_command_help_string(),
            'To analyze the profile results and find the best configurations, '
            'run `model-analyzer analyze --analysis-models model1 '
            '--config-file /tmp/my_config.yml --checkpoint-directory '
            '/tmp/my_checkpoints`')

    def test_get_report_command_help_string(self):
        """
        Tests that the member function returning the report command help string
        works correctly.
        """

        args = [
            'model-analyzer', 'analyze', '--analysis-models', 'model1',
            '--config-file', '/tmp/my_config.yml', '--checkpoint-directory',
            '/tmp/my_checkpoints', '--export-path', '/tmp/my_export_path'
        ]
        config = self._evaluate_analyze_config(args, '')
        state_manager = AnalyzerStateManager(config, None)
        analyzer = Analyzer(config, None, state_manager)
        self.assertEqual(
            analyzer._get_report_command_help_string(),
            'To generate detailed reports for the 3 best configurations, run '
            '`model-analyzer report --report-model-configs '
            'config1,config3,config4 --export-path /tmp/my_export_path '
            '--config-file /tmp/my_config.yml --checkpoint-directory '
            '/tmp/my_checkpoints`')


class TestAnalyzerStateManagerMethods(trc.TestResultCollector):
    def _evaluate_config(self, args, yaml_content):
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = ConfigCommandProfile()
        cli = CLI()
        cli.add_subcommand(
            cmd='profile',
            help=
            'Run model inference profiling based on specified CLI or config options.',
            config=config)
        cli.parse()
        mock_config.stop()
        return config

    def setUp(self):
        args = [
            'model-analyzer', 'profile', '--model-repository',
            'cli_repository', '-f', 'path-to-config-file', '--profile-models',
            'test_model'
        ]
        yaml_content = convert_to_bytes("""
            export_path: /test_export_path/
        """)

        # start mocks
        self.mock_io = MockIOMethods(
            mock_paths=['model_analyzer.state.analyzer_state_manager'])
        self.mock_json = MockJSONMethods()
        self.mock_os = MockOSMethods(mock_paths=[
            'model_analyzer.state.analyzer_state_manager',
            'model_analyzer.config.input.config_utils'
        ])
        self.mock_glob = MockGlobMethods()

        self.mock_io.start()
        self.mock_json.start()
        self.mock_os.start()
        self.mock_glob.start()

        config = self._evaluate_config(args, yaml_content)

        # state manager
        self.state_manager = AnalyzerStateManager(config=config, server=None)

    def test_set_get_state_variables(self):
        self.mock_os.set_os_path_exists_return_value(False)
        self.state_manager.load_checkpoint()

        vars = [f"test_var{j}" for j in range(10)]
        for i, name in enumerate(vars):
            self.state_manager.set_state_variable(name, i)

        for i, name in enumerate(vars):
            self.assertEqual(self.state_manager.get_state_variable(name), i)

        for i, name in enumerate(vars):
            self.state_manager.set_state_variable(name, 9 - i)

        for i, name in enumerate(vars):
            self.assertEqual(self.state_manager.get_state_variable(name),
                             9 - i)

    def test_load_checkpoint(self):
        # Load checkpoint without ckpt files
        self.mock_os.set_os_path_exists_return_value(False)
        self.state_manager.load_checkpoint()
        self.assertTrue(self.state_manager.starting_fresh_run())

        # Load checkpoint files with ckpt files
        self.mock_os.set_os_path_exists_return_value(True)
        self.mock_os.set_os_path_join_return_value('0.ckpt')
        self.state_manager.load_checkpoint()
        self.assertFalse(self.state_manager.starting_fresh_run())

        # Load checkpoint throws error
        self.mock_json.set_json_load_side_effect(EOFError)
        with self.assertRaises(TritonModelAnalyzerException,
                               msg='Checkpoint file 0.ckpt is'
                               ' empty or corrupted. Remove it from checkpoint'
                               ' directory.'):
            self.mock_os.set_os_path_exists_return_value(True)
            self.mock_os.set_os_path_join_return_value('0.ckpt')
            self.state_manager.load_checkpoint()
            self.assertFalse(self.state_manager.starting_fresh_run())

    def test_latest_checkpoint(self):
        # No checkpoints
        self.mock_glob.set_glob_return_value([])
        self.assertEqual(self.state_manager._latest_checkpoint(), -1)

        # single checkpoint file
        for i in range(5):
            self.mock_glob.set_glob_return_value([f'{i}.ckpt'])
            self.assertEqual(self.state_manager._latest_checkpoint(), i)

        # Multiple checkpoint files consecutive, sorted
        self.mock_glob.set_glob_return_value([f'{i}.ckpt' for i in range(5)])
        self.assertEqual(self.state_manager._latest_checkpoint(), 4)

        # Multiple checkpoint files consecutive, unsorted
        self.mock_glob.set_glob_return_value(
            [f'{i}.ckpt' for i in range(5, 1, -1)])
        self.assertEqual(self.state_manager._latest_checkpoint(), 5)

        # Multiple files nonconsecutive unsorted
        self.mock_glob.set_glob_return_value(
            [f'{i}.ckpt' for i in [1, 3, 5, 2, 0, 4]])
        self.assertEqual(self.state_manager._latest_checkpoint(), 5)

        # Malformed checkpoint filename
        self.mock_glob.set_glob_return_value(['XYZ.ckpt'])
        with self.assertRaises(TritonModelAnalyzerException):
            self.state_manager._latest_checkpoint()

    def test_get_common_row_items_with_backend_parameters(self):
        """
        This tests that a metrics model inference table row can be created with
        backend parameters included. Each backend parameter gets its own column.
        The column name is the backend parameter key (prepended with a prefix
        to avoid potentially overlapping with an existing column). The column
        value is the backend parameter value.

        Here is an example table:

        Models (Inference):
        Model     Model Config Path   backend_parameter/add_sub_key_1   backend_parameter/add_sub_key_2  
        add_sub   add_sub_config_2    add_sub_value_1                   add_sub_value_2                  
        add_sub   add_sub_config_0    add_sub_value_1                   add_sub_value_2                  
        add_sub   add_sub_config_1    add_sub_value_1                   add_sub_value_2                  

        Each row of the metrics model inference table corresponds to one model
        config variant.

        It is possible for a user to run the analyze command with multiple
        models config variants from different models with potentially different
        backend parameters. This test includes backend parameters from two
        separate models, showing that for one particular row (for a 'model A'
        config variant), it only populates the backend parameter cells for
        'model A', and the backend parameter cells for 'model B' are empty
        (None).

        Here is an example table with backend parameters from different models:

        Models (Inference):
        Model       Model Config Path   backend_parameter/add_sub_key_1   backend_parameter/add_sub_key_2   backend_parameter/add_sub_2_key_1   backend_parameter/add_sub_2_key_2  
        add_sub     add_sub_config_2    add_sub_value_1                   add_sub_value_2                   None                                None                               
        add_sub     add_sub_config_0    add_sub_value_1                   add_sub_value_2                   None                                None                               
        add_sub     add_sub_config_1    add_sub_value_1                   add_sub_value_2                   None                                None                               
        add_sub_2   add_sub_2_config_2  None                              None                              add_sub_2_value_1                   add_sub_2_value_2                  
        add_sub_2   add_sub_2_config_1  None                              None                              add_sub_2_value_1                   add_sub_2_value_2                  
        add_sub_2   add_sub_2_config_0  None                              None                              add_sub_2_value_1                   add_sub_2_value_2       
        """

        args = ['model-analyzer', 'analyze', '-f', 'config.yml']
        yaml_content = convert_to_bytes("""
            analysis_models: analysis_models
            inference_output_fields: model_name,batch_size,backend_parameter/model_1_key_1,backend_parameter/model_1_key_2,backend_parameter/model_2_key_1
        """)
        config = self._evaluate_config(args, yaml_content)
        state_manager = AnalyzerStateManager(config=config, server=None)
        result_manager = ResultManager(config=config,
                                       state_manager=state_manager)

        model_config_str = """
            parameters: {
            key: "model_1_key_1"
                value: {
                string_value:"model_1_value_1"
                }
            }
            parameters: {
            key:"model_1_key_2"
                value: {
                string_value:"model_1_value_2"
                }
            }
            """
        backend_parameters = text_format.Parse(
            model_config_str, model_config_pb2.ModelConfig()).parameters
        row = result_manager._get_common_row_items(
            fields=[
                'model_name', 'batch_size', 'backend_parameter/model_1_key_1',
                'backend_parameter/model_1_key_2',
                'backend_parameter/model_2_key_1'
            ],
            batch_size='batch_size',
            concurrency=None,
            satisfies=None,
            model_name='model_name',
            model_config_path=None,
            dynamic_batching=None,
            instance_group=None,
            backend_parameters=backend_parameters)
        self.assertEqual(row, [
            'model_name', 'batch_size', 'model_1_value_1', 'model_1_value_2',
            None
        ])