Example 1
    def _evaluate_config(self, args, yaml_content):
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()
        mock_config.stop()
        return config
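
Every example exercises the parser through MockConfig, whose implementation is not part of these excerpts. A minimal sketch of what such a helper could look like, assuming it only needs to patch sys.argv and the config-file read with unittest.mock, is given below; MockConfigSketch and its patch targets are assumptions, not the real test utility.

import sys
from unittest.mock import patch, mock_open

class MockConfigSketch:
    """Hypothetical stand-in for MockConfig: patches sys.argv and the
    config-file read so that cli.parse() sees the supplied arguments
    and YAML text instead of the real environment."""

    def __init__(self, args, yaml_content):
        self._argv_patch = patch.object(sys, 'argv', args)
        # Assumption: the config loader opens the -f file with open();
        # mock_open feeds it the in-memory YAML string instead.
        self._open_patch = patch('builtins.open',
                                 mock_open(read_data=yaml_content))

    def start(self):
        self._argv_patch.start()
        self._open_patch.start()

    def stop(self):
        self._open_patch.stop()
        self._argv_patch.stop()
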
Example 2
    # mock_print_help is injected by a @patch decorator on this test;
    # the decorator itself is not shown in this excerpt.
    def test_help_message_no_args(self, mock_print_help):
        """
        Tests that model-analyzer prints the help message when no arguments are
        given
        """

        sys.argv = ['/usr/local/bin/model-analyzer']

        cli = CLI()

        self.assertRaises(SystemExit, cli.parse)
        mock_print_help.assert_called()
Example 3
    def test_validation(self):
        args = [
            'model-analyzer', '--model-repository', 'cli_repository', '-f',
            'path-to-config-file'
        ]

        # end key should not be included in concurrency
        yaml_content = """
model_names:
  -
    vgg_16_graphdef:
      parameters:
        concurrency:
            start: 4
            stop: 12
            end: 2
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)

        with self.assertRaises(TritonModelAnalyzerException):
            cli.parse()
        mock_config.stop()

        args = [
            'model-analyzer', '--model-repository', 'cli_repository', '-f',
            'path-to-config-file'
        ]

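        # start greater than stop should also be rejected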
        yaml_content = """
model_names:
  -
    vgg_16_graphdef:
      parameters:
        concurrency:
            start: 13
            stop: 12
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)

        with self.assertRaises(TritonModelAnalyzerException):
            cli.parse()
        mock_config.stop()
Example 4
    def test_config(self):
        args = [
            'model-analyzer', '--model-repository', 'cli_repository', '-f',
            'path-to-config-file', '--model-names', 'vgg11'
        ]
        yaml_content = 'model_repository: yaml_repository'
        config = self._evaluate_config(args, yaml_content)

        # CLI flag has the highest priority
        self.assertTrue(
            config.get_all_config()['model_repository'] == 'cli_repository')

        args = [
            'model-analyzer', '-f', 'path-to-config-file', '--model-names',
            'vgg11'
        ]
        yaml_content = 'model_repository: yaml_repository'
        config = self._evaluate_config(args, yaml_content)

        # If CLI flag doesn't exist, YAML config has the highest priority
        self.assertTrue(
            config.get_all_config()['model_repository'] == 'yaml_repository')

        args = ['model-analyzer', '-f', 'path-to-config-file']
        yaml_content = 'model_repository: yaml_repository'
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)

        # When a required field is not specified, parse will lead to an
        # exception
        with self.assertRaises(TritonModelAnalyzerException):
            cli.parse()

        mock_config.stop()
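
test_config pins down the precedence order for a value that can come from several sources. The toy function below restates that order for illustration only; it is not the analyzer's own resolution logic.

def resolve_field(cli_value, yaml_value, default=None):
    # A CLI flag beats the YAML file, and the YAML file beats the
    # default, matching the two assertions above.
    if cli_value is not None:
        return cli_value
    if yaml_value is not None:
        return yaml_value
    return default

assert resolve_field('cli_repository', 'yaml_repository') == 'cli_repository'
assert resolve_field(None, 'yaml_repository') == 'yaml_repository'
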
Example 5
    def _evaluate_config(self, args, yaml_content):
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config_analyze = ConfigCommandAnalyze()
        config_report = ConfigCommandReport()
        cli = CLI()
        cli.add_subcommand(
            cmd="analyze",
            help='Collect and sort profiling results and generate data and '
            'summaries.',
            config=config_analyze)
        cli.add_subcommand(
            cmd='report',
            help='Generate detailed reports for a single config',
            config=config_report)
        cli.parse()
        mock_config.stop()

        ret = config_analyze if config_analyze.export_path else config_report
        return ret
Example 6
    def _evaluate_analyze_config(self, args, yaml_content):
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = ConfigCommandAnalyze()
        cli = CLI()
        cli.add_subcommand(
            cmd='analyze',
            help='Collect and sort profiling results and generate data and '
            'summaries.',
            config=config)
        cli.parse()
        mock_config.stop()
        return config
Example 7
    def _evaluate_profile_config(self, args, yaml_content):
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = ConfigCommandProfile()
        cli = CLI()
        cli.add_subcommand(
            cmd='profile',
            help='Run model inference profiling based on specified CLI or '
            'config options.',
            config=config)
        cli.parse()
        mock_config.stop()
        return config
Example 8
    def test_constraints(self):
        args = [
            'model-analyzer', '--model-repository', 'cli_repository', '-f',
            'path-to-config-file'
        ]
        yaml_content = """
model_names:
  -
    vgg_16_graphdef:
      parameters:
        concurrency:
          - 1
          - 2
          - 3
          - 4
      objectives:
        perf_throughput: 10
        gpu_used_memory: 5
      constraints:
        gpu_used_memory:
          max: 80
  - vgg_19_graphdef
"""
        config = self._evaluate_config(args, yaml_content)
        expected_model_objects = [
            ConfigModel('vgg_16_graphdef',
                        parameters={
                            'batch_sizes': [1],
                            'concurrency': [1, 2, 3, 4]
                        },
                        objectives={
                            'perf_throughput': 10,
                            'gpu_used_memory': 5
                        },
                        constraints={'gpu_used_memory': {
                            'max': 80,
                        }}),
            ConfigModel('vgg_19_graphdef',
                        parameters={
                            'batch_sizes': [1],
                            'concurrency': [1]
                        },
                        objectives={'perf_throughput': 10})
        ]
        self._assert_equality_of_model_configs(
            config.get_all_config()['model_names'], expected_model_objects)

        # GPU Memory shouldn't have min
        yaml_content = """
model_names:
  -
    vgg_16_graphdef:
      parameters:
        concurrency:
          - 1
          - 2
          - 3
          - 4
      objectives:
        - perf_throughput
        - gpu_used_memory
      constraints:
        gpu_memory:
          max: 80
          min: 45
  - vgg_19_graphdef
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)

        with self.assertRaises(TritonModelAnalyzerException):
            cli.parse()
        mock_config.stop()

        # Test objective key that is not one of the supported metrics
        yaml_content = """
model_names:
  -
    vgg_16_graphdef:
      parameters:
        concurrency:
          - 1
          - 2
          - 3
          - 4
      objectives:
        - throughput
      constraints:
        gpu_used_memory:
          max: 80
  - vgg_19_graphdef
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)

        with self.assertRaises(TritonModelAnalyzerException):
            cli.parse()
        mock_config.stop()
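
In the first case above, the expected ConfigModel objects make the default filling visible: a bare entry such as vgg_19_graphdef ends up with batch_sizes [1], concurrency [1], and the default objective. The sketch below restates that filling with plain dicts; the default values are copied from the test's expectations, and the function itself is only illustrative, not model-analyzer code.

DEFAULTS = {
    'parameters': {'batch_sizes': [1], 'concurrency': [1]},
    'objectives': {'perf_throughput': 10},
}

def fill_model_defaults(entry):
    # A 'model_names' list item is either a bare model name or a
    # one-key mapping of name -> settings.
    if isinstance(entry, str):
        name, settings = entry, {}
    else:
        name, settings = next(iter(entry.items()))
    filled = {
        'parameters': {**DEFAULTS['parameters'],
                       **settings.get('parameters', {})},
        'objectives': settings.get('objectives', DEFAULTS['objectives']),
    }
    if 'constraints' in settings:
        filled['constraints'] = settings['constraints']
    return name, filled

# A bare entry picks up every default, as vgg_19_graphdef does above.
assert fill_model_defaults('vgg_19_graphdef') == (
    'vgg_19_graphdef',
    {'parameters': {'batch_sizes': [1], 'concurrency': [1]},
     'objectives': {'perf_throughput': 10}})
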
Example 9
    def test_range_and_list_values(self):
        args = [
            'model-analyzer', '--model-repository', 'cli_repository', '-f',
            'path-to-config-file'
        ]
        yaml_content = 'model_names: model_1,model_2'
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()

        self.assertTrue(
            config.get_all_config()['model_names'] == ['model_1', 'model_2'])
        mock_config.stop()

        yaml_content = """
model_names:
    - model_1
    - model_2
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()

        self.assertTrue(
            config.get_all_config()['model_names'] == ['model_1', 'model_2'])
        mock_config.stop()

        args = [
            'model-analyzer', '--model-repository', 'cli_repository', '-f',
            'path-to-config-file', '--model-names', 'model_1,model_2'
        ]
        yaml_content = """
batch_sizes:
    - 2
    - 3
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()
        self.assertTrue(config.get_all_config()['batch_sizes'] == [2, 3])
        mock_config.stop()

        yaml_content = """
batch_sizes: 2
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()
        self.assertTrue(config.get_all_config()['batch_sizes'] == [2])
        mock_config.stop()

        yaml_content = """
concurrency: 2
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()
        self.assertTrue(config.get_all_config()['concurrency'] == [2])
        self.assertTrue(config.get_all_config()['batch_sizes'] == [1])
        mock_config.stop()

        yaml_content = """
batch_sizes:
    start: 2
    stop: 6
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()
        self.assertTrue(
            config.get_all_config()['batch_sizes'] == [2, 3, 4, 5, 6])
        mock_config.stop()

        yaml_content = """
batch_sizes:
    start: 2
    stop: 6
    step: 2
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()
        self.assertTrue(config.get_all_config()['batch_sizes'] == [2, 4, 6])
        mock_config.stop()
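
test_range_and_list_values accepts three spellings for these fields: a bare scalar, an explicit list, and a start/stop(/step) range whose stop value is inclusive. The standalone sketch below mirrors those expectations for illustration; it is not the analyzer's parsing code.

def expand_range_or_list(value):
    # Range mappings expand with an inclusive stop; scalars become
    # one-element lists; lists pass through unchanged.
    if isinstance(value, dict):
        start, stop = value['start'], value['stop']
        step = value.get('step', 1)
        return list(range(start, stop + 1, step))
    if isinstance(value, list):
        return value
    return [value]

assert expand_range_or_list({'start': 2, 'stop': 6}) == [2, 3, 4, 5, 6]
assert expand_range_or_list({'start': 2, 'stop': 6, 'step': 2}) == [2, 4, 6]
assert expand_range_or_list(2) == [2]
assert expand_range_or_list([2, 3]) == [2, 3]
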
Example 10
    def test_constraints(self):
        args = [
            'model-analyzer', '--model-repository', 'cli_repository', '-f',
            'path-to-config-file'
        ]
        yaml_content = """
model_names:
  -
    vgg_16_graphdef:
      parameters:
        concurrency:
          - 1
          - 2
          - 3
          - 4
      objectives:
        - throughput
        - gpu_memory
      constraints:
        gpu_memory:
          max: 80
  - vgg_19_graphdef
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()

        self.assertTrue(config.get_all_config()['model_names'] == [{
            'vgg_16_graphdef': {
                'parameters': {
                    'concurrency': [1, 2, 3, 4]
                },
                'objectives': ['throughput', 'gpu_memory'],
                'constraints': {
                    'gpu_memory': {
                        'max': 80,
                    }
                }
            }
        }, 'vgg_19_graphdef'])
        mock_config.stop()

        # GPU Memory shouldn't have min
        yaml_content = """
model_names:
  -
    vgg_16_graphdef:
      parameters:
        concurrency:
          - 1
          - 2
          - 3
          - 4
      objectives:
        - throughput
        - gpu_memory
      constraints:
        gpu_memory:
          max: 80
          min: 45
  - vgg_19_graphdef
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)

        with self.assertRaises(TritonModelAnalyzerException):
            cli.parse()
        mock_config.stop()
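
Both constraint cases encode the same schema rule: gpu_memory may carry an upper bound but not a lower one. The toy validator below illustrates that rule; the allowed-bounds table is an assumption made for the example, not model-analyzer's real schema.

ALLOWED_BOUNDS = {'gpu_memory': {'max'}, 'gpu_used_memory': {'max'}}  # assumed

def check_constraints(constraints):
    for metric, bounds in constraints.items():
        illegal = set(bounds) - ALLOWED_BOUNDS.get(metric, set())
        if illegal:
            raise ValueError(f"'{metric}' does not accept: {sorted(illegal)}")

check_constraints({'gpu_memory': {'max': 80}})             # accepted, like the first YAML
try:
    check_constraints({'gpu_memory': {'max': 80, 'min': 45}})
except ValueError as exc:
    print(exc)                                             # rejected, like the second YAML
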
Example 11
    def test_object(self):
        args = [
            'model-analyzer', '--model-repository', 'cli_repository', '-f',
            'path-to-config-file'
        ]
        yaml_content = """
model_names:
  -
    vgg_16_graphdef:
      parameters:
        concurrency:
          - 1
          - 2
          - 3
          - 4
  - vgg_19_graphdef
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()

        self.assertTrue(config.get_all_config()['model_names'] == [{
            'vgg_16_graphdef': {
                'parameters': {
                    'concurrency': [1, 2, 3, 4]
                }
            }
        }, 'vgg_19_graphdef'])
        mock_config.stop()

        yaml_content = """
model_names:
  vgg_16_graphdef:
    parameters:
      concurrency:
        - 1
        - 2
        - 3
        - 4
  vgg_19_graphdef:
    parameters:
      concurrency:
        - 1
        - 2
        - 3
        - 4
      batch_sizes:
          start: 2
          stop: 6
          step: 2
"""
        mock_config = MockConfig(args, yaml_content)
        mock_config.start()
        config = AnalyzerConfig()
        cli = CLI(config)
        cli.parse()

        self.assertTrue(
            config.get_all_config()['model_names'] == {
                'vgg_16_graphdef': {
                    'parameters': {
                        'concurrency': [1, 2, 3, 4]
                    }
                },
                'vgg_19_graphdef': {
                    'parameters': {
                        'concurrency': [1, 2, 3, 4],
                        'batch_sizes': [2, 4, 6]
                    }
                }
            })
        mock_config.stop()
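
test_object shows that model_names may be written either as a list whose items are bare names or one-key mappings, or as a plain name-to-settings mapping. The sketch below flattens both layouts into a single dict for illustration; it is not model-analyzer's own normalization code.

def normalize_model_names(value):
    # A mapping is already name -> settings; a list may mix bare names
    # (no settings) with one-key mappings.
    if isinstance(value, dict):
        return value
    normalized = {}
    for item in value:
        if isinstance(item, str):
            normalized[item] = {}
        else:
            normalized.update(item)
    return normalized

# List form, as in the first YAML of the test above.
assert normalize_model_names(
    [{'vgg_16_graphdef': {'parameters': {'concurrency': [1, 2, 3, 4]}}},
     'vgg_19_graphdef']) == {
         'vgg_16_graphdef': {'parameters': {'concurrency': [1, 2, 3, 4]}},
         'vgg_19_graphdef': {},
     }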