Example #1
    def test_process_folder(self, mock_upload):
        """Tests process folder and verifies args passed to upload_result."""
        report_config = self._report_config_example()
        reporting.process_folder(
            'test_runners/pytorch/unittest_files/results/basic',
            report_config=report_config)

        test_result = mock_upload.call_args[0][0]

        # Spot checks test_result.
        self.assertEqual(test_result['test_harness'], 'pytorch')
        self.assertEqual(test_result['test_environment'],
                         report_config['test_environment'])
        self.assertEqual(test_result['test_id'], 'resnet50.gpu_1.32.real')

        # Spot checks results and GCE project info used for reporting.
        results = mock_upload.call_args[0][1]
        self.assertEqual(results[0]['result'], 280.91838703453595)

        # Spot checks test_info.
        arg_test_info = mock_upload.call_args[1]['test_info']
        self.assertEqual(arg_test_info['accel_cnt'], 2)
        self.assertEqual(arg_test_info['cmd'], 'python blahblah.py -arg foo')

        # Spot checks extras to confirm representative fields are passed.
        arg_extras = mock_upload.call_args[1]['extras']
        # Checks that the config is saved in the extras field.
        self.assertIn('config', arg_extras)
        self.assertIn('batches_sampled', arg_extras)
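
A minimal sketch of the fixtures this test relies on but does not show: a
mock.patch decorator supplying mock_upload, and the _report_config_example
helper. The patched target upload_result and the helper name come from the
test itself; the import path and config fields below are assumptions for
illustration.

import unittest
from unittest import mock

from test_runners.pytorch import reporting  # assumed import path


class ProcessFolderTest(unittest.TestCase):

    def _report_config_example(self):
        # Illustrative fields only; the real config carries more.
        return {'test_environment': 'unit_test_env'}

    @mock.patch.object(reporting, 'upload_result')
    def test_process_folder(self, mock_upload):
        # Body as in the example above. After the call under test,
        # mock_upload.call_args[0] holds the positional args and
        # mock_upload.call_args[1] holds the keyword args.
        ...
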
Example #2
  def test_process_folder(
      self,
      mock_upload,
      mock_collect_results,
  ):
    """Tests process folder and verifies args passed to upload_result."""
    # Results to process
    test_id_0 = 'made.up.test_id'
    results_list = [
        self._mock_result(test_id_0, 10.5),
        self._mock_result(test_id_0, 20),
        self._mock_result(test_id_0, 0.44444),
    ]
    # Sets results_list to be returned from mock method.
    mock_collect_results.return_value = results_list

    report_config = self._report_config_example()

    reporting.process_folder('/foo/folder', report_config=report_config)

    test_result = mock_upload.call_args[0][0]

    # Spot checks test_result.
    self.assertEqual(test_result['test_harness'], 'tf_cnn_benchmark')
    self.assertEqual(test_result['test_environment'],
                     report_config['test_environment'])
    self.assertEqual(test_result['test_id'], test_id_0)

    # Spot checks results and GCE project info used for reporting.
    results = mock_upload.call_args[0][1]
    arg_report_project = mock_upload.call_args[0][2]
    arg_dataset = mock_upload.call_args[1]['dataset']
    arg_table = mock_upload.call_args[1]['table']
    self.assertEqual(results[0]['result'], 10.314813333333333)
    self.assertEqual(arg_report_project, 'google.com:tensorflow-performance')
    self.assertEqual(arg_dataset, 'benchmark_results_dev')
    self.assertEqual(arg_table, 'result')

    # Spot checks test_info.
    arg_test_info = mock_upload.call_args[1]['test_info']
    self.assertEqual(arg_test_info['framework_version'],
                     report_config['framework_version'])
    self.assertEqual(arg_test_info['framework_describe'],
                     report_config['framework_describe'])
    self.assertEqual(arg_test_info['cmd'],
                     'python some_script.py --arg0=foo --arg1=bar')

    self.assertEqual(arg_test_info['git_info']['benchmarks']['describe'],
                     'a2384503f')

    # Spot checks system_info.
    arg_system_info = mock_upload.call_args[1]['system_info']
    self.assertEqual(arg_system_info['accel_type'], report_config['accel_type'])

    # Spot checks extras to confirm a representative field is passed.
    arg_extras = mock_upload.call_args[1]['extras']
    self.assertIn('config', arg_extras)
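
The expected aggregate 10.314813333333333 is the arithmetic mean of the three
mocked samples (10.5, 20, 0.44444), which is consistent with process_folder
averaging repeated results for the same test_id. A quick check of the
arithmetic (the averaging behavior is inferred from the assertion, not stated
in the source):

samples = [10.5, 20, 0.44444]
mean = sum(samples) / len(samples)
print(mean)  # -> 10.314813333333333, the value asserted above.
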
Example #3
  def run_test_suite(self, full_config):
    """Run benchmarks defined by full_config.

    Args:
      full_config: Config representing tests to run.
    """

    # Leftover from a system that supported multiple instances for distributed
    # tests; currently uses the first and only instance from the list.
    instance = cluster_local.UseLocalInstances(
        virtual_env_path=full_config.get('virtual_env_path'))

    # Timestamp used to name the folder that stores suite results.
    full_config['test_suite_start_time'] = datetime.datetime.now().strftime(
        '%Y%m%dT%H%M%S')

    # Configs for the test suite
    test_suite = command_builder.build_test_config_suite(
        full_config, self.debug_level)

    for test_configs in test_suite:
      last_config = None
      for test_config in test_configs:
        last_config = test_config
        # Executes oom test or the normal benchmark.
        if test_config.get('oom_test'):
          low = test_config['oom_low']
          high = test_config['oom_high']
          next_val = high
          lowest_oom = high
          while next_val != -1:
            print('OOM testing--> low:{} high:{} next_val:{}'.format(
                low, high, next_val))
            test_config['batch_size'] = next_val
            result_dir = self.run_benchmark(test_config, instance)
            oom = reporting.check_oom(
                os.path.join(result_dir, 'worker_0_stdout.log'))
            if oom and next_val < lowest_oom:
              lowest_oom = next_val
            low, high, next_val = reporting.oom_batch_size_search(
                low, high, next_val, oom)
            print('Lowest OOM Value:{}'.format(lowest_oom))
        else:
          self.run_benchmark(test_config, instance)

      suite_dir_name = '{}_{}'.format(last_config['test_suite_start_time'],
                                      last_config['test_id'])
      reporting.process_folder(
          os.path.join(self.workspace, 'results', suite_dir_name),
          report_config=self.auto_test_config)
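
The OOM loop above amounts to a binary search over batch size: check_oom
inspects the worker log for an out-of-memory failure, and
oom_batch_size_search narrows the [low, high] window until it signals
completion with next_val == -1. A sketch of one search step under that
assumed contract (the real function lives in reporting and may differ):

def oom_batch_size_search(low, high, next_val, oom):
  """One bisection step toward the fit/OOM boundary for batch size.

  Returns an updated (low, high, next_val); next_val == -1 ends the search.
  """
  if oom:
    high = next_val  # next_val ran out of memory: search below it.
  else:
    low = next_val  # next_val fit: search above it.
  mid = (low + high) // 2
  if mid in (low, high):  # Window exhausted; stop searching.
    return low, high, -1
  return low, high, mid
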
Example #4
    def run_test_suite(self, test_config):
        """Run benchmarks defined by test_config.

        Args:
          test_config: Config representing tests to run.
        """
        # Timestamp used to name the folder that stores suite results.
        test_config['test_suite_start_time'] = datetime.datetime.now().strftime(
            '%Y%m%dT%H%M%S')

        instance = cluster_local.UseLocalInstances()
        for i in range(test_config['repeat']):
            self.run_benchmark(test_config, instance, copy=i)

        suite_dir_name = '{}_{}'.format(test_config['test_suite_start_time'],
                                        test_config['test_id'])
        reporting.process_folder(os.path.join(self.workspace, 'results',
                                              suite_dir_name),
                                 report_config=self.auto_test_config)
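
Both run_test_suite variants end the same way: process_folder aggregates
<workspace>/results/<suite_dir_name> and hands the summary to upload_result.
The folder name is reproducible from the config alone; a runnable sketch
(the workspace path and test_id are illustrative):

import datetime
import os

workspace = '/tmp/benchmarks'  # illustrative path
test_config = {'test_id': 'made.up.test_id'}
test_config['test_suite_start_time'] = datetime.datetime.now().strftime(
    '%Y%m%dT%H%M%S')

suite_dir_name = '{}_{}'.format(test_config['test_suite_start_time'],
                                test_config['test_id'])
print(os.path.join(workspace, 'results', suite_dir_name))
# e.g. /tmp/benchmarks/results/20240101T120000_made.up.test_id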