def test_merge_json_test_results_optional_differs(self):
     with self.assertRaises(results_merger.MergeException):
         results_merger.merge_test_results([
             extend(GOOD_JSON_TEST_RESULT_0, {'path_delimiter': '.'}),
             extend(GOOD_JSON_TEST_RESULT_1, {'path_delimiter': '.'}),
             extend(GOOD_JSON_TEST_RESULT_2, {'path_delimiter': '/'}),
         ])
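# Note: the extend() and remove() helpers used throughout these tests are not
# shown in this snippet. A minimal sketch of what they are assumed to do
# (copy the fixture, then add or drop keys, without mutating the original):

import copy

def extend(initial, add):
    # Assumed helper: deep-copy `initial` and overlay the keys from `add`.
    out = copy.deepcopy(initial)
    out.update(add)
    return out

def remove(initial, keys):
    # Assumed helper: deep-copy `initial` and delete each key in `keys`.
    out = copy.deepcopy(initial)
    for key in keys:
        del out[key]
    return out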
 def test_merge_metadata_raises_exception(self):
     metadata1 = {'metadata': {'tags': ['foo', 'bar']}}
     metadata2 = {'metadata': {'tags': ['foo', 'bat']}}
     with self.assertRaises(results_merger.MergeException):
         results_merger.merge_test_results([
             extend(GOOD_JSON_TEST_RESULT_0, metadata1),
             extend(GOOD_JSON_TEST_RESULT_1, metadata2)
         ])
    def test_merge_json_test_results_missing_required(self):
        with self.assertRaises(results_merger.MergeException):
            results_merger.merge_test_results([
                remove(GOOD_JSON_TEST_RESULT_0, ['interrupted']),
            ])

        with self.assertRaises(results_merger.MergeException):
            results_merger.merge_test_results([
                GOOD_JSON_TEST_RESULT_0,
                remove(GOOD_JSON_TEST_RESULT_1, ['interrupted']),
            ])
    def test_merge_json_test_results_missing_version(self):
        with self.assertRaises(results_merger.MergeException):
            results_merger.merge_test_results([
                remove(GOOD_JSON_TEST_RESULT_0, ['version']),
            ])

        with self.assertRaises(results_merger.MergeException):
            results_merger.merge_test_results([
                GOOD_JSON_TEST_RESULT_0,
                remove(GOOD_JSON_TEST_RESULT_1, ['version']),
            ])
    def test_merge_json_test_results_invalid_extra(self):
        with self.assertRaises(results_merger.MergeException):
            results_merger.merge_test_results([
                extend(GOOD_JSON_TEST_RESULT_0, {'extra': True}),
            ])

        with self.assertRaises(results_merger.MergeException):
            results_merger.merge_test_results([
                GOOD_JSON_TEST_RESULT_0,
                extend(GOOD_JSON_TEST_RESULT_1, {'extra': True}),
            ])
 def test_merge_json_test_results_optional_matches(self):
     self.assertEquals(
         results_merger.merge_test_results([
             extend(GOOD_JSON_TEST_RESULT_0, {'path_delimiter': '.'}),
             extend(GOOD_JSON_TEST_RESULT_1, {'path_delimiter': '.'}),
             extend(GOOD_JSON_TEST_RESULT_2, {'path_delimiter': '.'}),
         ]), extend(GOOD_JSON_TEST_RESULT_MERGED, {'path_delimiter': '.'}))
 def test_merge_json_test_results_optional_count(self):
     self.assertEquals(
         results_merger.merge_test_results([
             extend(GOOD_JSON_TEST_RESULT_0, {'fixable': 1}),
             extend(GOOD_JSON_TEST_RESULT_1, {'fixable': 2}),
             extend(GOOD_JSON_TEST_RESULT_2, {'fixable': 3}),
         ]), extend(GOOD_JSON_TEST_RESULT_MERGED, {'fixable': 6}))
 def test_merge_json_test_results_multiple(self):
     self.assertEquals(
         results_merger.merge_test_results([
             GOOD_JSON_TEST_RESULT_0,
             GOOD_JSON_TEST_RESULT_1,
             GOOD_JSON_TEST_RESULT_2,
         ]), GOOD_JSON_TEST_RESULT_MERGED)
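# The GOOD_JSON_TEST_RESULT_* fixtures are not included in this snippet. A
# hypothetical minimal fixture in the Chromium JSON Test Results format is
# sketched below; the field values are illustrative only, not the real
# fixture contents:

HYPOTHETICAL_JSON_TEST_RESULT = {
    'version': 3,
    'interrupted': False,
    'seconds_since_epoch': 1500000000,
    'num_failures_by_type': {'PASS': 1, 'FAIL': 0},
    'tests': {
        'suite': {
            'test_one': {'expected': 'PASS', 'actual': 'PASS'},
        },
    },
}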
Example #9
 def test_merge_metadata(self):
   metadata1 = {'metadata': {'tags': ['foo', 'bar']}}
   metadata2 = {'metadata': {'tags': ['foo', 'bat']}}
   merged_results = results_merger.merge_test_results(
       [extend(GOOD_JSON_TEST_RESULT_0, metadata1),
        extend(GOOD_JSON_TEST_RESULT_1, metadata2)])
   self.assertEquals(
       merged_results['metadata']['tags'], ['foo', 'bat'])
 def test_merge_json_test_results_nop(self):
     good_json_results = (GOOD_JSON_TEST_RESULT_0, GOOD_JSON_TEST_RESULT_1,
                          GOOD_JSON_TEST_RESULT_2,
                          GOOD_JSON_TEST_RESULT_MERGED)
     for j in good_json_results:
         # Clone so we can check the input dictionaries are not modified
         a = copy.deepcopy(j)
         self.assertEquals(results_merger.merge_test_results([a]), j)
         self.assertEquals(a, j)
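# results_merger itself is not shown here. A deliberately simplified sketch of
# the kind of merging these tests exercise (union the 'tests' tries, sum the
# per-type counts, and raise on a version mismatch); this is an illustration,
# not the real results_merger implementation:

import copy

class MergeException(Exception):
    pass

def merge_test_results_sketch(shard_results_list):
    # An empty input merges to an empty result.
    if not shard_results_list:
        return {}
    merged = copy.deepcopy(shard_results_list[0])
    for shard in shard_results_list[1:]:
        if shard.get('version') != merged.get('version'):
            raise MergeException('Shards disagree on the results version.')
        merged['tests'].update(copy.deepcopy(shard['tests']))
        for result_type, count in shard['num_failures_by_type'].items():
            merged['num_failures_by_type'][result_type] = (
                merged['num_failures_by_type'].get(result_type, 0) + count)
    return merged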
def StandardIsolatedScriptMerge(output_json, summary_json, jsons_to_merge):
    """Merge the contents of one or more results JSONs into a single JSON.

    Args:
      output_json: A path to a JSON file to which the merged results should be
        written.
      summary_json: A path to the summary.json file produced by the swarming
        client; used to determine the number of shards and their task IDs.
      jsons_to_merge: A list of paths to JSON files that should be merged.
    """
    # summary.json is produced by the swarming client itself. We are mostly
    # interested in the number of shards.
    try:
        with open(summary_json) as f:
            summary = json.load(f)
    except (IOError, ValueError):
        print(
            'summary.json is missing or cannot be read.',
            'Something is seriously wrong with the swarming client or the bot.',
            file=sys.stderr)
        return 1

    missing_shards = []
    shard_results_list = []
    for index, result in enumerate(summary['shards']):
        output_path = None
        if result:
            output_path = find_shard_output_path(index, result.get('task_id'),
                                                 jsons_to_merge)
        if not output_path:
            missing_shards.append(index)
            continue

        with open(output_path) as f:
            try:
                json_contents = json.load(f)
            except ValueError:
                raise ValueError('Failed to parse JSON from %s' % output_path)
            shard_results_list.append(json_contents)

    merged_results = results_merger.merge_test_results(shard_results_list)
    if missing_shards:
        merged_results['missing_shards'] = missing_shards
        if 'global_tags' not in merged_results:
            merged_results['global_tags'] = []
        merged_results['global_tags'].append('UNRELIABLE_RESULTS')

    with open(output_json, 'w') as f:
        json.dump(merged_results, f)

    return 0
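# find_shard_output_path() is defined elsewhere in the merge script. A rough,
# assumed sketch of the behaviour this code relies on: pick the shard's output
# JSON by matching its task id (or shard index) against the candidate paths.
# The matching rule here is a guess for illustration, not the exact logic.

import os

def find_shard_output_path(index, task_id, jsons_to_merge):
    candidates = [
        j for j in jsons_to_merge
        if (task_id and task_id in j) or
        str(index) in os.path.basename(os.path.dirname(j))
    ]
    # Only return a path when the match is unambiguous.
    return candidates[0] if len(candidates) == 1 else None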
Example #12
def StandardIsolatedScriptMerge(output_json, jsons_to_merge):
    """Merge the contents of one or more results JSONs into a single JSON.

    Args:
      output_json: A path to a JSON file to which the merged results should be
        written.
      jsons_to_merge: A list of paths to JSON files that should be merged.
    """
    shard_results_list = []
    for j in jsons_to_merge:
        with open(j) as f:
            shard_results_list.append(json.load(f))
    merged_results = results_merger.merge_test_results(shard_results_list)

    with open(output_json, 'w') as f:
        json.dump(merged_results, f)

    return 0
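# A hypothetical invocation of this simpler variant; the file paths below are
# placeholders rather than paths produced by any real bot:

import sys

if __name__ == '__main__':
    sys.exit(StandardIsolatedScriptMerge(
        'merged_output.json',
        ['shard0/output.json', 'shard1/output.json']))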
def main(sys_args):
  args = sys_args[1:]  # Skip program name.
  options = parse_arguments(args)
  isolated_out_dir = os.path.dirname(options.isolated_script_test_output)
  overall_return_code = 0
  # This is a list of test results files to be merged into a standard
  # output.json file for use by infrastructure including FindIt.
  # This list should not contain reference build runs
  # since we do not monitor those. Also, merging test reference build results
  # with standard build results may not work properly.
  test_results_files = []

  print('Running a series of performance test subprocesses. Logs, performance\n'
        'results, and test results JSON will be saved in a subfolder of the\n'
        'isolated output directory. Inside the hash marks in the following\n'
        'lines is the name of the subfolder to find results in.\n')

  if options.non_telemetry:
    benchmark_name = options.gtest_benchmark_name
    passthrough_args = options.passthrough_args
    # crbug/1146949#c15
    # When Pinpoint passes all arguments to Swarming through an HTTP request,
    # the passthrough_args arrive as a single comma-separated string.
    if passthrough_args and isinstance(passthrough_args, six.text_type):
      passthrough_args = passthrough_args.split(',')
    # With --non-telemetry, the gtest executable path is passed in as
    # options.executable, which differs from the shard-map flow, so we don't
    # override the executable as we do when running from a shard map.
    command_generator = GtestCommandGenerator(
        options, additional_flags=passthrough_args, ignore_shard_env_vars=True)
    # Fall back to the name of the executable if the flag isn't set.
    # TODO(crbug.com/870899): remove fallback logic and raise parser error if
    # --non-telemetry is set but --gtest-benchmark-name is not set once pinpoint
    # is converted to always pass --gtest-benchmark-name flag.
    if not benchmark_name:
      benchmark_name = options.executable
    output_paths = OutputFilePaths(isolated_out_dir, benchmark_name).SetUp()
    print('\n### {folder} ###'.format(folder=benchmark_name))
    overall_return_code = execute_gtest_perf_test(
        command_generator, output_paths, options.xvfb)
    test_results_files.append(output_paths.test_results)
  else:
    if options.use_dynamic_shards:
      shard_map_str = options.dynamic_shardmap
      shard_map = json.loads(shard_map_str, object_pairs_hook=OrderedDict)
      shard_map_path = os.path.join(SHARD_MAPS_DIRECTORY,
                                    options.test_shard_map_filename)
      with open(shard_map_path, 'w') as f:
        json.dump(shard_map, f, indent=4, separators=(',', ': '))
      shutil.copyfile(
          shard_map_path,
          os.path.join(isolated_out_dir, 'benchmarks_shard_map.json'))
      overall_return_code = _run_benchmarks_on_shardmap(
          shard_map, options, isolated_out_dir, test_results_files
      )
    # If the user has supplied a list of benchmark names, execute those instead
    # of using the shard map.
    elif options.benchmarks:
      benchmarks = options.benchmarks.split(',')
      for benchmark in benchmarks:
        output_paths = OutputFilePaths(isolated_out_dir, benchmark).SetUp()
        command_generator = TelemetryCommandGenerator(
            benchmark, options)
        print('\n### {folder} ###'.format(folder=benchmark))
        return_code = execute_telemetry_benchmark(
            command_generator, output_paths, options.xvfb)
        overall_return_code = return_code or overall_return_code
        test_results_files.append(output_paths.test_results)
      if options.run_ref_build:
        print('Not running reference build. --run-ref-build argument is only '
              'supported for sharded benchmarks. It is simple to support '
              'this for unsharded --benchmarks if needed.')
    elif options.test_shard_map_filename:
      # First determine which shard we are running on so we know how to index
      # into the bot map to get the list of telemetry benchmarks to run.
      shard_map_path = os.path.join(SHARD_MAPS_DIRECTORY,
                                    options.test_shard_map_filename)
      # Copy sharding map file to isolated_out_dir so that the merge script
      # can collect it later.
      shutil.copyfile(
          shard_map_path,
          os.path.join(isolated_out_dir, 'benchmarks_shard_map.json'))
      with open(shard_map_path) as f:
        shard_map = json.load(f)
      overall_return_code = _run_benchmarks_on_shardmap(
          shard_map, options, isolated_out_dir, test_results_files
      )
    else:
      raise Exception('Telemetry tests must provide either a shard map or a '
                      '--benchmarks list so that we know which stories to run.')

  test_results_list = []
  for test_results_file in test_results_files:
    if os.path.exists(test_results_file):
      with open(test_results_file, 'r') as fh:
        test_results_list.append(json.load(fh))
  merged_test_results = results_merger.merge_test_results(test_results_list)
  with open(options.isolated_script_test_output, 'w') as f:
    json.dump(merged_test_results, f)

  return overall_return_code
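# parse_arguments() is not part of this snippet. A minimal argparse sketch
# covering only the options main() reads above; the flag spellings are
# assumptions derived from the attribute names, not the script's actual
# argument definitions:

import argparse

def parse_arguments(args):
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', required=True)
    parser.add_argument('--non-telemetry', action='store_true')
    parser.add_argument('--gtest-benchmark-name')
    parser.add_argument('--passthrough-arg', action='append',
                        dest='passthrough_args')
    parser.add_argument('--use-dynamic-shards', action='store_true')
    parser.add_argument('--dynamic-shardmap')
    parser.add_argument('--test-shard-map-filename')
    parser.add_argument('--benchmarks')
    parser.add_argument('--run-ref-build', action='store_true')
    parser.add_argument('--xvfb', action='store_true')
    parser.add_argument('--executable')
    return parser.parse_args(args)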
Example #14
 def test_merge_test_name_prefix_raises_exception(self):
     with self.assertRaises(results_merger.MergeException):
         results_merger.merge_test_results([
             extend(GOOD_JSON_TEST_RESULT_0, {'test_name_prefix': 'a.d.c'}),
             extend(GOOD_JSON_TEST_RESULT_1, {'test_name_prefix': 'a.b.c'})
         ])
 def test_merge_nothing(self):
     self.assertEquals(results_merger.merge_test_results([]), {})
Example #16
def main(sys_args):
    args = sys_args[1:]  # Skip program name.
    options = parse_arguments(args)
    isolated_out_dir = os.path.dirname(options.isolated_script_test_output)
    overall_return_code = 0
    # This is a list of test results files to be merged into a standard
    # output.json file for use by infrastructure including FindIt.
    # This list should not contain reference build runs
    # since we do not monitor those. Also, merging test reference build results
    # with standard build results may not work properly.
    test_results_files = []

    print(
        'Running a series of performance test subprocesses. Logs, performance\n'
        'results, and test results JSON will be saved in a subfolder of the\n'
        'isolated output directory. Inside the hash marks in the following\n'
        'lines is the name of the subfolder to find results in.\n')

    if options.non_telemetry:
        command_generator = GtestCommandGenerator(options)
        benchmark_name = options.gtest_benchmark_name
        # Fall back to the name of the executable if the flag isn't set.
        # TODO(crbug.com/870899): remove fallback logic and raise parser error if
        # --non-telemetry is set but --gtest-benchmark-name is not set once pinpoint
        # is converted to always pass --gtest-benchmark-name flag.
        if not benchmark_name:
            benchmark_name = options.executable
        output_paths = OutputFilePaths(isolated_out_dir,
                                       benchmark_name).SetUp()
        print('\n### {folder} ###'.format(folder=benchmark_name))
        overall_return_code = execute_gtest_perf_test(command_generator,
                                                      output_paths,
                                                      options.xvfb)
        test_results_files.append(output_paths.test_results)
    else:
        # If the user has supplied a list of benchmark names, execute those instead
        # of using the shard map.
        if options.benchmarks:
            benchmarks = options.benchmarks.split(',')
            for benchmark in benchmarks:
                output_paths = OutputFilePaths(isolated_out_dir,
                                               benchmark).SetUp()
                command_generator = TelemetryCommandGenerator(
                    benchmark, options)
                print('\n### {folder} ###'.format(folder=benchmark))
                return_code = execute_telemetry_benchmark(
                    command_generator, output_paths, options.xvfb)
                overall_return_code = return_code or overall_return_code
                test_results_files.append(output_paths.test_results)
            if options.run_ref_build:
                print(
                    'Not running reference build. --run-ref-build argument is only '
                    'supported for sharded benchmarks. It is simple to support '
                    'this for unsharded --benchmarks if needed.')
        elif options.test_shard_map_filename:
            # First determine which shard we are running on so we know how to
            # index into the bot map to get the list of telemetry benchmarks to run.
            shard_index = None
            shard_map_path = os.path.join(SHARD_MAPS_DIRECTORY,
                                          options.test_shard_map_filename)
            # Copy sharding map file to isolated_out_dir so that the merge script
            # can collect it later.
            # TODO(crouleau): Move this step over to merge script
            # (process_perf_results.py).
            shutil.copyfile(
                shard_map_path,
                os.path.join(isolated_out_dir, 'benchmarks_shard_map.json'))
            with open(shard_map_path) as f:
                shard_map = json.load(f)
            env = os.environ.copy()
            if 'GTEST_SHARD_INDEX' in env:
                shard_index = env['GTEST_SHARD_INDEX']
            # TODO(crbug.com/972844): shard environment variables are not specified
            # for single-shard runs.
            if not shard_index:
                shard_map_has_multiple_shards = bool(shard_map.get('1', False))
                if not shard_map_has_multiple_shards:
                    shard_index = '0'
            if not shard_index:
                raise Exception(
                    'Sharded Telemetry perf tests must either specify --benchmarks '
                    'list or have GTEST_SHARD_INDEX environment variable present.'
                )
            benchmarks_and_configs = shard_map[shard_index]['benchmarks']

            for (benchmark,
                 story_selection_config) in benchmarks_and_configs.items():
                # Need to run the benchmark on both latest browser and reference build.
                output_paths = OutputFilePaths(isolated_out_dir,
                                               benchmark).SetUp()
                command_generator = TelemetryCommandGenerator(
                    benchmark,
                    options,
                    story_selection_config=story_selection_config)
                print('\n### {folder} ###'.format(folder=benchmark))
                return_code = execute_telemetry_benchmark(
                    command_generator, output_paths, options.xvfb)
                overall_return_code = return_code or overall_return_code
                test_results_files.append(output_paths.test_results)
                if options.run_ref_build:
                    reference_benchmark_foldername = benchmark + '.reference'
                    reference_output_paths = OutputFilePaths(
                        isolated_out_dir,
                        reference_benchmark_foldername).SetUp()
                    reference_command_generator = TelemetryCommandGenerator(
                        benchmark,
                        options,
                        story_selection_config=story_selection_config,
                        is_reference=True)
                    print('\n### {folder} ###'.format(
                        folder=reference_benchmark_foldername))
                    # We intentionally ignore the return code and test results of the
                    # reference build.
                    execute_telemetry_benchmark(reference_command_generator,
                                                reference_output_paths,
                                                options.xvfb)
        else:
            raise Exception(
                'Telemetry tests must provide either a shard map or a '
                '--benchmarks list so that we know which stories to run.')

    test_results_list = []
    for test_results_file in test_results_files:
        if os.path.exists(test_results_file):
            with open(test_results_file, 'r') as fh:
                test_results_list.append(json.load(fh))
    merged_test_results = results_merger.merge_test_results(test_results_list)
    with open(options.isolated_script_test_output, 'w') as f:
        json.dump(merged_test_results, f)

    return overall_return_code
 def test_merge_metadata(self):
     metadata = {'metadata': {'tags': ['foo', 'bar']}}
     results_merger.merge_test_results([
         extend(GOOD_JSON_TEST_RESULT_0, metadata),
         extend(GOOD_JSON_TEST_RESULT_1, metadata)
     ])
Example #18
 def test_merge_test_name_prefix(self):
     results_merger.merge_test_results([
         extend(GOOD_JSON_TEST_RESULT_0, {'test_name_prefix': 'a.b.c'}),
         extend(GOOD_JSON_TEST_RESULT_1, {'test_name_prefix': 'a.b.c'})
     ])