def main():
    desc = "Merge profdata files in <--input-dir> into a single profdata."
    parser = _merge_steps_argument_parser(description=desc)
    params = parser.parse_args()
    merger.merge_profiles(params.input_dir, params.output_file, '.profdata',
                          params.llvm_profdata,
                          params.profdata_filename_pattern)
Example #2
    def test_merge_profdata(self, mock_validate_and_convert_profraws):
        mock_input_dir_walk = [
            ('/b/some/path', ['base_unittests',
                              'url_unittests'], ['summary.json']),
            ('/b/some/path/base_unittests', [],
             ['output.json', 'default.profdata']),
            ('/b/some/path/url_unittests', [],
             ['output.json', 'default.profdata']),
        ]
        with mock.patch.object(os, 'walk') as mock_walk:
            with mock.patch.object(os, 'remove'):
                mock_walk.return_value = mock_input_dir_walk
                with mock.patch.object(subprocess,
                                       'check_output') as mock_exec_cmd:
                    merger.merge_profiles('/b/some/path',
                                          'output/dir/default.profdata',
                                          '.profdata', 'llvm-profdata')
                    self.assertEqual(
                        mock.call(
                            [
                                'llvm-profdata',
                                'merge',
                                '-o',
                                'output/dir/default.profdata',
                                '-sparse=true',
                                '/b/some/path/base_unittests/default.profdata',
                                '/b/some/path/url_unittests/default.profdata',
                            ],
                            stderr=-2,
                        ), mock_exec_cmd.call_args)

        # The validation mock should only be invoked when merging .profraw files.
        self.assertFalse(mock_validate_and_convert_profraws.called)
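
The command asserted on above maps directly onto a plain `llvm-profdata merge` invocation. Below is a minimal sketch of how such a call could be assembled and executed; the helper name `_run_llvm_profdata_merge` is an illustration, not the merger module's actual code (note that `stderr=-2` in the asserted call is simply the value of `subprocess.STDOUT`).

import subprocess


def _run_llvm_profdata_merge(llvm_profdata, output_file, input_files):
    # Assemble the same command shape the test asserts on:
    #   llvm-profdata merge -o <output> -sparse=true <inputs...>
    cmd = [llvm_profdata, 'merge', '-o', output_file, '-sparse=true'] + list(input_files)
    # Fold stderr into stdout so error text (e.g. malformed-profile messages)
    # is captured alongside regular output.
    return subprocess.check_output(cmd, stderr=subprocess.STDOUT)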
Example #3
    def test_profraw_skip_validation(self, mock_validate_and_convert_profraws):
        mock_input_dir_walk = [
            ('/b/some/path', ['0', '1', '2', '3'], ['summary.json']),
            ('/b/some/path/0', [],
             ['output.json', 'default-1.profraw', 'default-2.profraw']),
            ('/b/some/path/1', [],
             ['output.json', 'default-1.profraw', 'default-2.profraw']),
        ]

        with mock.patch.object(os, 'walk') as mock_walk:
            with mock.patch.object(os, 'remove'):
                mock_walk.return_value = mock_input_dir_walk
                with mock.patch.object(subprocess,
                                       'check_output') as mock_exec_cmd:
                    merger.merge_profiles('/b/some/path',
                                          'output/dir/default.profdata',
                                          '.profraw',
                                          'llvm-profdata',
                                          skip_validation=True)
                    self.assertEqual(
                        mock.call(
                            [
                                'llvm-profdata', 'merge', '-o',
                                'output/dir/default.profdata', '-sparse=true',
                                '/b/some/path/0/default-1.profraw',
                                '/b/some/path/0/default-2.profraw',
                                '/b/some/path/1/default-1.profraw',
                                '/b/some/path/1/default-2.profraw'
                            ],
                            stderr=-2,
                        ), mock_exec_cmd.call_args)

        # With skip_validation, all profraw files are passed through directly,
        # so the validation helper should not have been invoked.
        self.assertFalse(mock_validate_and_convert_profraws.called)
Example #4
  def test_merge_profraw_skip_if_there_is_no_file(self):
    mock_input_dir_walk = [
        ('/b/some/path', ['0', '1', '2', '3'], ['summary.json']),
    ]

    with mock.patch.object(os, 'walk') as mock_walk:
      mock_walk.return_value = mock_input_dir_walk
      with mock.patch.object(subprocess, 'check_output') as mock_exec_cmd:
        merger.merge_profiles('/b/some/path', 'output/dir/default.profdata',
                              '.profraw', 'llvm-profdata')
        self.assertFalse(mock_exec_cmd.called)
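
The test above only verifies that llvm-profdata is never invoked when the directory walk yields no matching profiles. A minimal sketch of the kind of guard that would satisfy it, with illustrative names rather than the merger's real internals:

import os


def _find_profiles(input_dir, extension):
    # Collect every file under input_dir whose name ends with the given
    # extension ('.profraw' or '.profdata').
    profiles = []
    for parent, _dirs, filenames in os.walk(input_dir):
        profiles.extend(os.path.join(parent, name)
                        for name in filenames if name.endswith(extension))
    return profiles


# Inside the merge routine, an empty list means there is nothing to merge and
# the llvm-profdata call is skipped entirely:
#     if not profiles:
#         return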
Example #5
def main():
    desc = "Merge profraw files in <--task-output-dir> into a single profdata."
    parser = _MergeAPIArgumentParser(description=desc)
    params = parser.parse_args()

    output_profdata_filename = 'default.profdata'
    invalid_profiles, counter_overflows = coverage_merger.merge_profiles(
        params.task_output_dir,
        os.path.join(params.profdata_dir, output_profdata_filename), '.profraw',
        params.llvm_profdata)

    # At the moment counter overflows overlap with invalid profiles, but this is
    # not guaranteed to remain the case indefinitely. To avoid future conflicts
    # treat these separately.
    if counter_overflows:
        with open(
                os.path.join(params.profdata_dir,
                             'profiles_with_overflows.json'), 'w') as f:
            json.dump(counter_overflows, f)

    if invalid_profiles:
        with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
                  'w') as f:
            json.dump(invalid_profiles, f)

    return 1 if bool(invalid_profiles) else 0
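
Most of the main() variants on this page parse their flags through _MergeAPIArgumentParser, which is not shown here. A plausible argparse sketch covering the parameters these examples reference (the flag names and the positional jsons_to_merge are assumptions inferred from usage; the real parser defines more options, e.g. --test-target-name and --skip-validation):

import argparse


def _MergeAPIArgumentParser(*args, **kwargs):
    # Hypothetical reconstruction based on the attributes read from `params`
    # in the surrounding examples.
    parser = argparse.ArgumentParser(*args, **kwargs)
    parser.add_argument('--build-properties', help='build properties in JSON format')
    parser.add_argument('--summary-json', help='summary file written by the swarming task')
    parser.add_argument('--task-output-dir', help='directory containing the shard outputs')
    parser.add_argument('--output-json', help='where to write the merged test results')
    parser.add_argument('--profdata-dir', help='where to write the merged .profdata file')
    parser.add_argument('--llvm-profdata', help='path to the llvm-profdata executable')
    parser.add_argument('--additional-merge-script', help='test results merge script to delegate to')
    parser.add_argument('--additional-merge-script-args', help='JSON list of extra arguments')
    parser.add_argument('jsons_to_merge', nargs='*', help='output json files from each shard')
    return parser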
Example #6
    def test_retry_profdata_merge_failures(self):
        mock_input_dir_walk = [
            ('/b/some/path', ['0', '1'], ['summary.json']),
            ('/b/some/path/0', [],
             ['output.json', 'default-1.profdata', 'default-2.profdata']),
            ('/b/some/path/1', [],
             ['output.json', 'default-1.profdata', 'default-2.profdata']),
        ]
        with mock.patch.object(os, 'walk') as mock_walk:
            with mock.patch.object(os, 'remove'):
                mock_walk.return_value = mock_input_dir_walk
                with mock.patch.object(subprocess,
                                       'check_output') as mock_exec_cmd:
                    invalid_profiles_msg = (
                        'error: /b/some/path/0/default-1.profdata: Malformed '
                        'instrumentation profile data.')

                    # Fail on the first merge attempt, but succeed on the second.
                    mock_exec_cmd.side_effect = [
                        subprocess.CalledProcessError(
                            returncode=1,
                            cmd='dummy cmd',
                            output=invalid_profiles_msg), None
                    ]

                    merger.merge_profiles('/b/some/path',
                                          'output/dir/default.profdata',
                                          '.profdata', 'llvm-profdata')

                    self.assertEqual(2, mock_exec_cmd.call_count)

                    # Note that in the second call, /b/some/path/0/default-1.profdata is
                    # excluded!
                    self.assertEqual(
                        mock.call(
                            [
                                'llvm-profdata',
                                'merge',
                                '-o',
                                'output/dir/default.profdata',
                                '-sparse=true',
                                '/b/some/path/0/default-2.profdata',
                                '/b/some/path/1/default-1.profdata',
                                '/b/some/path/1/default-2.profdata',
                            ],
                            stderr=-2,
                        ), mock_exec_cmd.call_args)
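
The retry behaviour asserted above can be summarized as: run the merge once, and if llvm-profdata reports malformed inputs, pick the offending paths out of its output and rerun without them. A rough sketch of that shape under those assumptions (the real merger's error parsing and bookkeeping are more involved):

import subprocess


def _merge_with_retry(llvm_profdata, output_file, input_files):
    cmd = [llvm_profdata, 'merge', '-o', output_file, '-sparse=true']
    try:
        subprocess.check_output(cmd + input_files, stderr=subprocess.STDOUT)
        return []
    except subprocess.CalledProcessError as error:
        # Treat any input quoted in an "error: <path>: Malformed instrumentation
        # profile data" message as invalid and drop it from the second attempt.
        invalid = [f for f in input_files if f in str(error.output)]
        remaining = [f for f in input_files if f not in invalid]
        if remaining:
            subprocess.check_output(cmd + remaining, stderr=subprocess.STDOUT)
        return invalid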
Example #7
    def test_merge_profraw(self, mock_validate_and_convert_profraws):
        mock_input_dir_walk = [
            ('/b/some/path', ['0', '1', '2', '3'], ['summary.json']),
            ('/b/some/path/0', [],
             ['output.json', 'default-1.profraw', 'default-2.profraw']),
            ('/b/some/path/1', [],
             ['output.json', 'default-1.profraw', 'default-2.profraw']),
        ]

        mock_validate_and_convert_profraws.return_value = [
            '/b/some/path/0/default-1.profdata',
            '/b/some/path/1/default-2.profdata',
        ], [
            '/b/some/path/0/default-2.profraw',
            '/b/some/path/1/default-1.profraw',
        ], [
            '/b/some/path/1/default-1.profraw',
        ]

        with mock.patch.object(os, 'walk') as mock_walk:
            with mock.patch.object(os, 'remove'):
                mock_walk.return_value = mock_input_dir_walk
                with mock.patch.object(subprocess,
                                       'check_output') as mock_exec_cmd:
                    merger.merge_profiles('/b/some/path',
                                          'output/dir/default.profdata',
                                          '.profraw', 'llvm-profdata')
                    self.assertEqual(
                        mock.call(
                            [
                                'llvm-profdata',
                                'merge',
                                '-o',
                                'output/dir/default.profdata',
                                '-sparse=true',
                                '/b/some/path/0/default-1.profdata',
                                '/b/some/path/1/default-2.profdata',
                            ],
                            stderr=-2,
                        ), mock_exec_cmd.call_args)

        self.assertTrue(mock_validate_and_convert_profraws.called)
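
The mocked return value above implies a contract for validate_and_convert_profraws: given the discovered .profraw files, it returns three lists, the successfully converted .profdata paths, the invalid .profraw paths, and the .profraw paths whose counters overflowed (which, as the main() examples note, currently overlap with the invalid ones). A hedged sketch of that contract; the per-file conversion and the overflow check are assumptions, not the real implementation:

import subprocess


def validate_and_convert_profraws(profraw_files, profdata_tool_path):
    # Assumed contract, inferred from the mock in test_merge_profraw above.
    converted_profdata, invalid_profraws, counter_overflows = [], [], []
    for profraw in profraw_files:
        profdata = profraw.rsplit('.', 1)[0] + '.profdata'
        cmd = [profdata_tool_path, 'merge', '-o', profdata, '-sparse=true', profraw]
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            converted_profdata.append(profdata)
        except subprocess.CalledProcessError as error:
            invalid_profraws.append(profraw)
            # Counter overflows currently surface as conversion errors too,
            # which is why the two lists can overlap.
            if 'counter overflow' in str(error.output or '').lower():
                counter_overflows.append(profraw)
    return converted_profdata, invalid_profraws, counter_overflows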
Example #8
def main():
    desc = "Merge profraw files in <--task-output-dir> into a single profdata."
    parser = _MergeAPIArgumentParser(description=desc)
    params = parser.parse_args()

    logging.info('Merging %d test results', len(params.jsons_to_merge))

    failed = False

    if params.additional_merge_script:
        new_args = [
            '--build-properties',
            params.build_properties,
            '--summary-json',
            params.summary_json,
            '--task-output-dir',
            params.task_output_dir,
            '--output-json',
            params.output_json,
        ]
        if params.additional_merge_script_args:
            new_args += params.additional_merge_script_args

        new_args += params.jsons_to_merge

        rc = subprocess.call([sys.executable, params.additional_merge_script] +
                             new_args)
        if rc != 0:
            failed = True
            logging.warning('Additional merge script %s exited with %s' %
                            (params.additional_merge_script, rc))

    invalid_profiles = coverage_merger.merge_profiles(
        params.task_output_dir,
        os.path.join(params.profdata_dir, 'default.profdata'), '.profraw',
        params.llvm_profdata)
    if invalid_profiles:
        with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
                  'w') as f:
            json.dump(invalid_profiles, f)

    return 1 if (failed or bool(invalid_profiles)) else 0
Example #9
def main():
    desc = "Merge profraw files in <--task-output-dir> into a single profdata."
    parser = _MergeAPIArgumentParser(description=desc)
    params = parser.parse_args()
    invalid_profiles = merger.merge_profiles(
        params.task_output_dir,
        os.path.join(params.profdata_dir, 'default.profdata'), '.profraw',
        params.llvm_profdata)
    if invalid_profiles:
        with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
                  'w') as f:
            json.dump(invalid_profiles, f)

    # TODO(crbug.com/921300) This script doesn't know how to merge test results;
    # the correct solution is to take another merge script as input and delegate
    # the test results merge to it.
    # However, to work around fuzzer test steps showing up red, the following
    # logic directly copies the output json when there is only one shard. This
    # strategy works for test targets that have only one shard, such as fuzzer
    # targets and simple gtest targets.
    if len(params.jsons_to_merge) == 1:
        with open(params.jsons_to_merge[0]) as f_read:
            with open(params.output_json, 'w') as f_write:
                f_write.write(f_read.read())
Example #10
def main():
    desc = "Merge profraw files in <--task-output-dir> into a single profdata."
    parser = _MergeAPIArgumentParser(description=desc)
    params = parser.parse_args()

    if params.java_coverage_dir:
        if not params.jacococli_path:
            parser.error(
                '--jacococli-path required when merging Java coverage')
        if not params.merged_jacoco_filename:
            parser.error(
                '--merged-jacoco-filename required when merging Java coverage')

        output_path = os.path.join(params.java_coverage_dir,
                                   '%s.exec' % params.merged_jacoco_filename)
        logging.info('Merging JaCoCo .exec files to %s', output_path)
        profile_merger.merge_java_exec_files(params.task_output_dir,
                                             output_path,
                                             params.jacococli_path)

    if params.javascript_coverage_dir:
        if not params.merged_js_cov_filename:
            parser.error('--merged-js-cov-filename required when merging '
                         'JavaScript coverage')

        output_path = os.path.join(
            params.javascript_coverage_dir,
            '%s_javascript.json' % params.merged_js_cov_filename)
        raw_coverage_folder = os.path.join(params.javascript_coverage_dir,
                                           'tests')
        logging.info('Merging v8 coverage output to %s', output_path)
        javascript_merger.merge_coverage_files(raw_coverage_folder,
                                               output_path)

    # Name the output profdata file {test_target}.profdata, or default.profdata
    # if no test target is given.
    output_profdata_filename = (params.test_target_name
                                or 'default') + '.profdata'

    # NOTE: The profile data merge script must make sure that the profraw files
    # are deleted from the task output directory after merging; otherwise, other
    # test results merge scripts, such as the layout test one, will treat them
    # as json test result files and result in errors.
    invalid_profiles, counter_overflows = profile_merger.merge_profiles(
        params.task_output_dir,
        os.path.join(params.profdata_dir, output_profdata_filename),
        '.profraw',
        params.llvm_profdata,
        sparse=params.sparse,
        skip_validation=params.skip_validation)

    # At the moment counter overflows overlap with invalid profiles, but this is
    # not guaranteed to remain the case indefinitely. To avoid future conflicts
    # treat these separately.
    if counter_overflows:
        with open(
                os.path.join(params.profdata_dir,
                             'profiles_with_overflows.json'), 'w') as f:
            json.dump(counter_overflows, f)

    if invalid_profiles:
        with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
                  'w') as f:
            json.dump(invalid_profiles, f)

    failed = False

    # If given, always run the additional merge script, even if we only have one
    # output json. Merge scripts sometimes upload artifacts to cloud storage, or
    # do other processing which can be needed even if there's only one output.
    if params.additional_merge_script:
        new_args = [
            '--build-properties',
            params.build_properties,
            '--summary-json',
            params.summary_json,
            '--task-output-dir',
            params.task_output_dir,
            '--output-json',
            params.output_json,
        ]

        if params.additional_merge_script_args:
            new_args += json.loads(params.additional_merge_script_args)

        new_args += params.jsons_to_merge

        args = [sys.executable, params.additional_merge_script] + new_args
        rc = subprocess.call(args)
        if rc != 0:
            failed = True
            logging.warning('Additional merge script %s exited with %s' %
                            (params.additional_merge_script, rc))
    elif len(params.jsons_to_merge) == 1:
        logging.info(
            "Only one output needs to be merged; directly copying it.")
        with open(params.jsons_to_merge[0]) as f_read:
            with open(params.output_json, 'w') as f_write:
                f_write.write(f_read.read())
    else:
        logging.warning(
            'This script was told to merge test results, but no additional merge '
            'script was given.')

    return 1 if (failed or bool(invalid_profiles)) else 0
Example #11
def main():
  desc = "Merge profraw files in <--task-output-dir> into a single profdata."
  parser = _MergeAPIArgumentParser(description=desc)
  params = parser.parse_args()

  if params.java_coverage_dir:
    logging.info('Moving JaCoCo .exec files to %s', params.java_coverage_dir)
    coverage_merger.move_java_exec_files(
        params.task_output_dir, params.java_coverage_dir)

  # NOTE: The coverage data merge script must make sure that the profraw files
  # are deleted from the task output directory after merging; otherwise, other
  # test results merge scripts, such as the layout test one, will treat them as
  # json test result files and result in errors.
  logging.info('Merging code coverage profraw data')
  invalid_profiles = coverage_merger.merge_profiles(
      params.task_output_dir,
      os.path.join(params.profdata_dir, 'default.profdata'), '.profraw',
      params.llvm_profdata)
  if invalid_profiles:
    with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
              'w') as f:
      json.dump(invalid_profiles, f)

  logging.info('Merging %d test results', len(params.jsons_to_merge))
  failed = False

  # If given, always run the additional merge script, even if we only have one
  # output json. Merge scripts sometimes upload artifacts to cloud storage, or
  # do other processing which can be needed even if there's only one output.
  if params.additional_merge_script:
    new_args = [
        '--build-properties',
        params.build_properties,
        '--summary-json',
        params.summary_json,
        '--task-output-dir',
        params.task_output_dir,
        '--output-json',
        params.output_json,
    ]

    if params.additional_merge_script_args:
      new_args += json.loads(params.additional_merge_script_args)

    new_args += params.jsons_to_merge

    args = [sys.executable, params.additional_merge_script] + new_args
    rc = subprocess.call(args)
    if rc != 0:
      failed = True
      logging.warning('Additional merge script %s exited with %s' %
                      (params.additional_merge_script, rc))
    mark_invalid_shards(coverage_merger.get_shards_to_retry(invalid_profiles),
                        params.summary_json, params.output_json)
  elif len(params.jsons_to_merge) == 1:
    logging.info("Only one output needs to be merged; directly copying it.")
    with open(params.jsons_to_merge[0]) as f_read:
      with open(params.output_json, 'w') as f_write:
        f_write.write(f_read.read())
    mark_invalid_shards(coverage_merger.get_shards_to_retry(invalid_profiles),
                        params.summary_json, params.output_json)
  else:
    logging.warning(
        "This script was told to merge %d test results, but no additional "
        "merge script was given.", len(params.jsons_to_merge))

  return 1 if (failed or bool(invalid_profiles)) else 0
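
mark_invalid_shards and coverage_merger.get_shards_to_retry are called above but not shown on this page, and later examples pass mark_invalid_shards different arguments, so its signature evidently evolved. A hedged sketch of the idea, flagging shards whose profiles were invalid so their test results get retried; the json field names below are assumptions, not the real schema:

import json


def mark_invalid_shards(bad_shards, summary_json, output_json):
    # Sketch only: record the shards whose coverage profiles were invalid in the
    # merged output json so the recipe treats them as missing and retries them.
    # (summary_json would be needed to map task output directories back to shard
    # indices; that mapping is omitted here.)
    if not bad_shards:
        return
    with open(output_json) as f:
        merged = json.load(f)
    merged.setdefault('missing_shards', []).extend(sorted(bad_shards))
    with open(output_json, 'w') as f:
        json.dump(merged, f)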
Example #12
def main():
    desc = "Merge profraw files in <--task-output-dir> into a single profdata."
    parser = _MergeAPIArgumentParser(description=desc)
    params = parser.parse_args()

    if params.java_coverage_dir:
        if not params.jacococli_path:
            parser.error(
                '--jacococli-path required when merging Java coverage')
        if not params.merged_jacoco_filename:
            parser.error(
                '--merged-jacoco-filename required when merging Java coverage')

        output_path = os.path.join(params.java_coverage_dir,
                                   '%s.exec' % params.merged_jacoco_filename)
        logging.info('Merging JaCoCo .exec files to %s', output_path)
        coverage_merger.merge_java_exec_files(params.task_output_dir,
                                              output_path,
                                              params.jacococli_path)

    # Name the output profdata file {test_target}.profdata, or default.profdata
    # if no test target is given.
    output_profdata_filename = (params.test_target_name
                                or 'default') + '.profdata'

    # NOTE: The coverage data merge script must make sure that the profraw files
    # are deleted from the task output directory after merging; otherwise, other
    # test results merge scripts, such as the layout test one, will treat them
    # as json test result files and result in errors.
    logging.info('Merging code coverage profraw data')
    invalid_profiles, counter_overflows = coverage_merger.merge_profiles(
        params.task_output_dir,
        os.path.join(params.profdata_dir, output_profdata_filename), '.profraw',
        params.llvm_profdata)

    # At the moment counter overflows overlap with invalid profiles, but this is
    # not guaranteed to remain the case indefinitely. To avoid future conflicts
    # treat these separately.
    if counter_overflows:
        with open(
                os.path.join(params.profdata_dir,
                             'profiles_with_overflows.json'), 'w') as f:
            json.dump(counter_overflows, f)

    if invalid_profiles:
        with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
                  'w') as f:
            json.dump(invalid_profiles, f)

        # We don't want to invalidate shards in a CQ build, because we should not
        # interfere with the actual test results of a CQ builder.
        # TODO(crbug.com/1050858) Remove patch_storage completely once recipe-side
        # change passes --per-cl-coverage.
        patch_storage = json.loads(
            params.build_properties).get('patch_storage')
        if not params.per_cl_coverage and not patch_storage:
            mark_invalid_shards(
                coverage_merger.get_shards_to_retry(invalid_profiles),
                params.jsons_to_merge)
    logging.info('Merging %d test results', len(params.jsons_to_merge))
    failed = False

    # If given, always run the additional merge script, even if we only have one
    # output json. Merge scripts sometimes upload artifacts to cloud storage, or
    # do other processing which can be needed even if there's only one output.
    if params.additional_merge_script:
        new_args = [
            '--build-properties',
            params.build_properties,
            '--summary-json',
            params.summary_json,
            '--task-output-dir',
            params.task_output_dir,
            '--output-json',
            params.output_json,
        ]

        if params.additional_merge_script_args:
            new_args += json.loads(params.additional_merge_script_args)

        new_args += params.jsons_to_merge

        args = [sys.executable, params.additional_merge_script] + new_args
        rc = subprocess.call(args)
        if rc != 0:
            failed = True
            logging.warning('Additional merge script %s exited with %s' %
                            (params.additional_merge_script, rc))
    elif len(params.jsons_to_merge) == 1:
        logging.info(
            "Only one output needs to be merged; directly copying it.")
        with open(params.jsons_to_merge[0]) as f_read:
            with open(params.output_json, 'w') as f_write:
                f_write.write(f_read.read())
    else:
        logging.warning(
            "This script was told to merge %d test results, but no additional "
            "merge script was given.", len(params.jsons_to_merge))

    return 1 if (failed or bool(invalid_profiles)) else 0
Example #13
def main():
  desc = "Merge profraw files in <--task-output-dir> into a single profdata."
  parser = _MergeAPIArgumentParser(description=desc)
  params = parser.parse_args()

  # NOTE: The coverage data merge script must make sure that the profraw files
  # are deleted from the task output directory after merging; otherwise, other
  # test results merge scripts, such as the layout test one, will treat them as
  # json test result files and result in errors.
  logging.info('Merging code coverage profraw data')
  invalid_profiles = coverage_merger.merge_profiles(
      params.task_output_dir,
      os.path.join(params.profdata_dir, 'default.profdata'), '.profraw',
      params.llvm_profdata)
  if invalid_profiles:
    with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
              'w') as f:
      json.dump(invalid_profiles, f)

  logging.info('Merging %d test results', len(params.jsons_to_merge))
  failed = False

  # If given, always run the additional merge script, even if we only have one
  # output json. Merge scripts sometimes upload artifacts to cloud storage, or
  # do other processing which can be needed even if there's only one output.
  if params.additional_merge_script:
    new_args = [
        '--build-properties',
        params.build_properties,
        '--summary-json',
        params.summary_json,
        '--task-output-dir',
        params.task_output_dir,
        '--output-json',
        params.output_json,
    ]

    # TODO(crbug.com/960994): Without specifying an output directory, the layout
    # test merge script uses the CWD as the output directory and then tries to
    # wipe out its content. Unfortunately, the CWD is a temporary directory that
    # also holds the coverage profdata, so without the following hack the merge
    # script would delete all the profdata files and lead to build failures.
    #
    # This temporary workaround is only used for evaluating the stability of the
    # linux-coverage-rel trybot; it should be removed before merging into
    # linux-rel as it's not reliable enough. For example, things could break if
    # the name or arguments of the script are changed.
    if params.additional_merge_script.endswith('merge_web_test_results.py'):
      new_args.extend([
        '--output-directory',
        tempfile.mkdtemp(),
        '--allow-existing-output-directory',
      ])

    if params.additional_merge_script_args:
      new_args += json.loads(params.additional_merge_script_args)

    new_args += params.jsons_to_merge

    args = [sys.executable, params.additional_merge_script] + new_args
    rc = subprocess.call(args)
    if rc != 0:
      failed = True
      logging.warning('Additional merge script %s exited with %s' %
                      (params.additional_merge_script, rc))
  elif len(params.jsons_to_merge) == 1:
    logging.info("Only one output needs to be merged; directly copying it.")
    with open(params.jsons_to_merge[0]) as f_read:
      with open(params.output_json, 'w') as f_write:
        f_write.write(f_read.read())
  else:
    logging.warning(
        "This script was told to merge %d test results, but no additional "
        "merge script was given.", len(params.jsons_to_merge))

  return 1 if (failed or bool(invalid_profiles)) else 0