Example #1
0
  def test_merge_java_exec_files(self):
    """Checks that every .exec file discovered under the walked directory is
    passed to a single `java -jar jacococli.jar merge` invocation."""
    walk_entries = [
        ('/b/some/path', ['0', '1', '2', '3'], ['summary.json']),
        ('/b/some/path/0', [],
         ['output.json', 'default-1.exec', 'default-2.exec']),
        ('/b/some/path/1', [],
         ['output.json', 'default-3.exec', 'default-4.exec']),
    ]
    expected_command = [
        'java',
        '-jar',
        'path/to/jacococli.jar',
        'merge',
        '/b/some/path/0/default-1.exec',
        '/b/some/path/0/default-2.exec',
        '/b/some/path/1/default-3.exec',
        '/b/some/path/1/default-4.exec',
        '--destfile',
        'output/path',
    ]

    with mock.patch.object(os, 'walk') as mock_walk:
      with mock.patch.object(subprocess, 'check_output') as mock_exec_cmd:
        mock_walk.return_value = walk_entries
        merger.merge_java_exec_files(
            '/b/some/path', 'output/path', 'path/to/jacococli.jar')
        # stderr=-2 is the numeric value of subprocess.STDOUT.
        self.assertEqual(
            mock.call(expected_command, stderr=-2), mock_exec_cmd.call_args)
Example #2
0
  def test_merge_java_exec_files_if_there_is_no_file(self):
    """Checks that no jacococli command runs when the walk finds no .exec
    files (only subdirectory names and a summary.json)."""
    walk_entries = [
        ('/b/some/path', ['0', '1', '2', '3'], ['summary.json']),
    ]

    with mock.patch.object(subprocess, 'check_output') as mock_exec_cmd:
      with mock.patch.object(os, 'walk') as mock_walk:
        mock_walk.return_value = walk_entries
        merger.merge_java_exec_files(
            '/b/some/path', 'output/path', 'path/to/jacococli.jar')
        self.assertFalse(mock_exec_cmd.called)
Example #3
0
def main():
    """Merge coverage profiles (JaCoCo, v8/JavaScript, Clang profraw) and the
    per-shard test-result JSON files into single outputs.

    Returns:
      1 if the additional merge script failed or invalid profiles were found,
      0 otherwise.
    """
    description = (
        "Merge profraw files in <--task-output-dir> into a single profdata.")
    arg_parser = _MergeAPIArgumentParser(description=description)
    opts = arg_parser.parse_args()

    # Optional Java (JaCoCo) coverage merge; both flags below are needed to
    # locate the CLI jar and name the merged .exec output.
    if opts.java_coverage_dir:
        if not opts.jacococli_path:
            arg_parser.error(
                '--jacococli-path required when merging Java coverage')
        if not opts.merged_jacoco_filename:
            arg_parser.error(
                '--merged-jacoco-filename required when merging Java coverage')

        jacoco_output = os.path.join(
            opts.java_coverage_dir, '%s.exec' % opts.merged_jacoco_filename)
        logging.info('Merging JaCoCo .exec files to %s', jacoco_output)
        profile_merger.merge_java_exec_files(
            opts.task_output_dir, jacoco_output, opts.jacococli_path)

    # Optional JavaScript (v8) coverage merge from the 'tests' subfolder.
    if opts.javascript_coverage_dir:
        if not opts.merged_js_cov_filename:
            arg_parser.error('--merged-js-cov-filename required when merging '
                             'JavaScript coverage')

        js_output = os.path.join(
            opts.javascript_coverage_dir,
            '%s_javascript.json' % opts.merged_js_cov_filename)
        raw_coverage_folder = os.path.join(opts.javascript_coverage_dir,
                                           'tests')
        logging.info('Merging v8 coverage output to %s', js_output)
        javascript_merger.merge_coverage_files(raw_coverage_folder, js_output)

    # Output profdata file is {test_target}.profdata, or default.profdata when
    # no test target name was given.
    profdata_name = (opts.test_target_name or 'default') + '.profdata'

    # NOTE: The profile data merge script must make sure that the profraw files
    # are deleted from the task output directory after merging, otherwise, other
    # test results merge script such as layout tests will treat them as json test
    # results files and result in errors.
    invalid_profiles, counter_overflows = profile_merger.merge_profiles(
        opts.task_output_dir,
        os.path.join(opts.profdata_dir, profdata_name),
        '.profraw',
        opts.llvm_profdata,
        sparse=opts.sparse,
        skip_validation=opts.skip_validation)

    # At the moment counter overflows overlap with invalid profiles, but this
    # is not guaranteed to remain the case indefinitely, so each set is written
    # to its own JSON file.
    for payload, json_name in ((counter_overflows,
                                'profiles_with_overflows.json'),
                               (invalid_profiles, 'invalid_profiles.json')):
        if payload:
            with open(os.path.join(opts.profdata_dir, json_name), 'w') as fh:
                json.dump(payload, fh)

    failed = False

    # If given, always run the additional merge script, even if we only have
    # one output json. Merge scripts sometimes upload artifacts to cloud
    # storage, or do other processing which can be needed even if there's only
    # one output.
    if opts.additional_merge_script:
        merge_args = [
            '--build-properties',
            opts.build_properties,
            '--summary-json',
            opts.summary_json,
            '--task-output-dir',
            opts.task_output_dir,
            '--output-json',
            opts.output_json,
        ]

        if opts.additional_merge_script_args:
            merge_args.extend(json.loads(opts.additional_merge_script_args))

        merge_args.extend(opts.jsons_to_merge)

        cmd = [sys.executable, opts.additional_merge_script] + merge_args
        exit_code = subprocess.call(cmd)
        if exit_code != 0:
            failed = True
            logging.warning('Additional merge script %s exited with %s' %
                            (opts.additional_merge_script, exit_code))
    elif len(opts.jsons_to_merge) == 1:
        logging.info(
            "Only one output needs to be merged; directly copying it.")
        with open(opts.jsons_to_merge[0]) as src:
            with open(opts.output_json, 'w') as dst:
                dst.write(src.read())
    else:
        logging.warning(
            'This script was told to merge test results, but no additional merge '
            'script was given.')

    return 1 if (failed or bool(invalid_profiles)) else 0
def main():
    """Merge coverage profiles (JaCoCo, Clang profraw) and per-shard test
    result JSON files into single outputs.

    Returns:
      1 if the additional merge script failed or invalid profiles were found,
      0 otherwise.
    """
    desc = "Merge profraw files in <--task-output-dir> into a single profdata."
    parser = _MergeAPIArgumentParser(description=desc)
    params = parser.parse_args()

    if params.java_coverage_dir:
        # Both flags are needed to locate the CLI jar and name the merged
        # .exec output; fail fast via the parser otherwise.
        if not params.jacococli_path:
            parser.error(
                '--jacococli-path required when merging Java coverage')
        if not params.merged_jacoco_filename:
            parser.error(
                '--merged-jacoco-filename required when merging Java coverage')

        output_path = os.path.join(params.java_coverage_dir,
                                   '%s.exec' % params.merged_jacoco_filename)
        logging.info('Merging JaCoCo .exec files to %s', output_path)
        coverage_merger.merge_java_exec_files(params.task_output_dir,
                                              output_path,
                                              params.jacococli_path)

    # Name the output profdata file name as {test_target}.profdata or
    # default.profdata.
    output_profdata_filename = (params.test_target_name
                                or 'default') + '.profdata'

    # NOTE: The coverage data merge script must make sure that the profraw files
    # are deleted from the task output directory after merging, otherwise, other
    # test results merge script such as layout tests will treat them as json test
    # results files and result in errors.
    logging.info('Merging code coverage profraw data')
    invalid_profiles, counter_overflows = coverage_merger.merge_profiles(
        params.task_output_dir,
        os.path.join(params.profdata_dir, output_profdata_filename),
        '.profraw', params.llvm_profdata)

    # At the moment counter overflows overlap with invalid profiles, but this is
    # not guaranteed to remain the case indefinitely. To avoid future conflicts
    # treat these separately.
    if counter_overflows:
        with open(
                os.path.join(params.profdata_dir,
                             'profiles_with_overflows.json'), 'w') as f:
            json.dump(counter_overflows, f)

    if invalid_profiles:
        with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
                  'w') as f:
            json.dump(invalid_profiles, f)

        # We don't want to invalidate shards in a CQ build, because we should not
        # interfere with the actual test results of a CQ builder.
        # TODO(crbug.com/1050858) Remove patch_storage completely once recipe-side
        # change passes --per-cl-coverage.
        patch_storage = json.loads(
            params.build_properties).get('patch_storage')
        if not params.per_cl_coverage and not patch_storage:
            mark_invalid_shards(
                coverage_merger.get_shards_to_retry(invalid_profiles),
                params.jsons_to_merge)

    logging.info('Merging %d test results', len(params.jsons_to_merge))
    failed = False

    # If given, always run the additional merge script, even if we only have one
    # output json. Merge scripts sometimes upload artifacts to cloud storage, or
    # do other processing which can be needed even if there's only one output.
    if params.additional_merge_script:
        new_args = [
            '--build-properties',
            params.build_properties,
            '--summary-json',
            params.summary_json,
            '--task-output-dir',
            params.task_output_dir,
            '--output-json',
            params.output_json,
        ]

        if params.additional_merge_script_args:
            new_args += json.loads(params.additional_merge_script_args)

        new_args += params.jsons_to_merge

        args = [sys.executable, params.additional_merge_script] + new_args
        rc = subprocess.call(args)
        if rc != 0:
            failed = True
            logging.warning('Additional merge script %s exited with %s' %
                            (params.additional_merge_script, rc))
    elif len(params.jsons_to_merge) == 1:
        logging.info(
            "Only one output needs to be merged; directly copying it.")
        with open(params.jsons_to_merge[0]) as f_read:
            with open(params.output_json, 'w') as f_write:
                f_write.write(f_read.read())
    else:
        # BUG FIX: the message contains a %d placeholder but no argument was
        # passed, so the literal "%d" was logged; supply the count lazily.
        logging.warning(
            "This script was told to merge %d test results, but no additional "
            "merge script was given.", len(params.jsons_to_merge))

    return 1 if (failed or bool(invalid_profiles)) else 0
Example #5
0
def main():
    """Merge coverage profiles (JaCoCo, Clang profraw) and per-shard test
    result JSON files into single outputs, retrying shards whose profiles
    were invalid.

    Returns:
      1 if the additional merge script failed or invalid profiles were found,
      0 otherwise.
    """
    desc = "Merge profraw files in <--task-output-dir> into a single profdata."
    parser = _MergeAPIArgumentParser(description=desc)
    params = parser.parse_args()

    if params.java_coverage_dir:
        # Both flags are needed to locate the CLI jar and name the merged
        # .exec output; fail fast via the parser otherwise.
        if not params.jacococli_path:
            parser.error(
                '--jacococli-path required when merging Java coverage')
        if not params.merged_jacoco_filename:
            parser.error(
                '--merged-jacoco-filename required when merging Java coverage')

        output_path = os.path.join(params.java_coverage_dir,
                                   '%s.exec' % params.merged_jacoco_filename)
        logging.info('Merging JaCoCo .exec files to %s', output_path)
        coverage_merger.merge_java_exec_files(params.task_output_dir,
                                              output_path,
                                              params.jacococli_path)

    # NOTE: The coverage data merge script must make sure that the profraw files
    # are deleted from the task output directory after merging, otherwise, other
    # test results merge script such as layout tests will treat them as json test
    # results files and result in errors.
    logging.info('Merging code coverage profraw data')
    invalid_profiles = coverage_merger.merge_profiles(
        params.task_output_dir,
        os.path.join(params.profdata_dir, 'default.profdata'), '.profraw',
        params.llvm_profdata)
    if invalid_profiles:
        with open(os.path.join(params.profdata_dir, 'invalid_profiles.json'),
                  'w') as f:
            json.dump(invalid_profiles, f)

    logging.info('Merging %d test results', len(params.jsons_to_merge))
    failed = False

    # If given, always run the additional merge script, even if we only have one
    # output json. Merge scripts sometimes upload artifacts to cloud storage, or
    # do other processing which can be needed even if there's only one output.
    if params.additional_merge_script:
        new_args = [
            '--build-properties',
            params.build_properties,
            '--summary-json',
            params.summary_json,
            '--task-output-dir',
            params.task_output_dir,
            '--output-json',
            params.output_json,
        ]

        if params.additional_merge_script_args:
            new_args += json.loads(params.additional_merge_script_args)

        new_args += params.jsons_to_merge

        args = [sys.executable, params.additional_merge_script] + new_args
        rc = subprocess.call(args)
        if rc != 0:
            failed = True
            logging.warning('Additional merge script %s exited with %s' %
                            (params.additional_merge_script, rc))
        # Flag shards whose profiles were invalid so they can be retried.
        mark_invalid_shards(
            coverage_merger.get_shards_to_retry(invalid_profiles),
            params.summary_json, params.output_json)
    elif len(params.jsons_to_merge) == 1:
        logging.info(
            "Only one output needs to be merged; directly copying it.")
        with open(params.jsons_to_merge[0]) as f_read:
            with open(params.output_json, 'w') as f_write:
                f_write.write(f_read.read())
        # Flag shards whose profiles were invalid so they can be retried.
        mark_invalid_shards(
            coverage_merger.get_shards_to_retry(invalid_profiles),
            params.summary_json, params.output_json)
    else:
        # BUG FIX: the message contains a %d placeholder but no argument was
        # passed, so the literal "%d" was logged; supply the count lazily.
        logging.warning(
            "This script was told to merge %d test results, but no additional "
            "merge script was given.", len(params.jsons_to_merge))

    return 1 if (failed or bool(invalid_profiles)) else 0