Example #1
def _run_tests(args, tests, extra_flags, env, screenshot_dir, results,
               test_results):
    keys = get_skia_gold_keys(args)

    with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
        gold_properties = angle_skia_gold_properties.ANGLESkiaGoldProperties(
            args)
        gold_session_manager = angle_skia_gold_session_manager.ANGLESkiaGoldSessionManager(
            skia_gold_temp_dir, gold_properties)
        gold_session = gold_session_manager.GetSkiaGoldSession(keys)

        traces = [trace.split(' ')[0] for trace in tests]
        for test in traces:

            # Apply test filter if present.
            if args.isolated_script_test_filter:
                full_name = 'angle_restricted_trace_gold_tests.%s' % test
                if not fnmatch.fnmatch(full_name,
                                       args.isolated_script_test_filter):
                    logging.info(
                        'Skipping test %s because it does not match filter %s'
                        % (full_name, args.isolated_script_test_filter))
                    continue

            with common.temporary_file() as tempfile_path:
                cmd = [
                    args.test_suite,
                    DEFAULT_TEST_PREFIX + test,
                    '--render-test-output-dir=%s' % screenshot_dir,
                    '--one-frame-only',
                    '--verbose-logging',
                ] + extra_flags

                result = None
                for iteration in range(0, args.flaky_retries + 1):
                    if result != PASS:
                        if iteration > 0:
                            logging.info('Retrying flaky test: "%s"...' % test)
                        result = PASS if run_wrapper(
                            args, cmd, env, tempfile_path) == 0 else FAIL

                artifacts = {}

                if result == PASS:
                    result = upload_test_result_to_skia_gold(
                        args, gold_session_manager, gold_session,
                        gold_properties, screenshot_dir, test, artifacts)

                expected_result = SKIP if result == SKIP else PASS
                test_results[test] = {
                    'expected': expected_result,
                    'actual': result
                }
                if result == FAIL:
                    test_results[test]['is_unexpected'] = True
                if len(artifacts) > 0:
                    test_results[test]['artifacts'] = artifacts
                results['num_failures_by_type'][result] += 1

        return results['num_failures_by_type'][FAIL] == 0

Example #2

def run_tests(args, tests, extra_flags, env, screenshot_dir):
    keys = get_skia_gold_keys(args)

    with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
        gold_properties = angle_skia_gold_properties.ANGLESkiaGoldProperties(
            args)
        gold_session_manager = angle_skia_gold_session_manager.ANGLESkiaGoldSessionManager(
            skia_gold_temp_dir, gold_properties)
        gold_session = gold_session_manager.GetSkiaGoldSession(keys)

        for test in tests['traces']:
            with common.temporary_file() as tempfile_path:
                cmd = [
                    args.test_suite,
                    DEFAULT_TEST_PREFIX + test,
                    '--render-test-output-dir=%s' % screenshot_dir,
                    '--one-frame-only',
                    '--verbose-logging',
                ] + extra_flags

                result = PASS if run_wrapper(args, cmd, env,
                                             tempfile_path) == 0 else FAIL

                artifacts = {}

                if result == PASS:
                    result = upload_test_result_to_skia_gold(
                        args, gold_session_manager, gold_session,
                        gold_properties, screenshot_dir, test, artifacts)

                expected_result = SKIP if result == SKIP else PASS
                result_tests[test] = {
                    'expected': expected_result,
                    'actual': result
                }
                if len(artifacts) > 0:
                    result_tests[test]['artifacts'] = artifacts
                results['num_failures_by_type'][result] += 1

        return results['num_failures_by_type'][FAIL] == 0
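
Both `_run_tests` in Example #1 and the older `run_tests` in Example #2 only fill in a results structure; neither excerpt shows where `results` and the per-test map (`test_results` in Example #1, `result_tests` in Example #2) come from. A minimal caller-side sketch, assuming a Chromium-style JSON results layout inferred from the keys the functions touch; `_make_empty_results` and the exact field set are assumptions for illustration, not code from the module:

import time

# Assumed string constants matching the names used in the examples above.
PASS, FAIL, SKIP = 'PASS', 'FAIL', 'SKIP'


def _make_empty_results():
    # Scaffolding inferred from how the functions above index
    # results['num_failures_by_type'] and write per-test entries.
    results = {
        'version': 3,
        'interrupted': False,
        'path_delimiter': '.',
        'seconds_since_epoch': time.time(),
        'num_failures_by_type': {PASS: 0, FAIL: 0, SKIP: 0},
        'tests': {},
    }
    # Per-test entries keyed by trace name ('expected' / 'actual' / 'artifacts').
    return results, results['tests']


# Hypothetical invocation of the Example #1 variant:
#   results, test_results = _make_empty_results()
#   ok = _run_tests(args, tests, extra_flags, env, screenshot_dir,
#                   results, test_results)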
Example #3
def _run_tests(args, tests, extra_flags, env, screenshot_dir, results,
               test_results):
    keys = get_skia_gold_keys(args, env)

    with temporary_dir('angle_skia_gold_') as skia_gold_temp_dir:
        gold_properties = angle_skia_gold_properties.ANGLESkiaGoldProperties(
            args)
        gold_session_manager = angle_skia_gold_session_manager.ANGLESkiaGoldSessionManager(
            skia_gold_temp_dir, gold_properties)
        gold_session = gold_session_manager.GetSkiaGoldSession(keys)

        traces = [trace.split(' ')[0] for trace in tests]

        if args.isolated_script_test_filter:
            filtered = []
            for trace in traces:
                # Apply test filter if present.
                full_name = 'angle_restricted_trace_gold_tests.%s' % trace
                if not fnmatch.fnmatch(full_name,
                                       args.isolated_script_test_filter):
                    logging.info(
                        'Skipping test %s because it does not match filter %s'
                        % (full_name, args.isolated_script_test_filter))
                else:
                    filtered += [trace]
            traces = filtered

        batches = _get_batches(traces, args.batch_size)

        for batch in batches:
            for iteration in range(0, args.flaky_retries + 1):
                with common.temporary_file() as tempfile_path:
                    # This is how we signal early exit
                    if not batch:
                        logging.debug('All tests in batch completed.')
                        break
                    if iteration > 0:
                        logging.info('Test run failed, running retry #%d...' %
                                     iteration)

                    gtest_filter = _get_gtest_filter_for_batch(args, batch)
                    cmd = [
                        args.test_suite,
                        gtest_filter,
                        '--render-test-output-dir=%s' % screenshot_dir,
                        '--one-frame-only',
                        '--verbose-logging',
                        '--enable-all-trace-tests',
                    ] + extra_flags
                    batch_result = PASS if run_wrapper(
                        args, cmd, env, tempfile_path) == 0 else FAIL

                    next_batch = []
                    for trace in batch:
                        artifacts = {}

                        if batch_result == PASS:
                            logging.debug('upload test result: %s' % trace)
                            result = upload_test_result_to_skia_gold(
                                args, gold_session_manager, gold_session,
                                gold_properties, screenshot_dir, trace,
                                artifacts)
                        else:
                            result = batch_result

                        expected_result = SKIP if result == SKIP else PASS
                        test_results[trace] = {
                            'expected': expected_result,
                            'actual': result
                        }
                        if len(artifacts) > 0:
                            test_results[trace]['artifacts'] = artifacts
                        if result == FAIL:
                            next_batch.append(trace)
                    batch = next_batch

        # These results are recorded after the batch/retry loops so that each trace is only counted once.
        for _, trace_results in test_results.items():
            result = trace_results['actual']
            results['num_failures_by_type'][result] += 1
            if result == FAIL:
                trace_results['is_unexpected'] = True

        return results['num_failures_by_type'][FAIL] == 0
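
Example #3 leans on two helpers that the excerpt never defines, `_get_batches` and `_get_gtest_filter_for_batch`. A rough sketch of what they might look like, inferred only from the call sites above (slicing the trace list into fixed-size batches and building a single gtest filter per batch); the bodies below are assumptions, not the actual ANGLE implementations:

def _get_batches(traces, batch_size):
    # Yield consecutive slices of the trace list, batch_size traces per slice.
    for i in range(0, len(traces), batch_size):
        yield traces[i:i + batch_size]


def _get_gtest_filter_for_batch(args, batch):
    # Join every trace in the batch into one --gtest_filter argument.
    # DEFAULT_TEST_PREFIX is the module constant used by the earlier,
    # one-test-at-a-time variants; reusing it here is an assumption.
    expanded = ['%s%s' % (DEFAULT_TEST_PREFIX, trace) for trace in batch]
    return '--gtest_filter=%s' % ':'.join(expanded)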