Example #1
File: flags_test.py Project: zhangpf/arc
  def test_or(self):
    merged = flags.FlagSet(flags.PASS) | flags.FlagSet(flags.LARGE)
    self.assertEquals(flags.PASS, merged.status)
    self.assertEquals(flags.LARGE, merged.attribute)

    # If both operands have a status, an AssertionError is raised.
    with self.assertRaises(AssertionError):
      flags.FlagSet(flags.PASS) | flags.FlagSet(flags.NOT_SUPPORTED)
Example #2
 def _check_simple(self, expected, patterns):
     self.assertEquals(
         dict((key, flags.FlagSet(value))
              for key, value in expected.iteritems()),
         suite_runner_util.merge_expectation_map(
             dict.fromkeys(expected, flags.FlagSet(flags.PASS)),
             dict((key, flags.FlagSet(value))
                  for key, value in patterns.iteritems()),
             flags.FlagSet(flags.PASS)))
Example #3
 def _make_suite_runner(self, name):
     return suite_runner.SuiteRunnerBase(
         name, {
             'Class1#method1': flags.FlagSet(flags.PASS),
             'Class1#method2': flags.FlagSet(flags.PASS),
             'Class2#method1': flags.FlagSet(flags.PASS),
             'Class2#method2': flags.FlagSet(flags.PASS),
         },
         config=SuiteRunConfigIntegrationTests.my_config()[name])
Example #4
 def test_defaults_applied(self):
     result = _evaluate({'flags': flags.FlagSet(flags.PASS)},
                        defaults={
                            'bug': 'crbug.com/1234',
                            'flags': flags.FlagSet(flags.FAIL)
                        })
     self.assertEquals('crbug.com/1234', result['bug'])
     self.assertEquals(suite_runner_config._DEFAULT_OUTPUT_TIMEOUT,
                       result['deadline'])
     self.assertEquals(flags.PASS, result['flags'].status)
Example #5
    def test_flat_suite_test_expectations(self):
        result = _evaluate_test_expectations({'x': flags.FlagSet(flags.FLAKY)})
        self.assertEqual(flags.FlagSet(flags.FLAKY), result['x'])

        result = _evaluate_test_expectations({'*': flags.FlagSet(flags.FLAKY)})
        self.assertEqual(flags.FlagSet(flags.FLAKY), result['*'])

        # Only a simple '*' pattern is allowed.
        # (Though this pattern still allows us to do a prefix match later, we
        # disallow it.)
        with self.assertRaisesRegexp(AssertionError, r'"x\*" is not allowed'):
            _evaluate_test_expectations({'x*': flags.FlagSet(flags.PASS)})

        # Only a simple '*' pattern is allowed.
        # (This allows us to do a simple prefix match later.)
        with self.assertRaisesRegexp(AssertionError, r'"\*x" is not allowed'):
            _evaluate_test_expectations({'*x': flags.FlagSet(flags.PASS)})

        # A "class#method" style name is allowed.
        result = _evaluate_test_expectations(
            {'x#y': flags.FlagSet(flags.FLAKY)})
        self.assertEqual(flags.FlagSet(flags.FLAKY), result['x#y'])

        # Only one '#' is allowed.
        with self.assertRaisesRegexp(AssertionError,
                                     r'"x#y#z" is not allowed'):
            _evaluate_test_expectations({'x#y#z': flags.FlagSet(flags.PASS)})
Example #6
File: flags_test.py Project: zhangpf/arc
  def test_eq_ne(self):
    self.assertEquals(flags.FlagSet(flags.PASS), flags.FlagSet(flags.PASS))
    self.assertNotEquals(flags.FlagSet(flags.PASS), flags.FlagSet(flags.FAIL))
    self.assertEquals(flags.FlagSet(flags.PASS | flags.LARGE),
                      flags.FlagSet(flags.PASS | flags.LARGE))
    self.assertNotEquals(flags.FlagSet(flags.PASS | flags.LARGE),
                         flags.FlagSet(flags.PASS))

    # Can be compared to None.
    self.assertNotEquals(flags.FlagSet(flags.PASS), None)
    self.assertNotEquals(None, flags.FlagSet(flags.PASS))
Example #7
def _evaluate_suite_test_expectations(raw_dict):
    """Flatten the (possibly-nested) suite_test_expectations dict."""
    result = {}
    for outer_name, outer_expectation in raw_dict.iteritems():
        if isinstance(outer_expectation, flags.FlagSet):
            result[outer_name] = (flags.FlagSet(
                flags.PASS).override_with(outer_expectation))
            continue
        for inner_name, inner_expectation in outer_expectation.iteritems():
            result['%s#%s' % (outer_name, inner_name)] = (flags.FlagSet(
                flags.PASS).override_with(inner_expectation))
    return result
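As a usage sketch of the flattening above (the flags module from these examples is assumed to be in scope, and the class and method names are made up for illustration), a nested entry and an already-flat 'Class#method' entry both end up keyed by 'Class#method':

# Hypothetical input; the names are illustrative.
raw = {
    'SomeClass': {
        'method1': flags.FlagSet(flags.FAIL),
    },
    'OtherClass#method2': flags.FlagSet(flags.FLAKY),
}
flat = _evaluate_suite_test_expectations(raw)
# Expected, per the code above:
#   flat == {'SomeClass#method1': flags.FlagSet(flags.FAIL),
#            'OtherClass#method2': flags.FlagSet(flags.FLAKY)}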
Example #8
    def test_hierarchical_suite_test_expectations(self):
        result = _evaluate_test_expectations(
            {'x': {
                'y': flags.FlagSet(flags.FLAKY)
            }})
        self.assertEqual(flags.FlagSet(flags.FLAKY), result['x#y'])

        result = _evaluate_test_expectations(
            {'x': {
                '*': flags.FlagSet(flags.FLAKY)
            }})
        self.assertEqual(flags.FlagSet(flags.FLAKY), result['x#*'])

        # Only a simple '*' pattern is allowed.
        # (Though this pattern still allows us to do a prefix match later, we
        # disallow it.)
        with self.assertRaisesRegexp(AssertionError,
                                     r'"x#y\*" is not allowed'):
            _evaluate_test_expectations(
                {'x': {
                    'y*': flags.FlagSet(flags.FLAKY)
                }})

        # Only a simple '*' pattern is allowed.
        # (This allows us to use a simple prefix match later)
        with self.assertRaisesRegexp(AssertionError,
                                     r'"x#\*y" is not allowed'):
            _evaluate_test_expectations(
                {'x': {
                    '*y': flags.FlagSet(flags.FLAKY)
                }})

        # If there is an asterisk wildcard, it must be in the leaf.
        # (This allows us to do a simple prefix match later.)
        with self.assertRaisesRegexp(AssertionError,
                                     r'"\*" is not a valid name'):
            _evaluate_test_expectations(
                {'*': {
                    'x': flags.FlagSet(flags.FLAKY)
                }})

        # If there is an asterisk wildcard, it must be in the leaf.
        # (This allows us to do a simple prefix match later.)
        with self.assertRaisesRegexp(AssertionError,
                                     r'"\*" is not a valid name'):
            _evaluate_test_expectations(
                {'*': {
                    '*': flags.FlagSet(flags.FLAKY)
                }})

        # Only one '#' is allowed.
        with self.assertRaisesRegexp(AssertionError,
                                     r'"x#y#z" is not allowed'):
            _evaluate_test_expectations(
                {'x': {
                    'y#z': flags.FlagSet(flags.FLAKY)
                }})
Example #9
File: flags_test.py Project: zhangpf/arc
  def test_property(self):
    flagset = flags.FlagSet(flags.PASS)
    self.assertEquals(flags.PASS, flagset.status)
    self.assertEquals(0, flagset.attribute)

    flagset = flags.FlagSet(flags.PASS | flags.LARGE)
    self.assertEquals(flags.PASS, flagset.status)
    self.assertEquals(flags.LARGE, flagset.attribute)

    # Also, an attribute-only FlagSet is allowed.
    flagset = flags.FlagSet(flags.LARGE)
    self.assertEquals(0, flagset.status)
    self.assertEquals(flags.LARGE, flagset.attribute)
Example #10
    def test_configured_to_fail_for_target(self):
        result = _evaluate(
            {'configurations': [{
                'flags': flags.FlagSet(flags.FLAKY)
            }]})
        self.assertEquals(flags.FLAKY, result['flags'].status)

        result = _evaluate({
            'configurations': [{
                'enable_if': False,
                'flags': flags.FlagSet(flags.FLAKY)
            }]
        })
        self.assertEquals(flags.PASS, result['flags'].status)
Example #11
def _evaluate(raw_config, defaults=None):
    """Flatten the raw_config based on the configuration"""
    _validate(raw_config)

    result = {
        'flags': flags.FlagSet(flags.PASS),
        'bug': None,
        'deadline': _DEFAULT_OUTPUT_TIMEOUT,
        'test_order': {},
        'suite_test_expectations': {},
        'metadata': {},
    }
    if defaults:
        # We need to make a deep copy so that we do not modify any dictionary or
        # array data in place and affect the default values for subsequent use.
        result.update(copy.deepcopy(defaults))

    if not raw_config:
        return result

    # Merge configurations.
    _merge_config(raw_config, result)

    # Apply conditional configurations.
    for configuration in raw_config.get('configurations', []):
        if not configuration.get('enable_if', True):
            continue
        _merge_config(configuration, result)

    return result
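As a minimal usage sketch of the precedence the code above implies (defaults are applied first, then the top-level config, then each enabled configuration), assuming the same config schema as the other examples on this page; the keys and values here are illustrative:

# Hypothetical call; the 'bug' and 'configurations' keys follow the schema
# shown in the other examples.
result = _evaluate(
    {
        'bug': 'crbug.com/1234',
        'configurations': [
            {'enable_if': True, 'flags': flags.FlagSet(flags.FLAKY)},
            {'enable_if': False, 'flags': flags.FlagSet(flags.FAIL)},  # skipped
        ],
    },
    defaults={'deadline': 300})
# Expected: result['deadline'] == 300 (from defaults),
#           result['bug'] == 'crbug.com/1234' (top-level config), and
#           result['flags'].status == flags.FLAKY (enabled configuration).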
Example #12
def default_run_configuration():
    return _evaluate({
        'configurations': [{
            'enable_if': OPTIONS.weird(),
            'flags': flags.FlagSet(flags.FLAKY),
        }]
    })
Example #13
File: perf_test.py Project: zhangpf/arc
    def _run(self, benchmark):
        runner = art_test_runner.ArtTestRunner(
            '901-perf', config={'flags': flags.FlagSet(flags.PASS)})
        args = _prepare_integration_tests_args(100)

        # We reuse the integration test scripts in VM tests, and they expect
        # out/integration_tests to exist. That holds if the integration tests
        # ran before perf_test.py, but not on perf builders, so create the
        # directory if it does not exist.
        run_integration_tests.setup_output_directory(args.output_dir)

        # Call setup_work_root() and prepare_to_run() only if the source files
        # needed to build the tests exist. Perf builders do not have them and
        # can skip this step, since they download pre-built files instead.
        if os.path.exists(os.path.join(runner.get_source_root(), 'etc')):
            runner.setup_work_root()
            runner.prepare_to_run([], args)

        with contextlib.closing(
                suite_runner.SuiteRunnerLogger(
                    runner.name, os.path.join(args.output_dir, runner.name),
                    False)) as logger:
            runner.run_with_setup([benchmark], args, logger, None)
        with open(logger.path) as f:
            for line in f:
                # Result line format is 'Benchmark <name>: <result> ms'.
                match = _BENCHMARK_RESULT_RE.match(line)
                if not match or match.group(1) != benchmark:
                    continue
                return match.group(2)
        raise InvalidResultError(benchmark)
Example #14
    def test_get_expectations_works_with_named_tests(self):
        sb = scoreboard.Scoreboard(
            'suite', {
                'testPasses': flags.FlagSet(flags.PASS),
                'testFails': flags.FlagSet(flags.FAIL),
                'testTimesOut': flags.FlagSet(flags.TIMEOUT),
                'testFlaky': flags.FlagSet(flags.FLAKY),
            })
        expectations = sb.get_expectations()

        self.assertEquals(4, len(expectations))
        self.assertEquals(scoreboard_constants.EXPECTED_PASS,
                          expectations['testPasses'])
        self.assertEquals(scoreboard_constants.EXPECTED_FAIL,
                          expectations['testFails'])
        self.assertEquals(scoreboard_constants.SKIPPED,
                          expectations['testTimesOut'])
        self.assertEquals(scoreboard_constants.EXPECTED_FLAKE,
                          expectations['testFlaky'])
Example #15
 def _make_flaky_suite_configuration(*unused, **kwunused):
   # We must return a new dictionary for every call.
   return {
       'flags': flags.FlagSet(flags.FLAKY),
       'bug': None,
       'metadata': {},
       'deadline': 300,
       'test_order': {},
       'suite_test_expectations': {}
   }
Example #16
    def test_flake_restart_pass(self):
        expectations = {'flake': flags.FlagSet(flags.FLAKY)}
        sb = scoreboard.Scoreboard('suite', expectations)

        tests = ['flake']
        self._register_tests(sb, tests)
        sb.start(tests)

        # Fail the test the first time.
        actuals = {'flake': test_method_result.TestMethodResult.FAIL}
        self._update_tests(sb, actuals)
        results = {
            'total': 1,
            'completed': 1,
            'get_flaky_tests': ['flake'],
            'overall_status': scoreboard_constants.EXPECTED_PASS,
        }
        self._check_scoreboard(sb, results)

        # Restart the tests.
        sb.restart(len(tests))
        sb.start(tests)

        # Pass the test the second time.
        actuals = {'flake': test_method_result.TestMethodResult.PASS}
        self._update_tests(sb, actuals)
        results = {
            'total': 2,
            'restarts': 1,
            'completed': 2,
            'passed': 1,
            'expected_passed': 1,
            'get_expected_passing_tests': ['flake'],
            'overall_status': scoreboard_constants.EXPECTED_PASS,
        }
        self._check_scoreboard(sb, results)

        # Verify finalized results.
        sb.finalize()
        results = {
            'total': 2,
            'restarts': 1,
            'completed': 2,
            'passed': 1,
            'expected_passed': 1,
            'get_expected_passing_tests': ['flake'],
            'overall_status': scoreboard_constants.EXPECTED_PASS,
        }
        self._check_scoreboard(sb, results)

        # Note: the number of completed tests cannot be determined by simply
        # adding up the individual results.  This is because flaky tests that
        # failed are neither passing nor failing nor skipped (but they most
        # definitely were completed).
        self.assertNotEqual(sb.completed, sb.passed + sb.failed + sb.skipped)
Example #17
    def test_flake_incomplete(self):
        expectations = {'flake': flags.FlagSet(flags.FLAKY)}
        sb = scoreboard.Scoreboard('suite', expectations)

        tests = ['flake']
        self._register_tests(sb, tests)
        sb.start(tests)

        # The test is never run.
        self.assertEquals(self._test_counter[scoreboard_constants.INCOMPLETE],
                          0)
        sb.finalize()
        self.assertEquals(self._test_counter[scoreboard_constants.INCOMPLETE],
                          1)
Example #18
    def test_should_run(self):
        # Check the default behavior.
        instance = test_filter.TestRunFilter()
        self.assertTrue(instance.should_run(flags.FlagSet(flags.PASS)))
        self.assertTrue(instance.should_run(flags.FlagSet(flags.FLAKY)))
        self.assertFalse(instance.should_run(flags.FlagSet(flags.FAIL)))
        self.assertFalse(instance.should_run(flags.FlagSet(flags.TIMEOUT)))
        self.assertFalse(
            instance.should_run(flags.FlagSet(flags.NOT_SUPPORTED)))
        self.assertFalse(
            instance.should_run(flags.FlagSet(flags.PASS | flags.LARGE)))

        instance = test_filter.TestRunFilter(include_fail=True)
        self.assertTrue(instance.should_run(flags.FlagSet(flags.FAIL)))

        instance = test_filter.TestRunFilter(include_large=True)
        self.assertTrue(
            instance.should_run(flags.FlagSet(flags.PASS | flags.LARGE)))

        instance = test_filter.TestRunFilter(include_timeout=True)
        self.assertTrue(instance.should_run(flags.FlagSet(flags.TIMEOUT)))
Example #19
def _merge(base_expectation, override_expectation, default_expectation):
  if override_expectation:
    return default_expectation.override_with(override_expectation)

  # We choose the 'worse' status here, for the following reasons:
  # |base_expectation| should be either |PASS| or |FAIL|, because it is based
  # on the original Android test config. On the other hand,
  # |default_expectation| is what we define for each test suite, and is
  # |PASS| by default.
  # It makes sense that a test case marked |FAIL| by the base expectation but
  # |PASS| by default should fail.
  # Conversely, if we annotate a test case as NOT_SUPPORTED or TIMEOUT, we do
  # not want to run it (by default), regardless of whether the base
  # expectation marks it PASS or FAIL.
  status = max(base_expectation.status, default_expectation.status)
  return flags.FlagSet(status | default_expectation.attribute)
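A short worked sketch of the 'worse status' rule described above, assuming the flags module from these examples is in scope and that FAIL ranks above PASS in the status ordering (as the expectations in Example #27 suggest):

base = flags.FlagSet(flags.FAIL)     # from the original Android test config
default = flags.FlagSet(flags.PASS)  # suite-level default

# No override: the worse of FAIL and PASS is kept, so the test stays FAIL.
merged = _merge(base, None, default)
# Expected: merged == flags.FlagSet(flags.FAIL)

# With an explicit override, the default is simply overridden with it.
merged = _merge(base, flags.FlagSet(flags.FLAKY), default)
# Expected: merged == flags.FlagSet(flags.FLAKY)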
Example #20
    def test_flake_restart_fail(self):
        expectations = {'flake': flags.FlagSet(flags.FLAKY)}
        sb = scoreboard.Scoreboard('suite', expectations)

        tests = ['flake']
        self._register_tests(sb, tests)
        sb.start(tests)

        # Fail the test the first time.
        actuals = {'flake': test_method_result.TestMethodResult.FAIL}
        self._update_tests(sb, actuals)
        results = {
            'total': 1,
            'completed': 1,
            'get_flaky_tests': ['flake'],
            'overall_status': scoreboard_constants.EXPECTED_PASS,
        }
        self._check_scoreboard(sb, results)

        # Restart the tests.
        sb.restart(len(tests))
        sb.start(tests)

        # Fail the test the second time as well.
        actuals = {'flake': test_method_result.TestMethodResult.FAIL}
        self._update_tests(sb, actuals)
        results = {
            'total': 2,
            'restarts': 1,
            'completed': 2,
            'get_flaky_tests': ['flake'],
            'overall_status': scoreboard_constants.EXPECTED_PASS,
        }
        self._check_scoreboard(sb, results)

        # Verify finalized results.
        sb.finalize()
        results = {
            'total': 2,
            'completed': 2,
            'failed': 1,
            'restarts': 1,
            'unexpected_failed': 1,
            'get_unexpected_failing_tests': ['flake'],
            'overall_status': scoreboard_constants.UNEXPECTED_FAIL,
        }
        self._check_scoreboard(sb, results)
Example #21
def _read_test_config(path, on_bot, use_gpu, remote_host_type):
    """Reads the file, and eval() it with the test config context."""
    if not os.path.exists(path):
        return {}

    with open(path) as stream:
        content = stream.read()
    test_context = {
        '__builtin__': None,  # Do not inherit the current context.

        # Expectation flags.
        'PASS': flags.FlagSet(flags.PASS),
        'FLAKY': flags.FlagSet(flags.FLAKY),
        'FAIL': flags.FlagSet(flags.FAIL),
        'TIMEOUT': flags.FlagSet(flags.TIMEOUT),
        'NOT_SUPPORTED': flags.FlagSet(flags.NOT_SUPPORTED),
        'LARGE': flags.FlagSet(flags.LARGE),

        # OPTIONS is commonly used in conditions such as enable_if.
        'OPTIONS': OPTIONS,

        # Variables which can be used to check runtime configurations.
        'ON_BOT': on_bot,
        'USE_GPU': use_gpu,
        'USE_NDK_DIRECT_EXECUTION': build_common.use_ndk_direct_execution(),

        # Platform information about the machine the tests run on when they
        # are executed remotely. If the tests are not run remotely, all of the
        # variables below are False.
        'ON_CYGWIN': remote_host_type == 'cygwin',
        'ON_MAC': remote_host_type == 'mac',
        'ON_CHROMEOS': remote_host_type == 'chromeos',
    }

    try:
        raw_config = eval(content, test_context)
    except Exception as e:
        e.args = (e.args[0] + '\neval() failed: ' + path, ) + e.args[1:]
        raise

    try:
        _validate(raw_config)
    except Exception as e:
        e.args = (e.args[0] + '\nValidation failed: ' + path, ) + e.args[1:]
        raise
    return raw_config
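For illustration only, a file read by this loader might contain a single Python expression built from the names defined in test_context above; the suite-level shape below is an assumption based on the configuration schema used elsewhere on this page, and the bug number and conditions are made up:

# Hypothetical file content; evaluated with eval() in the restricted
# test_context, so only names like FAIL, FLAKY, LARGE, ON_BOT and USE_GPU
# are available.
{
    'flags': LARGE,
    'bug': 'crbug.com/0000',
    'configurations': [{
        'enable_if': ON_BOT and not USE_GPU,
        'flags': FLAKY,
    }],
}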
Example #22
 def __init__(self, name, expect, actual):
     self._name = name
     self._expect = flags.FlagSet(expect)
     self._actual = actual
Example #23
    def test_blacklist(self):
        expectations = {
            'alpha': flags.FlagSet(flags.PASS),
            'beta': flags.FlagSet(flags.PASS),
            'gamma': flags.FlagSet(flags.PASS),
        }
        sb = scoreboard.Scoreboard('suite', expectations)
        self._register_tests(sb, ['alpha', 'beta', 'gamma'])

        # Run and pass just the first test.
        sb.start(['alpha', 'beta', 'gamma'])
        actuals = {'alpha': test_method_result.TestMethodResult.PASS}
        self._update_tests(sb, actuals)
        results = {
            'total': 3,
            'completed': 1,
            'incompleted': 2,
            'passed': 1,
            'expected_passed': 1,
            'get_expected_passing_tests': ['alpha'],
            'get_incomplete_tests': ['beta', 'gamma'],
            'overall_status': scoreboard_constants.INCOMPLETE,
        }
        self._check_scoreboard(sb, results)

        # Restart.
        sb.restart(2)
        results = {
            'total': 3,
            'restarts': 1,
            'completed': 1,
            'incompleted': 2,
            'passed': 1,
            'expected_passed': 1,
            'get_expected_passing_tests': ['alpha'],
            'get_incomplete_tests': ['beta', 'gamma'],
            'overall_status': scoreboard_constants.INCOMPLETE,
        }
        self._check_scoreboard(sb, results)

        # Run the remaining two tests.
        sb.start(['beta', 'gamma'])
        actuals = {'beta': test_method_result.TestMethodResult.PASS}
        self._update_tests(sb, actuals)
        results = {
            'total': 3,
            'restarts': 1,
            'completed': 2,
            'incompleted': 1,
            'passed': 2,
            'expected_passed': 2,
            'get_expected_passing_tests': ['alpha', 'beta'],
            'get_incomplete_tests': ['gamma'],
            'overall_status': scoreboard_constants.INCOMPLETE,
        }
        self._check_scoreboard(sb, results)

        # After this restart, 'gamma' will have been incomplete twice, so
        # it should get added to the blacklist.
        sb.restart(1)
        results = {
            'total': 3,
            'restarts': 2,
            'completed': 2,
            'incompleted': 1,
            'passed': 2,
            'expected_passed': 2,
            'get_expected_passing_tests': ['alpha', 'beta'],
            'get_incomplete_tests': ['gamma'],
            'get_incomplete_blacklist': ['gamma'],
            'overall_status': scoreboard_constants.INCOMPLETE,
        }
        self._check_scoreboard(sb, results)

        # Finally run the last test.  Now 'gamma' should no longer be in the
        # blacklist since it ran.
        sb.start(['gamma'])
        actuals = {'gamma': test_method_result.TestMethodResult.PASS}
        self._update_tests(sb, actuals)
        results = {
            'total': 3,
            'completed': 3,
            'passed': 3,
            'expected_passed': 3,
            'restarts': 2,
            'get_expected_passing_tests': ['alpha', 'beta', 'gamma'],
            'overall_status': scoreboard_constants.EXPECTED_PASS,
        }
        self._check_scoreboard(sb, results)

        # Verify all results.
        sb.finalize()
        results = {
            'total': 3,
            'completed': 3,
            'passed': 3,
            'expected_passed': 3,
            'restarts': 2,
            'get_expected_passing_tests': ['alpha', 'beta', 'gamma'],
            'overall_status': scoreboard_constants.EXPECTED_PASS,
        }
        self._check_scoreboard(sb, results)
Example #24
 def __init__(self, test_name, **kwargs):
     super(UnittestRunner, self).__init__(
         test_name, {UnittestRunner._TEST_NAME: flags.FlagSet(flags.PASS)},
         **kwargs)
Example #25
 def test_simple_failing_test(self):
     result = _evaluate({'flags': flags.FlagSet(flags.FAIL)})
     self.assertEquals(flags.FAIL, result['flags'].status)
Example #26
 def test_simple_passing_test(self):
     self.assertEquals(flags.PASS, _evaluate(None)['flags'].status)
     self.assertEquals(flags.PASS, _evaluate({})['flags'].status)
     self.assertEquals(
         flags.PASS,
         _evaluate({'flags': flags.FlagSet(flags.PASS)})['flags'].status)
Example #27
    def test_merge_expectation_map(self):
        base_map = {
            'c#test1': flags.FlagSet(flags.PASS),
            'c#test2': flags.FlagSet(flags.FAIL),
        }

        # With no override expectations, the base expectations should be used.
        self.assertEquals(
            {
                'c#test1': flags.FlagSet(flags.PASS),
                'c#test2': flags.FlagSet(flags.FAIL),
            },
            suite_runner_util.merge_expectation_map(base_map, {},
                                                    flags.FlagSet(flags.PASS)))

        # test1 should be overridden to FAIL, test2 should keep the base FAIL.
        self.assertEquals(
            {
                'c#test1': flags.FlagSet(flags.FAIL),
                'c#test2': flags.FlagSet(flags.FAIL),
            },
            suite_runner_util.merge_expectation_map(
                base_map, {'c#test1': flags.FlagSet(flags.FAIL)},
                flags.FlagSet(flags.PASS)))

        # With no per-test overrides, the FLAKY default applies to tests whose
        # base expectation is PASS, while a base FAIL is kept as the worse
        # status.
        self.assertEquals(
            {
                'c#test1': flags.FlagSet(flags.FLAKY),
                'c#test2': flags.FlagSet(flags.FAIL),
            },
            suite_runner_util.merge_expectation_map(base_map, {},
                                                    flags.FlagSet(
                                                        flags.FLAKY)))

        # If the default expectation is TIMEOUT, every test without a
        # test-level override should become TIMEOUT as well.
        self.assertEquals(
            {
                'c#test1': flags.FlagSet(flags.FLAKY),
                'c#test2': flags.FlagSet(flags.TIMEOUT),
            },
            suite_runner_util.merge_expectation_map(
                base_map, {'c#test1': flags.FlagSet(flags.FLAKY)},
                flags.FlagSet(flags.TIMEOUT)))

        # A test-level override (FAIL | LARGE) takes precedence over the FLAKY
        # suite default, and test2 keeps its base FAIL as the worse status.
        self.assertEquals(
            {
                'c#test1': flags.FlagSet(flags.FAIL | flags.LARGE),
                'c#test2': flags.FlagSet(flags.FAIL),
            },
            suite_runner_util.merge_expectation_map(
                base_map, {'c#test1': flags.FlagSet(flags.FAIL | flags.LARGE)},
                flags.FlagSet(flags.FLAKY)))

        # A suite-level LARGE attribute should cause all tests to be marked
        # LARGE, regardless of whether the base or override expectation is
        # used.
        self.assertEquals(
            {
                'c#test1': flags.FlagSet(flags.PASS | flags.LARGE),
                'c#test2': flags.FlagSet(flags.PASS | flags.LARGE),
            },
            suite_runner_util.merge_expectation_map(
                base_map, {'c#test2': flags.FlagSet(flags.PASS)},
                flags.FlagSet(flags.PASS | flags.LARGE)))

        with self.assertRaises(AssertionError):
            # An AssertionError is raised if suite_expectations contains an
            # unknown test name.
            suite_runner_util.merge_expectation_map(
                base_map, {'c#test3': flags.FlagSet(flags.PASS)},
                flags.FlagSet(flags.PASS))
Example #28
    def test_works_as_intended(self):
        runner = self._make_suite_runner('dummy_suite_1')
        self.assertEquals(60, runner.deadline)
        self.assertEquals(
            {
                'Class1#method1': flags.FlagSet(flags.PASS),
                'Class1#method2': flags.FlagSet(flags.PASS),
                'Class2#method1': flags.FlagSet(flags.PASS),
                'Class2#method2': flags.FlagSet(flags.PASS),
            }, runner.expectation_map)
        self.assertEquals(None, runner.bug)

        runner = self._make_suite_runner('dummy_suite_2')
        self.assertEquals(60, runner.deadline)
        self.assertEquals(
            {
                'Class1#method1': flags.FlagSet(flags.PASS),
                'Class1#method2': flags.FlagSet(flags.PASS),
                'Class2#method1': flags.FlagSet(flags.PASS),
                'Class2#method2': flags.FlagSet(flags.PASS),
            }, runner.expectation_map)
        self.assertEquals(None, runner.bug)

        runner = self._make_suite_runner('dummy_suite_3')
        self.assertEquals(60, runner.deadline)
        self.assertEquals(
            {
                'Class1#method1': flags.FlagSet(flags.FAIL),
                'Class1#method2': flags.FlagSet(flags.FAIL),
                'Class2#method1': flags.FlagSet(flags.FAIL),
                'Class2#method2': flags.FlagSet(flags.FAIL),
            }, runner.expectation_map)
        self.assertEquals('crbug.com/123123', runner.bug)

        runner = self._make_suite_runner('dummy_suite_4')
        self.assertEquals(60, runner.deadline)
        self.assertEquals(
            {
                'Class1#method1': flags.FlagSet(flags.LARGE | flags.FAIL),
                'Class1#method2': flags.FlagSet(flags.LARGE | flags.FLAKY),
                'Class2#method1': flags.FlagSet(flags.LARGE | flags.TIMEOUT),
                'Class2#method2': flags.FlagSet(flags.LARGE | flags.PASS),
            }, runner.expectation_map)
        self.assertEquals(None, runner.bug)
        self.assertEquals(['priMethod', 'abcMethod', 'xyzMethod'],
                          runner.apply_test_ordering(
                              ['xyzMethod', 'abcMethod', 'priMethod']))
Example #29
def read_test_list(path):
  """Reads a list of test methods from file, and returns an expectation map."""
  with open(path) as stream:
    data = stream.read()
  return dict.fromkeys(data.splitlines(), flags.FlagSet(flags.PASS))
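A small usage sketch, assuming a plain text file with one test method name per line; the file name and test names are hypothetical:

# Contents of the hypothetical file 'test_list.txt':
#   SomeClass#method1
#   SomeClass#method2
expectation_map = read_test_list('test_list.txt')
# Expected: every listed name maps to a passing expectation, i.e.
#   expectation_map == {'SomeClass#method1': flags.FlagSet(flags.PASS),
#                       'SomeClass#method2': flags.FlagSet(flags.PASS)}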
Example #30
class SuiteRunConfigIntegrationTests(unittest.TestCase):
    """Uses the module interface as intended."""

    # This is the configuration the tests will use:
    my_config = staticmethod(
        suite_runner_config.make_suite_run_configs(
            lambda: {
                suite_runner_config.SUITE_DEFAULTS: {
                    'flags': flags.FlagSet(flags.PASS),
                    'deadline': 60,
                },
                'dummy_suite_1': None,
                'dummy_suite_2': {},
                'dummy_suite_3': {
                    'flags': flags.FlagSet(flags.FAIL),
                    'bug': 'crbug.com/123123',
                },
                'dummy_suite_4': {
                    'flags':
                    flags.FlagSet(flags.LARGE),
                    'configurations': [{
                        'test_order':
                        collections.OrderedDict([('priMethod', -1)]),
                        'suite_test_expectations': {
                            'Class1': {
                                'method1': flags.FlagSet(flags.FAIL),
                                'method2': flags.FlagSet(flags.FLAKY),
                            },
                            'Class2#method1': flags.FlagSet(flags.TIMEOUT),
                        },
                    }],
                },
            }))

    def setUp(self):
        OPTIONS.parse([])

    def _make_suite_runner(self, name):
        return suite_runner.SuiteRunnerBase(
            name, {
                'Class1#method1': flags.FlagSet(flags.PASS),
                'Class1#method2': flags.FlagSet(flags.PASS),
                'Class2#method1': flags.FlagSet(flags.PASS),
                'Class2#method2': flags.FlagSet(flags.PASS),
            },
            config=SuiteRunConfigIntegrationTests.my_config()[name])

    def test_works_as_intended(self):
        runner = self._make_suite_runner('dummy_suite_1')
        self.assertEquals(60, runner.deadline)
        self.assertEquals(
            {
                'Class1#method1': flags.FlagSet(flags.PASS),
                'Class1#method2': flags.FlagSet(flags.PASS),
                'Class2#method1': flags.FlagSet(flags.PASS),
                'Class2#method2': flags.FlagSet(flags.PASS),
            }, runner.expectation_map)
        self.assertEquals(None, runner.bug)

        runner = self._make_suite_runner('dummy_suite_2')
        self.assertEquals(60, runner.deadline)
        self.assertEquals(
            {
                'Class1#method1': flags.FlagSet(flags.PASS),
                'Class1#method2': flags.FlagSet(flags.PASS),
                'Class2#method1': flags.FlagSet(flags.PASS),
                'Class2#method2': flags.FlagSet(flags.PASS),
            }, runner.expectation_map)
        self.assertEquals(None, runner.bug)

        runner = self._make_suite_runner('dummy_suite_3')
        self.assertEquals(60, runner.deadline)
        self.assertEquals(
            {
                'Class1#method1': flags.FlagSet(flags.FAIL),
                'Class1#method2': flags.FlagSet(flags.FAIL),
                'Class2#method1': flags.FlagSet(flags.FAIL),
                'Class2#method2': flags.FlagSet(flags.FAIL),
            }, runner.expectation_map)
        self.assertEquals('crbug.com/123123', runner.bug)

        runner = self._make_suite_runner('dummy_suite_4')
        self.assertEquals(60, runner.deadline)
        self.assertEquals(
            {
                'Class1#method1': flags.FlagSet(flags.LARGE | flags.FAIL),
                'Class1#method2': flags.FlagSet(flags.LARGE | flags.FLAKY),
                'Class2#method1': flags.FlagSet(flags.LARGE | flags.TIMEOUT),
                'Class2#method2': flags.FlagSet(flags.LARGE | flags.PASS),
            }, runner.expectation_map)
        self.assertEquals(None, runner.bug)
        self.assertEquals(['priMethod', 'abcMethod', 'xyzMethod'],
                          runner.apply_test_ordering(
                              ['xyzMethod', 'abcMethod', 'priMethod']))