def setup_class(cls):
    """Class fixture: two runs with overlapping tests and subtests on 'bor'."""
    first = results.TestrunResult()
    for name, stat in [('foo', 'pass'), ('bar', 'fail'), ('oink', 'crash'),
                       ('bonk', 'warn'), ('bor', 'crash')]:
        first.tests[name] = results.TestResult(stat)
    for sub, stat in [('1', 'pass'), ('2', 'skip'), ('3', 'fail'),
                      ('4', 'pass')]:
        first.tests['bor'].subtests[sub] = stat

    second = results.TestrunResult()
    for name, stat in [('foo', 'fail'), ('bar', 'pass'), ('oink', 'crash'),
                       ('tonk', 'incomplete'), ('bor', 'crash')]:
        second.tests[name] = results.TestResult(stat)
    for sub, stat in [('1', 'fail'), ('2', 'skip'), ('3', 'pass'),
                      ('5', 'pass')]:
        second.tests['bor'].subtests[sub] = stat

    cls.test = summary.Results([first, second])
def test_values(self, index, capsys):
    """Check one line of the summary output against the expected text.

    The expected value makes use of the template, which helps to minimize
    the number of changes that need to be made if the output is altered.
    """
    test_names = [
        grouptools.join('foo', 'bar', 'oink', 'foobar', 'boink'),
        'foo',
        'bar',
    ]
    column = '{: >20.20} {: >12.12}'

    expected = console_._SUMMARY_TEMPLATE.format(
        names=' '.join(['this is a really rea', 'another name']),
        divider=' '.join(['--------------------', '------------']),
        pass_=column.format('1', '2'),
        fail=column.format('2', '0'),
        crash=column.format('0', '0'),
        skip=column.format('0', '1'),
        timeout=column.format('0', '0'),
        warn=column.format('0', '0'),
        incomplete=column.format('0', '0'),
        dmesg_warn=column.format('0', '0'),
        dmesg_fail=column.format('0', '0'),
        changes=column.format('0', '2'),
        fixes=column.format('0', '1'),
        regressions=column.format('0', '0'),
        total=column.format('3', '3'),
        time=column.format('00:01:39', '02:14:05')).split('\n')

    run_a = results.TestrunResult()
    run_a.name = 'this is a really really really really long name'
    run_a.tests[test_names[0]] = results.TestResult('pass')
    run_a.tests[test_names[1]] = results.TestResult('fail')
    run_a.tests[test_names[2]] = results.TestResult('notrun')
    run_a.tests[test_names[2]].subtests['1'] = 'fail'
    run_a.time_elapsed = results.TimeAttribute(1509747121.4873962,
                                               1509747220.544042)
    run_a.calculate_group_totals()

    run_b = results.TestrunResult()
    run_b.name = 'another name'
    run_b.tests[test_names[0]] = results.TestResult('pass')
    run_b.tests[test_names[1]] = results.TestResult('pass')
    run_b.tests[test_names[2]] = results.TestResult('notrun')
    run_b.tests[test_names[2]].subtests['1'] = 'skip'
    run_b.time_elapsed = results.TimeAttribute(1464820707.4581327,
                                               1464828753.201948)
    run_b.calculate_group_totals()

    console_._print_summary(common.Results([run_a, run_b]))
    actual = capsys.readouterr()[0].splitlines()

    assert actual[index] == expected[index]
def result(self):
    """Fixture: a Results object whose runs contain no subtests."""
    one = results.TestrunResult()
    one.tests['foo'] = results.TestResult('pass')

    two = results.TestrunResult()
    two.tests['foo'] = results.TestResult('fail')
    two.tests['bar'] = results.TestResult('fail')

    return summary.Results([one, two])
def test_Results_get_results():
    """summary.Results.get_result: returns one status per run."""
    one = results.TestrunResult()
    one.tests['foo'] = results.TestResult('pass')

    two = results.TestrunResult()
    two.tests['foo'] = results.TestResult('fail')

    combined = summary.Results([one, two])
    nt.eq_(combined.get_result('foo'), [status.PASS, status.FAIL])
def test_Results_get_results_missing():
    """summary.Results.get_result: handles KeyErrors (missing test)."""
    one = results.TestrunResult()
    one.tests['foo'] = results.TestResult('pass')

    two = results.TestrunResult()
    two.tests['bar'] = results.TestResult('fail')

    combined = summary.Results([one, two])
    nt.eq_(combined.get_result('foo'), [status.PASS, status.NOTRUN])
def test_Results_get_results_missing_subtest():
    """summary.Results.get_result (subtest): handles KeyErrors."""
    one = results.TestrunResult()
    one.tests['foo'] = results.TestResult('pass')
    one.tests['foo'].subtests['1'] = 'pass'

    two = results.TestrunResult()
    two.tests['bar'] = results.TestResult('fail')

    combined = summary.Results([one, two])
    nt.eq_(combined.get_result(grouptools.join('foo', '1')),
           [status.PASS, status.NOTRUN])
def subtest(self):
    """Fixture: a Results object whose runs contain subtests."""
    one = results.TestrunResult()
    one.tests['foo'] = results.TestResult('notrun')
    one.tests['foo'].subtests['1'] = 'pass'
    one.tests['bar'] = results.TestResult('notrun')
    one.tests['bar'].subtests['1'] = 'pass'

    two = results.TestrunResult()
    two.tests['foo'] = results.TestResult('notrun')
    two.tests['foo'].subtests['1'] = 'fail'

    return summary.Results([one, two])
def test_Results_get_results_subtest():
    """summary.Results.get_result (subtest): returns one status per run."""
    one = results.TestrunResult()
    one.tests['foo'] = results.TestResult('notrun')
    one.tests['foo'].subtests['1'] = 'pass'

    two = results.TestrunResult()
    two.tests['foo'] = results.TestResult('notrun')
    two.tests['foo'].subtests['1'] = 'fail'

    combined = summary.Results([one, two])
    nt.eq_(combined.get_result(grouptools.join('foo', '1')),
           [status.PASS, status.FAIL])
def setup_class(cls):
    """Create both an expected value and an actual value.

    The expected value makes use of the template, which helps to minimize
    the number of changes that need to be made if the output is altered.
    """
    test_names = [
        grouptools.join('foo', 'bar', 'oink', 'foobar', 'boink'),
        'foo',
        'bar',
    ]
    column = '{: >20.20} {: >6.6}'

    cls.expected = console_._SUMMARY_TEMPLATE.format(
        names=' '.join(['this is a really rea', 'a name']),
        divider=' '.join(['--------------------', '------']),
        pass_=column.format('1', '2'),
        fail=column.format('2', '0'),
        crash=column.format('0', '0'),
        skip=column.format('0', '1'),
        timeout=column.format('0', '0'),
        warn=column.format('0', '0'),
        incomplete=column.format('0', '0'),
        dmesg_warn=column.format('0', '0'),
        dmesg_fail=column.format('0', '0'),
        changes=column.format('0', '2'),
        fixes=column.format('0', '1'),
        regressions=column.format('0', '0'),
        total=column.format('3', '3')).split('\n')

    run_a = results.TestrunResult()
    run_a.name = 'this is a really really really really long name'
    run_a.tests[test_names[0]] = results.TestResult('pass')
    run_a.tests[test_names[1]] = results.TestResult('fail')
    run_a.tests[test_names[2]] = results.TestResult('notrun')
    run_a.tests[test_names[2]].subtests['1'] = 'fail'
    run_a.calculate_group_totals()

    run_b = results.TestrunResult()
    run_b.name = 'a name'
    run_b.tests[test_names[0]] = results.TestResult('pass')
    run_b.tests[test_names[1]] = results.TestResult('pass')
    run_b.tests[test_names[2]] = results.TestResult('notrun')
    run_b.tests[test_names[2]].subtests['1'] = 'skip'
    run_b.calculate_group_totals()

    reses = common.Results([run_a, run_b])
    cls.actual = get_stdout(
        lambda: console_._print_summary(reses)).split('\n')
def test_print_result():
    """summary.console_._print_result: prints expected values."""
    one = results.TestrunResult()
    one.tests['foo'] = results.TestResult('pass')

    two = results.TestrunResult()
    two.tests['foo'] = results.TestResult('fail')

    reses = common.Results([one, two])
    actual = get_stdout(
        lambda: console_._print_result(reses, reses.names.all))
    nt.eq_('foo: pass fail\n', actual)
def test_basic(self, capsys):
    """summary.console_._print_result: prints expected values."""
    one = results.TestrunResult()
    one.tests['foo'] = results.TestResult('pass')

    two = results.TestrunResult()
    two.tests['foo'] = results.TestResult('fail')

    reses = common.Results([one, two])
    console_._print_result(reses, reses.names.all)
    captured, _ = capsys.readouterr()

    assert captured == 'foo: pass fail\n'
def test_update_results_old():
    """update_results() updates old results.

    Because our updates are designed to silently and incrementally step
    from version x to version y, it is impossible to know exactly what
    we'll get at the end without tests that would need reworking every
    time a new update lands.  Since there already is (at least for
    v0 -> v1) a fairly comprehensive set of tests, this test only checks
    that after running update_results() the results_version equals
    CURRENT_JSON_VERSION (one of the effects of running it), with the
    assumption that there is sufficient other testing of the update
    process.
    """
    data = utils.JSON_DATA.copy()
    data['results_version'] = 0

    with utils.tempdir() as d:
        path = os.path.join(d, 'main')
        with open(path, 'w') as f:
            json.dump(data, f)

        with open(path, 'r') as f:
            base = results.TestrunResult(f)
            res = results.update_results(base, f.name)

    nt.assert_equal(res.results_version, results.CURRENT_JSON_VERSION)
def test_find_diffs():
    """summary.find_diffs: calculates correct set of diffs."""
    one = results.TestrunResult()
    one.tests['foo'] = results.TestResult('pass')
    one.tests['bar'] = results.TestResult('fail')
    one.tests['oink'] = results.TestResult('crash')
    one.tests['bonk'] = results.TestResult('warn')

    two = results.TestrunResult()
    two.tests['foo'] = results.TestResult('fail')
    two.tests['bar'] = results.TestResult('pass')
    two.tests['oink'] = results.TestResult('crash')

    diffs = summary.find_diffs([one, two],
                               {'foo', 'bar', 'oink', 'bonk'},
                               lambda x, y: x != y)
    nt.eq_(diffs, [{'foo', 'bar'}])
def _resume(results_dir):
    """Load a partially completed json results directory.

    Reads the metadata file, refuses to resume anything but the current
    results version, then folds in every per-test result file that parses.
    """
    # Pylint can't infer that the json being loaded is a dict
    # pylint: disable=maybe-no-member
    assert os.path.isdir(results_dir), \
        "TestrunResult.resume() requires a directory"

    # Load the metadata
    with open(os.path.join(results_dir, 'metadata.json'), 'r') as f:
        meta = json.load(f)
    assert meta['results_version'] == CURRENT_JSON_VERSION, \
        "Old results version, resume impossible"

    testrun = results.TestrunResult()
    testrun.name = meta['name']
    testrun.options = meta['options']
    testrun.uname = meta.get('uname')
    testrun.glxinfo = meta.get('glxinfo')
    testrun.lspci = meta.get('lspci')

    # Fold in every per-test file; a half-written file (ValueError from
    # the json parser) is deliberately skipped.
    tests_dir = os.path.join(results_dir, 'tests')
    for entry in os.listdir(tests_dir):
        with open(os.path.join(tests_dir, entry), 'r') as f:
            try:
                testrun.tests.update(json.load(f, object_hook=piglit_decoder))
            except ValueError:
                continue

    return testrun
def test_find_single():
    """summary.find_single: filters appropriately."""
    one = results.TestrunResult()
    one.tests['foo'] = results.TestResult('pass')
    one.tests['bar'] = results.TestResult('fail')
    one.tests['oink'] = results.TestResult('crash')
    one.tests['bonk'] = results.TestResult('warn')

    two = results.TestrunResult()
    two.tests['foo'] = results.TestResult('fail')
    two.tests['bar'] = results.TestResult('pass')
    two.tests['oink'] = results.TestResult('crash')

    singles = summary.find_single([one, two],
                                  {'foo', 'bar', 'oink', 'bonk'},
                                  lambda x: x > status.PASS)
    nt.eq_(singles, [{'oink', 'bonk', 'bar'}, {'foo', 'oink'}])
def setup_class(cls):
    """Class fixture: two runs with differing per-test statuses."""
    first = results.TestrunResult()
    for name, stat in [('foo', 'pass'), ('bar', 'fail'), ('oink', 'crash'),
                       ('bonk', 'warn'), ('bor', 'skip'), ('red', 'skip')]:
        first.tests[name] = results.TestResult(stat)

    second = results.TestrunResult()
    for name, stat in [('foo', 'fail'), ('bar', 'pass'), ('oink', 'crash'),
                       ('tonk', 'incomplete'), ('red', 'pass')]:
        second.tests[name] = results.TestResult(stat)

    cls.test = summary.Results([first, second])
def setup_class(cls):
    """Class fixture: a single run with mixed statuses."""
    run = results.TestrunResult()
    for name, stat in [('foo', 'fail'), ('bar', 'pass'), ('oink', 'skip'),
                       ('tonk', 'incomplete')]:
        run.tests[name] = results.TestResult(stat)

    cls.test = summary.Results([run])
def test(self):
    """Set some values to be used by all tests."""
    first = results.TestrunResult()
    for name, stat in [('foo', 'pass'), ('bar', 'fail'), ('oink', 'crash'),
                       ('bonk', 'warn'), ('bor', 'skip'), ('red', 'skip')]:
        first.tests[name] = results.TestResult(stat)

    second = results.TestrunResult()
    for name, stat in [('foo', 'fail'), ('bar', 'pass'), ('oink', 'crash'),
                       ('tonk', 'incomplete'), ('red', 'pass')]:
        second.tests[name] = results.TestResult(stat)

    return summary.Results([first, second])
def setup_class(cls):
    """Class fixture: a run with one subtest-bearing and one plain test."""
    crasher = results.TestResult('crash')
    crasher.subtests['foo'] = status.PASS

    run = results.TestrunResult()
    run.tests['sub'] = crasher
    run.tests['test'] = results.TestResult('pass')
    run.calculate_group_totals()

    cls.inst = run
def test_print_result_replaces():
    """summary.console_._print_result: replaces separator with /."""
    run = results.TestrunResult()
    run.tests[grouptools.join('foo', 'bar')] = results.TestResult('pass')

    reses = common.Results([run])
    actual = get_stdout(
        lambda: console_._print_result(reses, reses.names.all))
    nt.eq_('foo/bar: pass\n', actual)
def _load(results_file):
    """Load a json results instance and return a TestrunResult.

    This function converts an existing, fully completed json run.

    Arguments:
    results_file -- an open file-like object containing json result data.
    """
    result = results.TestrunResult()
    # Seed the version as 0 so that the updater treats this as an old run;
    # the loaded json is expected to overwrite it with the real version.
    # Bug fix: this was previously assigned to the misspelled attribute
    # 'results_vesrion', so the intended sentinel was never actually set.
    result.results_version = 0  # This should get overwritten
    result.__dict__.update(json.load(results_file, object_hook=piglit_decoder))
    return result
def setup_class(cls):
    """Class fixture: totals for a run whose only test has subtests."""
    tr = results.TestResult('crash')
    for sub, stat in [('foo', status.PASS), ('bar', status.CRASH),
                      ('oink', status.FAIL)]:
        tr.subtests[sub] = stat

    run = results.TestrunResult()
    run.tests[grouptools.join('sub', 'test')] = tr
    run.calculate_group_totals()

    cls.test = run.totals
def test_replaces_separator(self, capsys):
    """summary.console_._print_result: replaces separator with /."""
    run = results.TestrunResult()
    run.tests[grouptools.join('foo', 'bar')] = results.TestResult('pass')

    reses = common.Results([run])
    console_._print_result(reses, reses.names.all)
    captured, _ = capsys.readouterr()

    assert captured == 'foo/bar: pass\n'
def setup_class(cls):
    """Class fixture: the to_json() serialization of a populated run."""
    run = results.TestrunResult()
    run.name = 'name'
    run.uname = 'this is uname'
    run.options = {'some': 'option'}
    run.glxinfo = 'glxinfo'
    run.clinfo = 'clinfo'
    run.wglinfo = 'wglinfo'
    run.lspci = 'this is lspci'
    run.time_elapsed.end = 1.23
    run.tests = {'a test': results.TestResult('pass')}

    cls.test = run.to_json()
def setup_class(cls):
    """Class fixture: one run with mixed statuses and subtests on 'bor'."""
    run = results.TestrunResult()
    for name, stat in [('foo', 'pass'), ('bar', 'fail'), ('oink', 'crash'),
                       ('bonk', 'warn'), ('bor', 'crash')]:
        run.tests[name] = results.TestResult(stat)
    for sub, stat in [('1', 'pass'), ('2', 'skip'), ('3', 'fail'),
                      ('4', 'pass')]:
        run.tests['bor'].subtests[sub] = stat

    cls.results = run
def feature(self, tmpdir_factory):
    """Fixture: FeatResults built from the module-level PROFILE and DATA."""
    p = tmpdir_factory.mktemp('feature').join('p')
    # each write to p will replace the contents, so using json.dump which
    # makes a number of small writes will fail to produce anything useful.
    p.write(json.dumps(DATA))

    result = results.TestrunResult()
    for name, test in PROFILE.test_list.items():
        result.tests[name] = test.result
    result.options['profile'] = [None]
    result.name = 'foo'

    with mock.patch('framework.summary.feature.profile.load_test_profile',
                    mock.Mock(return_value=PROFILE)):
        return feature.FeatResults([result], str(p))
def test_update_results_current():
    """update_results() returns early when the results_version is current."""
    data = utils.JSON_DATA.copy()
    data['results_version'] = results.CURRENT_JSON_VERSION

    with utils.tempdir() as d:
        path = os.path.join(d, 'main')
        with open(path, 'w') as f:
            json.dump(data, f)

        with open(path, 'r') as f:
            base = results.TestrunResult(f)
            res = results.update_results(base, f.name)

    nt.assert_dict_equal(res.__dict__, base.__dict__)
def setup_class(cls):
    """Class fixture: totals for a small nested group tree."""
    run = results.TestrunResult()
    run.tests = {
        'oink': results.TestResult('pass'),
        grouptools.join('foo', 'bar'): results.TestResult('fail'),
        grouptools.join('foo', 'foo', 'bar'): results.TestResult('crash'),
        grouptools.join('foo', 'foo', 'oink'): results.TestResult('skip'),
    }
    run.calculate_group_totals()

    cls.test = run.totals
def _load(results_file):
    """Load a junit results instance and return a TestrunResult.

    It's worth noting that junit is not as descriptive as piglit's own json
    format, so some data structures will be empty compared to json.

    This tries to not make too many assumptions about the structure of the
    JUnit document.
    """
    run_result = results.TestrunResult()

    # Derive the run name from the file path: prefer the file's own stem,
    # fall back to the parent directory when the stem is just 'results',
    # and finally to a generic default.
    splitpath = os.path.splitext(results_file)[0].split(os.path.sep)
    if splitpath[-1] != 'results':
        run_result.name = splitpath[-1]
    elif len(splitpath) > 1:
        run_result.name = splitpath[-2]
    else:
        run_result.name = 'junit result'

    # Only the testsuite named "piglit" is read; other suites are ignored.
    tree = etree.parse(results_file).getroot().find('.//testsuite[@name="piglit"]')
    for test in tree.iterfind('testcase'):
        result = results.TestResult()
        # Take the class name minus the 'piglit.' element, replace junit's '.'
        # separator with piglit's separator, and join the group and test names
        name = test.attrib['classname'].split('.', 1)[1]
        name = name.replace('.', grouptools.SEPARATOR)
        name = grouptools.join(name, test.attrib['name'])

        # Remove the trailing _ if they were added (such as to api and search)
        if name.endswith('_'):
            name = name[:-1]

        result['result'] = status.status_lookup(test.attrib['status'])
        result['time'] = float(test.attrib['time'])
        result['err'] = test.find('system-err').text

        # The command is prepended to system-out, so we need to separate those
        # into two separate elements
        out = test.find('system-out').text.split('\n')
        result['command'] = out[0]
        result['out'] = '\n'.join(out[1:])

        run_result.tests[name] = result

    return run_result
def setup_class(cls):
    """Setup values used by all tests."""
    run = results.TestrunResult()
    run.info = {
        'system': {
            'uname': 'this is uname',
            'glxinfo': 'glxinfo',
            'clinfo': 'clinfo',
            'wglinfo': 'wglinfo',
            'lspci': 'this is lspci',
        }
    }
    run.name = 'name'
    run.options = {'some': 'option'}
    run.time_elapsed.end = 1.23
    run.tests = {'a test': results.TestResult('pass')}

    cls.test = run.to_json()