Code example #1
def test_init_skipped():
    tc = Case("Skipped-Message-and-Output")
    tc.add_skipped_info("skipped message", "I skipped!")
    ts, tcs = serialize_and_read(Suite("test", [tc]))[0]
    verify_test_case(tcs[0], {"name": "Skipped-Message-and-Output"},
                     skipped_message="skipped message",
                     skipped_output="I skipped!")
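Note: Case and Suite in this and the following test snippets are import aliases for junit_xml's TestCase and TestSuite, and serialize_and_read/verify_test_case are helpers defined in the library's own test suite rather than public API. A minimal self-contained sketch of the same pattern, assuming only that the junit-xml package is installed:

from junit_xml import TestCase, TestSuite

tc = TestCase("my-skipped-test")
# Attach a <skipped> element carrying both a message attribute and text output.
tc.add_skipped_info("skipped message", "I skipped!")
ts = TestSuite("my-suite", [tc])
# Skipped cases appear in the XML report but are not counted as failures.
print(TestSuite.to_xml_string([ts]))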
Code example #2
File: report.py Project: tronxd/checkov
    def get_test_suites(self, use_bc_ids=False) -> List[TestSuite]:
        test_cases = defaultdict(list)
        test_suites = []
        records = self.passed_checks + self.failed_checks + self.skipped_checks
        for record in records:
            check_name = f"{record.get_output_id(use_bc_ids)}/{record.check_name}"

            test_name = f"{self.check_type} {check_name} {record.resource}"
            test_case = TestCase(name=test_name,
                                 file=record.file_path,
                                 classname=record.check_class)
            if record.check_result["result"] == CheckResult.FAILED:
                if record.file_path and record.file_line_range:
                    test_case.add_failure_info(
                        f"Resource {record.resource} failed in check {check_name} - {record.file_path}:{record.file_line_range} - Guideline: {record.guideline}"
                    )
                else:
                    test_case.add_failure_info(
                        f"Resource {record.resource} failed in check {check_name}"
                    )
            if record.check_result["result"] == CheckResult.SKIPPED:
                test_case.add_skipped_info(
                    f'Resource {record.resource} skipped in check {check_name} \n Suppress comment: {record.check_result["suppress_comment"]} - Guideline: {record.guideline}'
                )

            test_cases[check_name].append(test_case)
        for key in test_cases.keys():
            test_suites.append(
                TestSuite(
                    name=key,
                    test_cases=test_cases[key],
                    package=test_cases[key][0].classname,
                ))
        return test_suites
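A caller would typically serialize the suites returned above into a report file. A minimal sketch, assuming junit-xml 1.9+ (which provides the module-level to_xml_report_string); the function name and output path are illustrative, and report is assumed to be a checkov Report instance:

from junit_xml import to_xml_report_string

def write_junit_report(report, path="checkov-junit.xml"):
    # report is assumed to be a checkov Report exposing get_test_suites().
    suites = report.get_test_suites()
    with open(path, "w") as f:
        f.write(to_xml_report_string(suites))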
Code example #3
def test_init_skipped_output():
    tc = Case("Skipped-Output")
    tc.add_skipped_info(output="I skipped!")
    #pylint: disable=unused-variable
    ts, tcs = serialize_and_read(Suite("test", [tc]))[0]
    verify_test_case(tcs[0], {"name": "Skipped-Output"},
                     skipped_output="I skipped!")
Code example #4
def test_init_utf8():
    tc = Case(
        name="Test äöü",
        classname="some.class.name.äöü",
        elapsed_sec=123.345,
        stdout="I am stdöüt!",
        stderr="I am stdärr!",
    )
    tc.add_skipped_info(message="Skipped äöü", output="I skippäd!")
    tc.add_error_info(message="Skipped error äöü",
                      output="I skippäd with an error!")
    test_suite = Suite("Test UTF-8", [tc])
    #pylint: disable=unused-variable
    ts, tcs = serialize_and_read(test_suite, encoding="utf-8")[0]
    verify_test_case(
        tcs[0],
        {
            "name": decode("Test äöü", "utf-8"),
            "classname": decode("some.class.name.äöü", "utf-8"),
            "time": ("%f" % 123.345),
        },
        stdout=decode("I am stdöüt!", "utf-8"),
        stderr=decode("I am stdärr!", "utf-8"),
        skipped_message=decode("Skipped äöü", "utf-8"),
        skipped_output=decode("I skippäd!", "utf-8"),
        error_message=decode("Skipped error äöü", "utf-8"),
        error_output=decode("I skippäd with an error!", "utf-8"),
    )
Code example #5
def test_init_skipped_message():
    tc = Case("Skipped-Message")
    tc.add_skipped_info("skipped message")
    #pylint: disable=unused-variable
    ts, tcs = serialize_and_read(Suite("test", [tc]))[0]
    verify_test_case(tcs[0], {"name": "Skipped-Message"},
                     skipped_message="skipped message")
Code example #6
File: nmos-test.py Project: thosil/nmos-testing
def write_test_results(results, args):
    exit_code = ExitCodes.OK
    test_cases = []
    for test_result in results["result"]:
        test_case = TestCase(test_result.name,
                             elapsed_sec=test_result.elapsed_time,
                             timestamp=test_result.timestamp)
        if test_result.name in args.ignore or test_result.state in [
                TestStates.DISABLED, TestStates.UNCLEAR, TestStates.MANUAL,
                TestStates.NA, TestStates.OPTIONAL
        ]:
            test_case.add_skipped_info(test_result.detail)
        elif test_result.state in [TestStates.WARNING, TestStates.FAIL]:
            test_case.add_failure_info(test_result.detail,
                                       failure_type=str(test_result.state))
            if test_result.state == TestStates.FAIL:
                exit_code = max(exit_code, ExitCodes.FAIL)
            elif test_result.state == TestStates.WARNING:
                exit_code = max(exit_code, ExitCodes.WARNING)
        elif test_result.state != TestStates.PASS:
            test_case.add_error_info(test_result.detail,
                                     error_type=str(test_result.state))
        test_cases.append(test_case)

    ts = TestSuite(results["name"] + ": " + results["base_url"], test_cases)
    with open(args.output, "w") as f:
        TestSuite.to_file(f, [ts], prettyprint=False)
        print(" * Test results written to file: {}".format(args.output))
    return exit_code
Code example #7
File: runner.py Project: BuzzBurrowes/Arduino
 def run_tests(self):
     test_cases = []
     for test in self.tests:
         desc = test['desc']
         name = test['name']
         index = test['id']
         test_case = TestCase(name, self.name)
         if '[.]' in desc:
             print('skipping test "{}"'.format(name))
             test_case.add_skipped_info(message="Skipped test marked with [.]")
         else:
             test_output = StringIO()
             self.sp.logfile = test_output
             t_start = time.time()
             result = self.run_test(index)
             t_stop = time.time()
             self.sp.logfile = None
             test_case.elapsed_sec = t_stop - t_start
             debug_print('test output was:')
             debug_print(test_output.getvalue())
             if result == BSTestRunner.SUCCESS:
                 test_case.stdout = test_output.getvalue()
                 print('test "{}" passed'.format(name))
             else:
                 print('test "{}" failed'.format(name))
                 test_case.add_failure_info('Test failed', output=test_output.getvalue())
             test_output.close()
         test_cases.append(test_case)
     return TestSuite(self.name, test_cases)
Code example #8
File: report.py Project: shaneutt/checkov
    def get_test_suites(self):
        test_cases = {}
        test_suites = []
        records = self.passed_checks + self.failed_checks + self.skipped_checks
        for record in records:
            check_name = record.check_name
            if check_name not in test_cases:
                test_cases[check_name] = []

            test_name = "{} {} {}".format(self.check_type, check_name,
                                          record.resource)
            test_case = TestCase(name=test_name,
                                 file=record.file_path,
                                 classname=record.check_class)
            if record.check_result['result'] == CheckResult.FAILED:
                test_case.add_failure_info(
                    "Resource \"{}\" failed in check \"{}\"".format(
                        record.resource, check_name))
            if record.check_result['result'] == CheckResult.SKIPPED:
                test_case.add_skipped_info(
                    "Resource \"{}\" skipped in check \"{}\"\n Suppress comment: {}"
                    .format(record.resource, check_name,
                            record.check_result['suppress_comment']))
            test_cases[check_name].append(test_case)
        for key in test_cases.keys():
            test_suites.append(
                TestSuite(name=key,
                          test_cases=test_cases[key],
                          package=test_cases[key][0].classname))
        return test_suites
Code example #9
File: test_data.py Project: pombredanne/ci-tools
    def simple_report(self):
        """empty test report"""
        test_case_1 = TestCase("testcase1", elapsed_sec=1.5)

        test_case_2 = TestCase("testcase2", elapsed_sec=0.5)
        test_case_2.add_skipped_info("was skipped")

        test_case_3 = TestCase("testcase3", elapsed_sec=1.0)
        test_case_3.add_failure_info("failed")

        test_case_4 = TestCase("testcase4", elapsed_sec=0.25)
        test_case_4.add_error_info("errored")

        test_case_5 = TestCase("testcase5", elapsed_sec=0.1)

        test_cases = [
            test_case_1,
            test_case_2,
            test_case_3,
            test_case_4,
            test_case_5
        ]
        test_suites = [
            TestSuite('testsuite1', test_cases, timestamp=datetime.datetime.utcnow())
            ]
        return TestReport(NAME, {"module": test_suites}, BUILD_NUMBER, True)
Code example #10
def test_init_unicode():
    tc = Case(
        name=decode("Test äöü", "utf-8"),
        classname=decode("some.class.name.äöü", "utf-8"),
        elapsed_sec=123.345,
        stdout=decode("I am stdöüt!", "utf-8"),
        stderr=decode("I am stdärr!", "utf-8"),
    )
    tc.add_skipped_info(message=decode("Skipped äöü", "utf-8"),
                        output=decode("I skippäd!", "utf-8"))
    tc.add_error_info(message=decode("Skipped error äöü", "utf-8"),
                      output=decode("I skippäd with an error!", "utf-8"))

    ts, tcs = serialize_and_read(Suite("Test Unicode", [tc]))[0]
    verify_test_case(
        tcs[0],
        {
            "name": decode("Test äöü", "utf-8"),
            "classname": decode("some.class.name.äöü", "utf-8"),
            "time": ("%f" % 123.345),
        },
        stdout=decode("I am stdöüt!", "utf-8"),
        stderr=decode("I am stdärr!", "utf-8"),
        skipped_message=decode("Skipped äöü", "utf-8"),
        skipped_output=decode("I skippäd!", "utf-8"),
        error_message=decode("Skipped error äöü", "utf-8"),
        error_output=decode("I skippäd with an error!", "utf-8"),
    )
Code example #11
def test_multiple_skipped():
    """Tests multiple skipped messages in one test case"""
    tc = Case("Multiple skipped", allow_multiple_subelements=True)
    tc.add_skipped_info("First skipped", "First skipped message")
    (_, tcs) = serialize_and_read(Suite("test", [tc]))[0]
    verify_test_case(tcs[0], {"name": "Multiple skipped"},
                     skipped=[{
                         "message": "First skipped",
                         "output": "First skipped message"
                     }])
    tc.add_skipped_info("Second skipped", "Second skipped message")
    (_, tcs) = serialize_and_read(Suite("test", [tc]))[0]
    verify_test_case(
        tcs[0],
        {"name": "Multiple skipped"},
        skipped=[
            {
                "message": "First skipped",
                "output": "First skipped message"
            },
            {
                "message": "Second skipped",
                "output": "Second skipped message"
            },
        ],
    )
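Note the allow_multiple_subelements=True flag above: by default a junit_xml TestCase records a single skipped entry, whereas with this flag each add_skipped_info call appends a further <skipped> element, which is why the second serialization verifies two entries.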
Code example #12
File: test_exporters.py Project: jaustin/mbed
    def exporter_junit_ioper(self, test_result_ext, test_suite_properties=None):
        from junit_xml import TestSuite, TestCase

        test_suites = []
        test_cases = []

        for platform in sorted(test_result_ext.keys()):
            # {platform: [(result, name, scope, description), ...]}
            test_cases = []
            for tr_result in test_result_ext[platform]:
                result, name, scope, description = tr_result

                classname = "test.ioper.%s.%s.%s" % (platform, name, scope)
                elapsed_sec = 0
                _stdout = description
                _stderr = ""
                # Test case
                tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                # Test case extra failure / error info
                if result == "FAIL":
                    tc.add_failure_info(description, _stdout)
                elif result == "ERROR":
                    tc.add_error_info(description, _stdout)
                elif result == "SKIP" or result == "NOT_SUPPORTED":
                    tc.add_skipped_info(description, _stdout)

                test_cases.append(tc)
            ts = TestSuite("test.suite.ioper.%s" % (platform), test_cases)
            test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Code example #13
    def print_matches(self, matches, rules=None, filenames=None):
        """Output all the matches"""

        if not rules:
            return None

        test_cases = []
        for rule in rules.all_rules:
            if not rules.is_rule_enabled(rule):
                if not rule.id:
                    continue
                test_case = TestCase(
                    name='{0} {1}'.format(rule.id, rule.shortdesc))

                if rule.experimental:
                    test_case.add_skipped_info(
                        message='Experimental rule - not enabled')
                else:
                    test_case.add_skipped_info(message='Ignored rule')
                test_cases.append(test_case)
            else:
                test_case = TestCase(name='{0} {1}'.format(
                    rule.id, rule.shortdesc),
                                     allow_multiple_subelements=True,
                                     url=rule.source_url)
                for match in matches:
                    if match.rule.id == rule.id:
                        test_case.add_failure_info(
                            message=self._failure_format(match),
                            failure_type=match.message)
                test_cases.append(test_case)

        test_suite = TestSuite('CloudFormation Lint', test_cases)

        return to_xml_report_string([test_suite], prettyprint=True)
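The to_xml_report_string function used here is the module-level serializer introduced in junit-xml 1.9; it supersedes the TestSuite.to_xml_string staticmethod that the older snippets in this collection call.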
Code example #14
 def test_init_skipped_output(self):
     tc = TestCase('Skipped-Output')
     tc.add_skipped_info(output="I skipped!")
     (ts, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
     verify_test_case(self,
                      tcs[0], {'name': 'Skipped-Output'},
                      skipped_output="I skipped!")
Code example #15
 def test_init_skipped_message(self):
     tc = TestCase('Skipped-Message')
     tc.add_skipped_info("skipped message")
     (ts, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
     verify_test_case(self,
                      tcs[0], {'name': 'Skipped-Message'},
                      skipped_message="skipped message")
Code example #16
    def test_init_unicode(self):
        tc = TestCase(name=decode('Test äöü', 'utf-8'),
                      classname=decode('some.class.name.äöü', 'utf-8'),
                      elapsed_sec=123.345,
                      stdout=decode('I am stdöüt!', 'utf-8'),
                      stderr=decode('I am stdärr!', 'utf-8'))
        tc.add_skipped_info(message=decode('Skipped äöü', 'utf-8'),
                            output=decode('I skippäd!', 'utf-8'))
        tc.add_error_info(message=decode('Skipped error äöü', 'utf-8'),
                          output=decode('I skippäd with an error!', 'utf-8'))

        (ts, tcs) = serialize_and_read(TestSuite('Test Unicode', [tc]))[0]
        verify_test_case(
            self,
            tcs[0], {
                'name': decode('Test äöü', 'utf-8'),
                'classname': decode('some.class.name.äöü', 'utf-8'),
                'time': ("%f" % 123.345)
            },
            stdout=decode('I am stdöüt!', 'utf-8'),
            stderr=decode('I am stdärr!', 'utf-8'),
            skipped_message=decode('Skipped äöü', 'utf-8'),
            skipped_output=decode('I skippäd!', 'utf-8'),
            error_message=decode('Skipped error äöü', 'utf-8'),
            error_output=decode('I skippäd with an error!', 'utf-8'))
Code example #17
 def test_init_skipped(self):
     tc = TestCase('Skipped-Message-and-Output')
     tc.add_skipped_info("skipped message", "I skipped!")
     (ts, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
     verify_test_case(
         self, tcs[0], {'name': 'Skipped-Message-and-Output'},
         skipped_message="skipped message", skipped_output="I skipped!")
Code example #18
def main():
    parser = argparse.ArgumentParser(description='dummy test')
    parser.add_argument('-classes', type=int, default=5, help='number of classes')
    parser.add_argument('-testcases', type=int, default=10, help='number of testcases')
    parser.add_argument('-pass_rate', type=int, default=75, help='pass rate')
    parser.add_argument('-error_rate', type=int, default=20, help='error rate')
    parser.add_argument('-failure_rate', type=int, default=10, help='failure rate')
    parser.add_argument('-skip_rate', type=int, default=10, help='skip rate')
    parser.add_argument('-outputfile', type=str, default='test_results.xml', help='output file')
    parser.add_argument('-print', action='store_true', help='print the test results')
    args = parser.parse_args()

    ts = TestSuite(name='my test suite', hostname=platform.node(), timestamp=datetime.now())
    for i in range(args.classes):
        for j in range(args.testcases):
            tc = TestCase(classname=f"myclass{i}",
                          name=f"mytest{j}",
                          elapsed_sec=random.randint(100, 1000),
                          stdout = "stdout output",
                          stderr = "stderr output")
            if random.randint(0, 100) < args.pass_rate:
                if random.randint(0, 100) < args.error_rate:
                    tc.add_error_info(message=f"error {i} {j}", output="error output message", error_type="ERR1")
                elif random.randint(0, 100) < args.failure_rate:
                    tc.add_failure_info(message=f"failure {i} {j}", output="failure output message", failure_type="FAIL1")
                elif random.randint(0, 100) < args.skip_rate:
                    tc.add_skipped_info(message=f"skipped {i} {j}", output="skipped output message")
            ts.test_cases.append(tc)

    # pretty printing is on by default but can be disabled using prettyprint=False
    if args.print:
        print(TestSuite.to_xml_string([ts]))

    with open(args.outputfile, 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=True)
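Run as a script, the generator above could be invoked along these lines (the file name is hypothetical):

python generate_dummy_results.py -classes 3 -testcases 5 -outputfile results.xml -print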
Code example #19
File: test_exporters.py Project: yugo-ren/mbed
    def exporter_junit_ioper(self,
                             test_result_ext,
                             test_suite_properties=None):
        from junit_xml import TestSuite, TestCase
        test_suites = []
        test_cases = []

        for platform in sorted(test_result_ext.keys()):
            # {platform: [(result, name, scope, description), ...]}
            test_cases = []
            for tr_result in test_result_ext[platform]:
                result, name, scope, description = tr_result

                classname = 'test.ioper.%s.%s.%s' % (platform, name, scope)
                elapsed_sec = 0
                _stdout = description
                _stderr = ''
                # Test case
                tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                # Test case extra failure / error info
                if result == 'FAIL':
                    tc.add_failure_info(description, _stdout)
                elif result == 'ERROR':
                    tc.add_error_info(description, _stdout)
                elif result == 'SKIP':
                    tc.add_skipped_info(description, _stdout)

                test_cases.append(tc)
            ts = TestSuite("test.suite.ioper.%s" % (platform), test_cases)
            test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Code example #20
File: junitxml_utils.py Project: sam-falvo/tetra
def _gen_cases(n_passes, n_fails, n_skips, n_errors):
    result = []
    for i in range(n_passes):
        case = TestCase(name='TestPassed%s' % i,
                        classname='generated.xml.test.case.passes',
                        elapsed_sec=rand_duration())
        result.append(case)

    for i in range(n_skips):
        case = TestCase(name='TestSkipped%s' % i,
                        classname='generated.xml.test.case.skips',
                        elapsed_sec=rand_duration())
        case.add_skipped_info(message=rand_string('skipped!'))
        result.append(case)

    for i in range(n_fails):
        case = TestCase(name='TestFailed%s' % i,
                        classname='generated.xml.test.case.fails',
                        elapsed_sec=rand_duration())
        case.add_failure_info(message=rand_string('failure!'))
        result.append(case)

    for i in range(n_errors):
        case = TestCase(name='TestErrored%s' % i,
                        classname='generated.xml.test.case.errors',
                        elapsed_sec=rand_duration())
        case.add_error_info(message=rand_string('error!'))
        result.append(case)

    return result
Code example #21
 def run_tests(self):
     test_cases = []
     should_update_env = True
     for test in self.tests:
         desc = test['desc']
         name = test['name']
         index = test['id']
         test_case = TestCase(name, self.name)
         if '[.]' in desc:
             print('skipping test "{}"'.format(name))
             test_case.add_skipped_info(
                 message="Skipped test marked with [.]")
         else:
             test_output = StringIO()
             self.sp.logfile = test_output
             print('running test "{}"'.format(name))
             if should_update_env:
                 res = self.update_env(self.env_vars)
                 if res != BSTestRunner.SUCCESS:
                     print('failed to set environment variables')
                     break
                 res = self.pretest()
                 if res != BSTestRunner.SUCCESS:
                     print('failed to run pretest init')
                     break
                 should_update_env = False
             if name in self.mocks:
                 debug_print('setting up mocks')
                 self.mocks[name]['request_env'] = self.request_env
                 self.mocks[name]['setup']()
                 extra_env = mock_decorators.get_all_envs(name)
                 if extra_env is not None:
                     self.update_env(extra_env)
             t_start = time.time()
             result = self.run_test(index)
             if name in self.mocks:
                 debug_print('tearing down mocks')
                 try:
                     self.mocks[name]['teardown']()
                 except AssertionError:
                     debug_print('teardown assert failure')
                     result = BSTestRunner.FAIL
             t_stop = time.time()
             self.sp.logfile = None
             test_case.elapsed_sec = t_stop - t_start
             debug_print('test output was:')
             debug_print(test_output.getvalue())
             if result == BSTestRunner.SUCCESS:
                 # Keep ASCII characters only. On Python 3 a bare filter()
                 # would assign an iterator here rather than a string.
                 test_case.stdout = ''.join(
                     c for c in test_output.getvalue() if ord(c) < 128)
                 print('test "{}" passed'.format(name))
             else:
                 print('test "{}" failed'.format(name))
                 test_case.add_failure_info('Test failed',
                                            output=test_output.getvalue())
                 should_update_env = True
             test_output.close()
         test_cases += [test_case]
     return TestSuite(self.name, test_cases)
Code example #22
def test_init_skipped_err_output():
    tc = Case("Skipped-Output")
    tc.add_skipped_info(output="I skipped!")
    tc.add_error_info(output="I skipped with an error!")
    ts, tcs = serialize_and_read(Suite("test", [tc]))[0]
    verify_test_case(tcs[0], {"name": "Skipped-Output"},
                     skipped_output="I skipped!",
                     error_output="I skipped with an error!")
Code example #23
def format_test_results(results, endpoints, format, args):
    formatted = None
    total_time = 0
    max_name_len = 0
    ignored_tests = []
    if "suite" in vars(args):
        ignored_tests = args.ignore
    for test_result in results["result"]:
        _check_test_result(test_result, results)
        total_time += test_result.elapsed_time
        max_name_len = max(max_name_len, len(test_result.name))
    if format == "json":
        formatted = {
            "suite": results["suite"],
            "timestamp": time.time(),
            "duration": total_time,
            "results": [],
            "config": _export_config(),
            "endpoints": endpoints
        }
        for test_result in results["result"]:
            formatted["results"].append({
                "name": test_result.name,
                "state": str(TestStates.DISABLED if test_result.name in ignored_tests else test_result.state),
                "detail": test_result.detail,
                "duration": test_result.elapsed_time
            })
        formatted = json.dumps(formatted, sort_keys=True, indent=4)
    elif format == "junit":
        test_cases = []
        for test_result in results["result"]:
            test_case = TestCase(test_result.name, classname=results["suite"],
                                 elapsed_sec=test_result.elapsed_time, timestamp=test_result.timestamp)
            if test_result.name in ignored_tests or test_result.state in [
                TestStates.DISABLED,
                TestStates.UNCLEAR,
                TestStates.MANUAL,
                TestStates.NA,
                TestStates.OPTIONAL
            ]:
                test_case.add_skipped_info(test_result.detail)
            elif test_result.state in [TestStates.WARNING, TestStates.FAIL]:
                test_case.add_failure_info(test_result.detail, failure_type=str(test_result.state))
            elif test_result.state != TestStates.PASS:
                test_case.add_error_info(test_result.detail, error_type=str(test_result.state))
            test_cases.append(test_case)
        formatted = TestSuite(results["def"]["name"] + ": " + ", ".join(results["urls"]), test_cases)
    elif format == "console":
        formatted = "\r\nPrinting test results for suite '{}' using API(s) '{}'\r\n" \
                    .format(results["suite"], ", ".join(results["urls"]))
        formatted += "----------------------------\r\n"
        for test_result in results["result"]:
            num_extra_dots = max_name_len - len(test_result.name)
            test_state = str(TestStates.DISABLED if test_result.name in ignored_tests else test_result.state)
            formatted += "{} ...{} {}\r\n".format(test_result.name, ("." * num_extra_dots), test_state)
        formatted += "----------------------------\r\n"
        formatted += "Ran {} tests in ".format(len(results["result"])) + "{0:.3f}s".format(total_time) + "\r\n"
    return formatted
Code example #24
 def test_init_skipped_err_output(self):
     tc = TestCase('Skipped-Output')
     tc.add_skipped_info(output="I skipped!")
     tc.add_error_info(output="I skipped with an error!")
     (ts, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
     verify_test_case(
         self, tcs[0],
         {'name': 'Skipped-Output'},
         skipped_output="I skipped!",
         error_output="I skipped with an error!")
Code example #25
File: parse_results.py Project: a-ilango/libfabric
def generate_generic_test_case(name, classname, time, message, result):
    default_pass_message = 'Test passed, check build log for additional details'
    default_skip_message = 'Test skipped, check build log for additional details'
    tc = TestCase(name, classname, time,
                  default_pass_message if result == 'pass' else '')
    if result == 'fail':
        tc.add_failure_info(message=message)
    if result == 'skip':
        tc.add_skipped_info(message=default_skip_message)

    return tc
Code example #26
File: parse_results.py Project: glycerine/go-mpi
def generate_generic_test_case(name, classname, time, message, result):
    default_pass_message = 'Test passed, check build log for additional details'
    default_skip_message = 'Test skipped, check build log for additional details'
    tc = TestCase(name, classname, time,
                  default_pass_message if result == 'pass' else '')
    if result == 'fail':
        tc.add_failure_info(message=message)
    if result == 'skip':
        tc.add_skipped_info(message=default_skip_message)

    return tc
Code example #27
File: generate.py Project: dmsimard/ara
    def take_action(self, args):
        test_cases = []
        if args.playbook is not None:
            playbooks = args.playbook
            results = (models.TaskResult().query
                       .join(models.Task)
                       .filter(models.TaskResult.task_id == models.Task.id)
                       .filter(models.Task.playbook_id.in_(playbooks)))
        else:
            results = models.TaskResult().query.all()

        for result in results:
            task_name = result.task.name
            if not task_name:
                task_name = result.task.action
            additional_results = {
                'host': result.host.name,
                'playbook_path': result.task.playbook.path
            }
            result_str = jsonutils.dumps(additional_results)
            test_path = \
                u'{playbook_file}.{play_name}'.format(
                    playbook_file=os.path.basename(result.task.playbook.path),
                    play_name=result.task.play.name)
            test_case = TestCase(
                name=task_name,
                classname=test_path,
                elapsed_sec=result.duration.seconds,
                stdout=result_str)
            if result.status == 'skipped':
                test_case.add_skipped_info(message=result.result)
            elif ((result.status in ('failed', 'unreachable') and
                    result.ignore_errors is False and
                    'EXPECTED FAILURE' not in task_name and
                    'TOGGLE RESULT' not in task_name) or
                    (result.status == 'ok' and 'TOGGLE RESULT' in task_name)):
                test_case.add_failure_info(message=result.result)
            test_cases.append(test_case)
        test_suite = TestSuite('Ansible Tasks', test_cases)

        # TODO: junit_xml doesn't order the TestCase parameters.
        # This makes it so the order of the parameters for the same exact
        # TestCase is not guaranteed to be the same and thus results in a
        # different stdout (or file). This is easily reproducible on Py3.
        xml_string = six.text_type(test_suite.to_xml_string([test_suite]))
        if args.output_file == '-':
            if six.PY2:
                sys.stdout.write(encodeutils.safe_encode(xml_string))
            else:
                sys.stdout.buffer.write(encodeutils.safe_encode(xml_string))
        else:
            with open(args.output_file, 'wb') as f:
                f.write(encodeutils.safe_encode(xml_string))
Code example #28
    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format
        """
        from junit_xml import TestSuite, TestCase
        test_suites = []
        test_cases = []

        targets = sorted(test_result_ext.keys())
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                test_cases = []
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_results = test_result_ext[target][toolchain][test]
                    for test_res in test_results:
                        test_ids = sorted(test_res.keys())
                        for test_no in test_ids:
                            test_result = test_res[test_no]
                            name = test_result['description']
                            classname = '%s.%s.%s.%s' % (self.package, target,
                                                         toolchain,
                                                         test_result['id'])
                            elapsed_sec = test_result['elapsed_time']
                            _stdout = test_result['output']

                            if 'target_name_unique' in test_result:
                                _stderr = test_result['target_name_unique']
                            else:
                                _stderr = test_result['target_name']

                            # Test case
                            tc = TestCase(name, classname, elapsed_sec,
                                          _stdout, _stderr)

                            # Test case extra failure / error info
                            message = test_result['result']
                            if test_result['result'] == 'FAIL':
                                tc.add_failure_info(message, _stdout)
                            elif test_result['result'] == 'SKIP' or test_result[
                                    "result"] == 'NOT_SUPPORTED':
                                tc.add_skipped_info(message, _stdout)
                            elif test_result['result'] != 'OK':
                                tc.add_error_info(message, _stdout)

                            test_cases.append(tc)

                ts = TestSuite(
                    "test.suite.%s.%s" % (target, toolchain),
                    test_cases,
                    properties=test_suite_properties[target][toolchain])
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Code example #29
File: runner.py Project: 4m1g0/Arduino
 def run_tests(self):
     test_cases = []
     should_update_env = True
     for test in self.tests:
         desc = test['desc']
         name = test['name']
         index = test['id']
         test_case = TestCase(name, self.name)
         if '[.]' in desc:
             print('skipping test "{}"'.format(name))
             test_case.add_skipped_info(message="Skipped test marked with [.]")
         else:
             test_output = StringIO()
             self.sp.logfile = test_output
             print('running test "{}"'.format(name))
             if should_update_env:
                 res = self.update_env(self.env_vars)
                 if res != BSTestRunner.SUCCESS:
                     print('failed to set environment variables')
                     break
                 should_update_env = False
             if name in self.mocks:
                 debug_print('setting up mocks')
                 self.mocks[name]['request_env'] = self.request_env
                 self.mocks[name]['setup']()
                 extra_env = mock_decorators.get_all_envs(name)
                 if extra_env is not None:
                     self.update_env(extra_env)
             t_start = time.time()
             result = self.run_test(index)
             if name in self.mocks:
                 debug_print('tearing down mocks')
                 try:
                     self.mocks[name]['teardown']()
                 except AssertionError:
                     debug_print('teardown assert failure')
                     result = BSTestRunner.FAIL
             t_stop = time.time()
             self.sp.logfile = None
             test_case.elapsed_sec = t_stop - t_start
             debug_print('test output was:')
             debug_print(test_output.getvalue())
             if result == BSTestRunner.SUCCESS:
                 test_case.stdout = test_output.getvalue()
                 print('test "{}" passed'.format(name))
             else:
                 print('test "{}" failed'.format(name))
                 test_case.add_failure_info('Test failed', output=test_output.getvalue())
                 should_update_env = True
             test_output.close()
         test_cases.append(test_case)
     return TestSuite(self.name, test_cases)
Code example #30
File: test_exporters.py Project: jaustin/mbed
    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format
        """
        from junit_xml import TestSuite, TestCase

        test_suites = []
        test_cases = []

        targets = sorted(test_result_ext.keys())
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                test_cases = []
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_results = test_result_ext[target][toolchain][test]
                    for test_res in test_results:
                        test_ids = sorted(test_res.keys())
                        for test_no in test_ids:
                            test_result = test_res[test_no]
                            name = test_result["description"]
                            classname = "%s.%s.%s.%s" % (self.package, target, toolchain, test_result["id"])
                            elapsed_sec = test_result["elapsed_time"]
                            _stdout = test_result["output"]

                            if "target_name_unique" in test_result:
                                _stderr = test_result["target_name_unique"]
                            else:
                                _stderr = test_result["target_name"]

                            # Test case
                            tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)

                            # Test case extra failure / error info
                            message = test_result["result"]
                            if test_result["result"] == "FAIL":
                                tc.add_failure_info(message, _stdout)
                            elif test_result["result"] == "SKIP" or test_result["result"] == "NOT_SUPPORTED":
                                tc.add_skipped_info(message, _stdout)
                            elif test_result["result"] != "OK":
                                tc.add_error_info(message, _stdout)

                            test_cases.append(tc)

                ts = TestSuite(
                    "test.suite.%s.%s" % (target, toolchain),
                    test_cases,
                    properties=test_suite_properties[target][toolchain],
                )
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Code example #31
    def take_action(self, args):
        test_cases = []
        if args.playbook is not None:
            playbooks = args.playbook
            results = (models.TaskResult().query.join(models.Task).filter(
                models.TaskResult.task_id == models.Task.id).filter(
                    models.Task.playbook_id.in_(playbooks)))
        else:
            results = models.TaskResult().query.all()

        for result in results:
            task_name = result.task.name
            if not task_name:
                task_name = result.task.action
            additional_results = {
                'host': result.host.name,
                'playbook_path': result.task.playbook.path
            }
            result_str = jsonutils.dumps(additional_results)
            test_path = \
                u'{playbook_file}.{play_name}'.format(
                    playbook_file=os.path.basename(result.task.playbook.path),
                    play_name=result.task.play.name)
            test_case = TestCase(name=task_name,
                                 classname=test_path,
                                 elapsed_sec=result.duration.seconds,
                                 stdout=result_str)
            if result.status == 'skipped':
                test_case.add_skipped_info(message=result.result)
            elif ((result.status in ('failed', 'unreachable')
                   and result.ignore_errors is False
                   and 'EXPECTED FAILURE' not in task_name
                   and 'TOGGLE RESULT' not in task_name)
                  or (result.status == 'ok' and 'TOGGLE RESULT' in task_name)):
                test_case.add_failure_info(message=result.result)
            test_cases.append(test_case)
        test_suite = TestSuite('Ansible Tasks', test_cases)

        # TODO: junit_xml doesn't order the TestCase parameters.
        # This makes it so the order of the parameters for the same exact
        # TestCase is not guaranteed to be the same and thus results in a
        # different stdout (or file). This is easily reproducible on Py3.
        xml_string = six.text_type(test_suite.to_xml_string([test_suite]))
        if args.output_file == '-':
            if six.PY2:
                sys.stdout.write(encodeutils.safe_encode(xml_string))
            else:
                sys.stdout.buffer.write(encodeutils.safe_encode(xml_string))
        else:
            with open(args.output_file, 'wb') as f:
                f.write(encodeutils.safe_encode(xml_string))
Code example #32
def format_test_results(results, format):
    formatted = None
    if format == "json":
        formatted = {
            "suite": results["suite"],
            "url": results["base_url"],
            "timestamp": time.time(),
            "results": []
        }
        for test_result in results["result"]:
            formatted["results"].append({
                "name": test_result.name,
                "state": str(test_result.state),
                "detail": test_result.detail
            })
        formatted = json.dumps(formatted, sort_keys=True, indent=4)
    elif format == "junit":
        test_cases = []
        for test_result in results["result"]:
            test_case = TestCase(test_result.name,
                                 classname=results["suite"],
                                 elapsed_sec=test_result.elapsed_time,
                                 timestamp=test_result.timestamp)
            if test_result.name in args.ignore or test_result.state in [
                    TestStates.DISABLED, TestStates.UNCLEAR, TestStates.MANUAL,
                    TestStates.NA, TestStates.OPTIONAL
            ]:
                test_case.add_skipped_info(test_result.detail)
            elif test_result.state in [TestStates.WARNING, TestStates.FAIL]:
                test_case.add_failure_info(test_result.detail,
                                           failure_type=str(test_result.state))
            elif test_result.state != TestStates.PASS:
                test_case.add_error_info(test_result.detail,
                                         error_type=str(test_result.state))
            test_cases.append(test_case)
        formatted = TestSuite(
            results["def"]["name"] + ": " + results["base_url"], test_cases)
    elif format == "console":
        formatted = "\r\nPrinting test results for suite '{}' using API '{}'\r\n" \
                        .format(results["suite"], results["base_url"])
        formatted += "----------------------------\r\n"
        total_time = 0
        for test_result in results["result"]:
            formatted += "{} ... {}\r\n".format(test_result.name,
                                                str(test_result.state))
            total_time += test_result.elapsed_time
        formatted += "----------------------------\r\n"
        formatted += "Ran {} tests in ".format(len(
            results["result"])) + "{0:.3f}s".format(total_time) + "\r\n"
    return formatted
Code example #33
 def test_init_utf8(self):
     tc = TestCase('Test äöü', 'some.class.name.äöü', 123.345, 'I am stdöüt!', 'I am stdärr!')
     tc.add_skipped_info(message='Skipped äöü', output="I skippäd!")
     tc.add_error_info(message='Skipped error äöü', output="I skippäd with an error!")
     test_suite = TestSuite('Test UTF-8', [tc])
     (ts, tcs) = serialize_and_read(test_suite, encoding='utf-8')[0]
     verify_test_case(self, tcs[0], {'name': decode('Test äöü', 'utf-8'),
                                     'classname': decode('some.class.name.äöü', 'utf-8'),
                                     'time': ("%f" % 123.345)},
                     stdout=decode('I am stdöüt!', 'utf-8'), stderr=decode('I am stdärr!', 'utf-8'),
                     skipped_message=decode('Skipped äöü', 'utf-8'),
                     skipped_output=decode('I skippäd!', 'utf-8'),
                     error_message=decode('Skipped error äöü', 'utf-8'),
                     error_output=decode('I skippäd with an error!', 'utf-8'))
Code example #34
File: report_utils.py Project: InbarRose/irtools
def create_junit_results(data, output=None, **kwargs):
    """
    Creates a Junit result, can write to a file if desired, or return xml string. (used by Jenkins)
    input either dict(dict(dict())) or dict(list(dict()))
    dict = {suite: {test: {stderr,stdout,time,class,err,fail,skip}}}
    list = {suite: [(test, {stderr,stdout,time,class,err,fail,skip})]}
    :param data: A dictionary with dict or list hierarchy
    :param output: A filename to write results to  /path/to/file/*.junit.xml
    :return: Returns an XML string if no output, else nothing.
    """
    log.debug('creating junit results: output={}'.format(output))
    stdout_format = kwargs.pop('stdout_format', None)
    test_class = kwargs.pop('test_class', None)
    package = kwargs.pop('package', None)
    from junit_xml import TestSuite, TestCase
    test_suites = []
    for suite, tests in data.items():
        test_cases = []
        for test, result in (tests if isinstance(tests, list) else tests.items()):
            tc = TestCase(test)
            stdout = result.get('stdout')
            if stdout_format is not None and callable(stdout_format):
                if hasattr(stdout_format, 'func_code') and 'kwargs' in stdout_format.func_code.co_varnames:
                    stdout = stdout_format(stdout, suite_name=suite, test_name=test, **kwargs)
                else:
                    stdout = stdout_format(stdout)
            tc.stdout = stdout
            tc.stderr = result.get('stderr')
            tc.elapsed_sec = result.get('time')
            tc.classname = result.get('class', test_class)
            err = result.get('err')
            if err:
                tc.add_error_info(*err if isinstance(err, (list, tuple)) else [err])
            fail = result.get('fail')
            if fail:
                tc.add_failure_info(*fail if isinstance(fail, (list, tuple)) else [fail])
            skip = result.get('skip')
            if skip:
                tc.add_skipped_info(*skip if isinstance(skip, (list, tuple)) else [skip])
            test_cases.append(tc)
        ts = TestSuite(suite, test_cases, package=package)
        test_suites.append(ts)

    if output:
        check_makedir(os.path.dirname(output))
        with open(output, 'w') as out:
            TestSuite.to_file(out, test_suites)
        return output
    else:
        return TestSuite.to_xml_string(test_suites)
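A minimal invocation matching the docstring's dict-of-dicts shape might look like this; the suite name, test names, and result keys are illustrative:

# Hypothetical input in the documented {suite: {test: {...}}} shape.
data = {
    "suite-a": {
        "test-1": {"time": 0.5, "stdout": "ok"},
        "test-2": {"time": 0.1, "skip": "not supported here"},
    }
}
xml_string = create_junit_results(data)  # no output path given, so the XML string is returned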
Code example #35
File: junit_reporter.py Project: JahanviB/Zopkio
 def _generate_junit_xml(self, config_name):
     testcases = []
     tests = self.data_source.get_test_results(config_name)
     for test in tests:
         test_time = 0
         if test.func_end_time is not None and test.func_start_time is not None:
             test_time = test.func_end_time - test.func_start_time
         tc = TestCase(test.name, config_name, test_time, test.description, test.message)
         if 'failed' in test.result:
             tc.add_failure_info(test.result)
         elif 'skipped' in test.result:
             tc.add_skipped_info(test.result)
         testcases.append(tc)
     testsuite = TestSuite(config_name+'_'+self.name, testcases)
     return testsuite
Code example #36
 def _generate_junit_xml(self, config_name):
     testcases = []
     tests = self.data_source.get_test_results(config_name)
     for test in tests:
         test_time = 0
         if test.func_end_time is not None and test.func_start_time is not None:
             test_time = test.func_end_time - test.func_start_time
         tc = TestCase(test.name, config_name, test_time, test.description,
                       test.message)
         if 'failed' in test.result:
             tc.add_failure_info(test.result)
         elif 'skipped' in test.result:
             tc.add_skipped_info(test.result)
         testcases.append(tc)
     testsuite = TestSuite(config_name + '_' + self.name, testcases)
     return testsuite
Code example #37
    def _build_test_case(self, task_data, host_data):
        """ build a TestCase from the given TaskData and HostData """

        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
        duration = host_data.finish - task_data.start

        if self._task_relative_path:
            junit_classname = os.path.relpath(task_data.path,
                                              self._task_relative_path)
        else:
            junit_classname = task_data.path

        if self._task_class == 'true':
            junit_classname = re.sub(r'\.yml:[0-9]+$', '', junit_classname)

        if host_data.status == 'included':
            return TestCase(name, junit_classname, duration, host_data.result)

        res = host_data.result._result
        rc = res.get('rc', 0)
        dump = self._dump_results(res, indent=0)
        dump = self._cleanse_string(dump)

        if host_data.status == 'ok':
            return TestCase(name, junit_classname, duration, dump)

        test_case = TestCase(name, junit_classname, duration)

        if host_data.status == 'failed':
            if 'exception' in res:
                message = res['exception'].strip().split('\n')[-1]
                output = res['exception']
                test_case.add_error_info(message, output)
            elif 'msg' in res:
                message = res['msg']
                test_case.add_failure_info(message, dump)
            else:
                test_case.add_failure_info('rc=%s' % rc, dump)
        elif host_data.status == 'skipped':
            if 'skip_reason' in res:
                message = res['skip_reason']
            else:
                message = 'skipped'
            test_case.add_skipped_info(message)

        return test_case
Code example #38
File: test_exporters.py Project: DanKupiniak/mbed
    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format
        """
        from junit_xml import TestSuite, TestCase
        test_suites = []
        test_cases = []

        targets = sorted(test_result_ext.keys())
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                test_cases = []
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_results = test_result_ext[target][toolchain][test]
                    for test_res in test_results:
                        test_ids = sorted(test_res.keys())
                        for test_no in test_ids:
                            test_result = test_res[test_no]
                            name = test_result['description']
                            classname = '%s.%s.%s.%s'% (self.package, target, toolchain, test_result['id'])
                            elapsed_sec = test_result['elapsed_time']
                            _stdout = test_result['output']

                            if 'target_name_unique' in test_result:
                                _stderr = test_result['target_name_unique']
                            else:
                                _stderr = test_result['target_name']

                            # Test case
                            tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)

                            # Test case extra failure / error info
                            message = test_result['result']
                            if test_result['result'] == 'FAIL':
                                tc.add_failure_info(message, _stdout)
                            elif test_result['result'] == 'SKIP' or test_result["result"] == 'NOT_SUPPORTED':
                                tc.add_skipped_info(message, _stdout)
                            elif test_result['result'] != 'OK':
                                tc.add_error_info(message, _stdout)

                            test_cases.append(tc)

                ts = TestSuite("test.suite.%s.%s"% (target, toolchain), test_cases, properties=test_suite_properties[target][toolchain])
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Code example #39
    def test_init_unicode(self):
        tc = TestCase(decode('Test äöü', 'utf-8'), decode('some.class.name.äöü', 'utf-8'), 123.345,
                      decode('I am stdöüt!', 'utf-8'), decode('I am stdärr!', 'utf-8'))
        tc.add_skipped_info(message=decode('Skipped äöü', 'utf-8'),
                            output=decode('I skippäd!', 'utf-8'))
        tc.add_error_info(message=decode('Skipped error äöü', 'utf-8'),
                          output=decode('I skippäd with an error!', 'utf-8'))

        (ts, tcs) = serialize_and_read(TestSuite('Test Unicode',
                                                 [tc]))[0]
        verify_test_case(self, tcs[0], {'name': decode('Test äöü', 'utf-8'),
                                        'classname': decode('some.class.name.äöü', 'utf-8'),
                                        'time': ("%f" % 123.345)},
                        stdout=decode('I am stdöüt!', 'utf-8'), stderr=decode('I am stdärr!', 'utf-8'),
                        skipped_message=decode('Skipped äöü', 'utf-8'),
                        skipped_output=decode('I skippäd!', 'utf-8'),
                        error_message=decode('Skipped error äöü', 'utf-8'),
                        error_output=decode('I skippäd with an error!', 'utf-8'))
Code example #40
def handle(line, inpipe):
    global logged_in
    global test_cases
    print("> {}".format(line), end='', flush=True)
    if not logged_in:
        if line.startswith('Welcome to Buildroot'):
            time.sleep(1)
            print("we got the prompt!")
            send("root\n", inpipe)
            logged_in = True
            time.sleep(5)
            send("cd /usr/lib/uclibc-ng-test/test\n", inpipe)
            send("sh uclibcng-testrunner.sh\n", inpipe)
    else:
        if 'PASS ' in line:
            r = re.match("PASS (.*)", line)
            if r:
                test_name = r.group(1)
                test = TestCase(test_name, '', time.time())
                test_cases.append(test)

        if 'FAIL ' in line:
            r = re.match("FAIL (.*)", line)
            if r:
                test_name = r.group(1)
                test = TestCase(test_name, '', time.time())
                test.add_failure_info(message="FAIL")
                test_cases.append(test)

        if 'SKIP' in line:
            r = re.match("SKIP (.*)", line)
            if r:
                test_name = r.group(1)
                test = TestCase(test_name, '', time.time())
                test.add_skipped_info(message="SKIP")
                test_cases.append(test)

        if 'Total passed:' in line:
            print(
                "uClibc-ng testsuite run is over, writing test results and exiting."
            )
            return False

    return True
Code example #41
File: ComplianceSuite.py Project: wibraun/OMCompiler
def readTest(f, expectedFailures):
    cl = ".".join(f.split(".")[:-1])
    name = f.split(".")[-2]
    with open(f) as fin:
        try:
            res = simplejson.load(fin)
        except simplejson.errors.JSONDecodeError:
            print("Error loading file %s" % f)
            raise

    expectFail = cl in expectedFailures

    if "killed" in res:
        tc1 = TestCase(name, cl, 0, '', '')
        tc2 = TestCase(name, cl, 0, '', '')
        if expectFail:
            tc1.add_skipped_info('Killed or crashed; expected failure')
        else:
            tc1.add_error_info('Killed or crashed')
        tc2.add_error_info('Killed or crashed')
        return (tc1, tc2, cl)

    tc1 = TestCase(name, cl, res["time"], res["messages"], '')
    tc2 = TestCase(name, cl, res["time"], res["messages"], '')
    success = res["success"]
    shouldPass = res["shouldPass"]
    if expectFail:
        if success:
            tc1.add_error_info(
                'This testcase started working (failure was expected)')
        else:
            tc1.add_skipped_info('This testcase still fails (as expected)')
    elif not success:
        if shouldPass:
            tc1.add_error_info('failed')
        else:
            tc1.add_error_info('expected failure, but passed')
    if not success:
        if shouldPass:
            tc2.add_error_info('failed')
        else:
            tc2.add_error_info('expected failure, but passed')
    return (tc1, tc2, None if success else cl)
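
readTest returns two TestCase variants (one treating expected failures as skips, one reporting them strictly) plus the class name of a failing test. A hedged driver sketch, not taken from the OMCompiler project, showing how the triples might be folded into two suites:

from junit_xml import TestSuite

def collect_results(files, expectedFailures):  # hypothetical driver
    cases_lenient, cases_strict, failing = [], [], []
    for f in files:
        tc1, tc2, cl = readTest(f, expectedFailures)
        cases_lenient.append(tc1)
        cases_strict.append(tc2)
        if cl is not None:
            failing.append(cl)
    return (TestSuite('compliance', cases_lenient),
            TestSuite('compliance-strict', cases_strict),
            failing)
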
Code example #42
File: generate.py Project: rbramwell/ara-1
    def take_action(self, args):
        test_cases = []
        if args.playbook is not None:
            playbooks = args.playbook
            results = (models.TaskResult().query
                       .join(models.Task)
                       .filter(models.TaskResult.task_id == models.Task.id)
                       .filter(models.Task.playbook_id.in_(playbooks)))
        else:
            results = models.TaskResult().query.all()

        for result in results:
            task_name = result.task.name
            if not task_name:
                task_name = result.task.action
            additional_results = {
                'host': result.host.name,
                'playbook_path': result.task.playbook.path
            }
            result_str = json.dumps(additional_results)
            test_path = \
                "{playbook_file}.{play_name}".format(
                    playbook_file=os.path.basename(result.task.playbook.path),
                    play_name=result.task.play.name)
            test_case = TestCase(
                name=task_name,
                classname=test_path,
                elapsed_sec=result.duration.seconds,
                stdout=result_str)
            if result.status == "skipped":
                test_case.add_skipped_info(message=result.result)
            elif (result.status in ("failed", "unreachable") and
                    result.ignore_errors is False):
                test_case.add_failure_info(message=result.result)
            test_cases.append(test_case)
        test_suite = TestSuite("Ansible Tasks", test_cases)

        xml_string = TestSuite.to_xml_string([test_suite])
        if args.output_file == "-":
            sys.stdout.write(xml_string)
        else:
            with open(args.output_file, "w") as f:
                f.write(xml_string)
Code example #44
File: junit.py Project: ernstp/ansible
    def _build_test_case(self, task_data, host_data):
        """ build a TestCase from the given TaskData and HostData """

        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
        duration = host_data.finish - task_data.start

        if self._task_class == 'true':
            junit_classname = re.sub(r'\.yml:[0-9]+$', '', task_data.path)
        else:
            junit_classname = task_data.path

        if host_data.status == 'included':
            return TestCase(name, junit_classname, duration, host_data.result)

        res = host_data.result._result
        rc = res.get('rc', 0)
        dump = self._dump_results(res, indent=0)
        dump = self._cleanse_string(dump)

        if host_data.status == 'ok':
            return TestCase(name, junit_classname, duration, dump)

        test_case = TestCase(name, junit_classname, duration)

        if host_data.status == 'failed':
            if 'exception' in res:
                message = res['exception'].strip().split('\n')[-1]
                output = res['exception']
                test_case.add_error_info(message, output)
            elif 'msg' in res:
                message = res['msg']
                test_case.add_failure_info(message, dump)
            else:
                test_case.add_failure_info('rc=%s' % rc, dump)
        elif host_data.status == 'skipped':
            if 'skip_reason' in res:
                message = res['skip_reason']
            else:
                message = 'skipped'
            test_case.add_skipped_info(message)

        return test_case
Code example #45
    def test_init_utf8(self):
        tc = TestCase('Test äöü', 'some.class.name.äöü', 123.345,
                      'I am stdöüt!', 'I am stdärr!')
        tc.add_skipped_info(message='Skipped äöü', output="I skippäd!")
        tc.add_error_info(message='Skipped error äöü',
                          output="I skippäd with an error!")
        test_suite = TestSuite('Test UTF-8', [tc])
        (ts, tcs) = serialize_and_read(test_suite, encoding='utf-8')[0]
        verify_test_case(
            self,
            tcs[0], {
                'name': decode('Test äöü', 'utf-8'),
                'classname': decode('some.class.name.äöü', 'utf-8'),
                'time': ("%f" % 123.345)
            },
            stdout=decode('I am stdöüt!', 'utf-8'),
            stderr=decode('I am stdärr!', 'utf-8'),
            skipped_message=decode('Skipped äöü', 'utf-8'),
            skipped_output=decode('I skippäd!', 'utf-8'),
            error_message=decode('Skipped error äöü', 'utf-8'),
            error_output=decode('I skippäd with an error!', 'utf-8'))
Code example #46
    def test_deploy_openstack_run_tempest(self, underlay, config,
                                          ccpcluster, k8s_actions, rally):
        """Deploy base environment

        Scenario:
        1. Revert snapshot
        2. Install ccp
        3. Deploy environment
        4. Run tempest

        Duration 35 min
        """
        remote = underlay.remote(host=config.k8s.kube_host)
        if settings.REGISTRY == "127.0.0.1:31500":
            k8s_actions.create_registry()
            ccpcluster.build()

        ccpcluster.deploy()
        post_os_deploy_checks.check_jobs_status(k8s_actions.api, timeout=4500)
        post_os_deploy_checks.check_pods_status(k8s_actions.api, timeout=4500)

        # prepare rally
        rally.prepare()
        rally.pull_image()
        rally.run()
        # run tempest
        rally.run_tempest()

        LOG.info('Storing test results...')
        res_file_name = 'result.json'
        file_prefix = 'results_' + datetime.datetime.now().strftime(
            '%Y%m%d_%H%M%S') + '_'
        file_dst = '{0}/logs/{1}{2}'.format(
            settings.LOGS_DIR, file_prefix, res_file_name)
        remote.download(
            '/home/{0}/rally/{1}'.format(settings.SSH_LOGIN, res_file_name),
            file_dst)
        res = json.load(remote.open('/home/{}/rally/result.json'.format(
            settings.SSH_LOGIN)))
        formatted_tc = []
        failed_cases = [res['test_cases'][case]
                        for case in res['test_cases']
                        if res['test_cases'][case]['status'] == 'fail']
        for case in failed_cases:
            if case:
                tc = TestCase(case['name'])
                tc.add_failure_info(case['traceback'])
                formatted_tc.append(tc)

        skipped_cases = [res['test_cases'][case]
                         for case in res['test_cases']
                         if res['test_cases'][case]['status'] == 'skip']
        for case in skipped_cases:
            if case:
                tc = TestCase(case['name'])
                tc.add_skipped_info(case['reason'])
                formatted_tc.append(tc)

        error_cases = [res['test_cases'][case] for case in res['test_cases']
                       if res['test_cases'][case]['status'] == 'error']

        for case in error_cases:
            if case:
                tc = TestCase(case['name'])
                tc.add_error_info(case['traceback'])
                formatted_tc.append(tc)

        success = [res['test_cases'][case] for case in res['test_cases']
                   if res['test_cases'][case]['status'] == 'success']
        for case in success:
            if case:
                tc = TestCase(case['name'])
                formatted_tc.append(tc)

        ts = TestSuite("tempest", formatted_tc)
        with open('tempest.xml', 'w') as f:
            TestSuite.to_file(f, [ts], prettyprint=False)
        fail_msg = 'Tempest verification fails {}'.format(res)
        assert res['failures'] == 0, fail_msg
Code example #47
File: __init__.py Project: pawelgalazka/flowp
    def add_skipped(self, behavior):
        super().add_skipped(behavior)
        test_case = TestCase(self.get_behaviors_description(behavior))
        test_case.add_skipped_info(message="Skipped")
        self.junit_cases.append(test_case)
Code example #48
File: cli.py Project: akabos/devpi-builder
    def _log_skip(self, text, package, version):
        logger.debug(text, package, version)

        log_entry = TestCase('{} {}'.format(package, version))
        log_entry.add_skipped_info(text % (package, version))
        self._results.append(log_entry)
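
_log_skip only accumulates TestCase entries in self._results; serialization happens elsewhere in devpi-builder. A plausible flush step, shown here as a free function with an illustrative suite name, not the project's actual code:

from junit_xml import TestSuite

def dump_results(results, path):  # hypothetical helper
    # `results` holds the TestCase objects appended by _log_skip and friends.
    with open(path, 'w') as f:
        TestSuite.to_file(f, [TestSuite('devpi-builder', results)])
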
Code example #49
def exporter_testcase_junit(test_result_ext, test_suite_properties=None):
    """! Export test results in JUnit XML compliant format
    @param test_result_ext Extended report from Greentea
    @param test_suite_properties Dictionary of test build names to test suite properties
    @details This function will import junit_xml library to perform report conversion
    @return String containing Junit XML formatted test result output
    """
    from junit_xml import TestSuite, TestCase

    test_suites = []

    for target_name in test_result_ext:
        test_results = test_result_ext[target_name]
        for test_suite_name in test_results:
            test = test_results[test_suite_name]

            # tc_elapsed_sec = test['elapsed_time']
            tc_stdout = str() #test['single_test_output']

            try:
                tc_stdout = test['single_test_output'].decode('unicode_escape').encode('ascii','ignore')
            except UnicodeDecodeError as e:
                err_mgs = "(UnicodeDecodeError) exporter_testcase_junit:", str(e)
                tc_stdout = err_mgs
                print err_mgs

            # testcase_result stores info about test case results
            testcase_result = test['testcase_result']
            #   "testcase_result": {
            #       "STRINGS004": {
            #           "duration": 0.009999990463256836,
            #           "time_start": 1453073018.275,
            #           "time_end": 1453073018.285,
            #           "result": 1
            #       },

            test_cases = []

            for tc_name in sorted(testcase_result.keys()):
                duration = testcase_result[tc_name].get('duration', 0.0)
                utest_log = testcase_result[tc_name].get('utest_log', '')
                result_text = testcase_result[tc_name].get('result_text', "UNDEF")

                try:
                    tc_stderr = '\n'.join(utest_log).decode('unicode_escape').encode('ascii','ignore')
                except UnicodeDecodeError as e:
                    err_mgs = "(UnicodeDecodeError) exporter_testcase_junit:" + str(e)
                    tc_stderr = err_mgs
                    print err_mgs

                tc_class = target_name + '.' + test_suite_name

                if result_text == 'SKIPPED':
                    # Skipped test cases do not have logs and we do not want to put
                    # whole log inside JUNIT for skipped test case
                    tc_stderr = str()

                tc = TestCase(tc_name, tc_class, duration, tc_stdout, tc_stderr)

                if result_text == 'FAIL':
                    tc.add_failure_info(result_text)
                elif result_text == 'SKIPPED':
                    tc.add_skipped_info(result_text)
                elif result_text != 'OK':
                    tc.add_error_info(result_text)

                test_cases.append(tc)

            ts_name = target_name
            test_build_properties = test_suite_properties.get(target_name) if test_suite_properties else None
            ts = TestSuite(ts_name, test_cases, properties=test_build_properties)
            test_suites.append(ts)

    return TestSuite.to_xml_string(test_suites)
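
A hedged usage sketch for exporter_testcase_junit (Python 2, like the function itself); the report structure below is inferred from the fields the function reads and is illustrative only:

report = {
    'K64F-GCC_ARM': {                       # target name (illustrative)
        'tests-mbed_drivers-ticker': {      # test suite name (illustrative)
            'single_test_output': 'test run log...',
            'testcase_result': {
                'STRINGS004': {'duration': 0.01, 'result_text': 'OK'},
                'STRINGS005': {'duration': 0.02, 'result_text': 'FAIL'},
            },
        },
    },
}
xml_string = exporter_testcase_junit(report, test_suite_properties={})
with open('greentea_report.xml', 'w') as f:
    f.write(xml_string)
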
Code example #50
File: __init__.py Project: pawelgalazka/flowp
    def add_skipped_slow(self, behavior):
        super().add_skipped_slow(behavior)
        test_case = TestCase(self.get_behaviors_description(behavior))
        test_case.add_skipped_info(message="Test ran too slowly and was skipped.")
        self.junit_cases.append(test_case)
Code example #51
File: parallel.py Project: keitaroyam/cctbx_fork
  def __init__ (self,
                cmd_list,
                nprocs=1,
                out=sys.stdout,
                log=None,
                verbosity=DEFAULT_VERBOSITY,
                output_junit_xml=False) :
    if (log is None) : log = null_out()
    self.out = multi_out()
    self.log = log
    self.out.register("stdout", out)
    self.out.register("log", log)
    self.verbosity = verbosity
    self.quiet = (verbosity == 0)
    self.results = []

    # Filter cmd list for duplicates.
    self.cmd_list = []
    for cmd in cmd_list :
      if (not cmd in self.cmd_list) :
        self.cmd_list.append(cmd)
      else :
        print >> self.out, "Test %s repeated, skipping"%cmd

    # Set number of processors.
    if (nprocs is Auto) :
      nprocs = cpu_count()
    nprocs = min(nprocs, len(self.cmd_list))

    # Starting summary.
    if (self.verbosity > 0) :
      print >> self.out, "Running %d tests on %s processors:"%(len(self.cmd_list), nprocs)
      for cmd in self.cmd_list:
        print >> self.out, "  %s"%cmd
      print >> self.out, ""

    t_start = time.time()
    if nprocs > 1:
      # Run the tests with multiprocessing pool.
      pool = Pool(processes=nprocs)
      for command in self.cmd_list:
        pool.apply_async(
          run_command,
          [command, verbosity, out],
          callback=self.save_result)
      try:
        pool.close()
      except KeyboardInterrupt:
        print >> self.out, "Caught KeyboardInterrupt, terminating"
        pool.terminate()
      finally:
        pool.join()
    else:
      # Run tests serially.
      for command in self.cmd_list:
        rc = run_command(command, verbosity=verbosity, out=out)
        self.save_result(rc)

    # Print ending summary.
    t_end = time.time()
    print >> self.out, "="*80
    print >> self.out, ""
    print >> self.out, "Tests finished. Elapsed time: %.2fs" %(t_end-t_start)
    print >> self.out, ""
    test_cases = []
    # Process results for errors and warnings.
    extra_stderr = len([result for result in self.results if result.stderr_lines])
    longjobs = [result for result in self.results if result.wall_time > MAX_TIME]
    warnings = [result for result in self.results if self.check_alert(result) == 1]
    failures = [result for result in self.results if self.check_alert(result) == 2]
    self.finished = len(self.results)
    self.failure = len(failures)
    self.warning = len(warnings)

    # Output JUnit XML
    if output_junit_xml:
      from junit_xml import TestSuite, TestCase
      import re
      for result in self.results:
        test_name = reconstruct_test_name(result.command)
        output = '\n'.join(result.stdout_lines + result.stderr_lines)
        tc = TestCase(classname=test_name[0],
                      name=test_name[1],
                      elapsed_sec=result.wall_time,
                      stdout='\n'.join(result.stdout_lines),
                      stderr='\n'.join(result.stderr_lines))
        if result.return_code == 0:
          # Identify skipped tests
          if re.search('skip', output, re.IGNORECASE):
            # find first line including word 'skip' and use it as message
            skipline = re.search('^((.*)skip(.*))$', output, re.IGNORECASE | re.MULTILINE).group(1)
            tc.add_skipped_info(skipline)
        else:
          # Test failed. Extract error message and stack trace if possible
          error_message = 'exit code %d' % result.return_code
          error_output = '\n'.join(result.stderr_lines)
          if len(result.stderr_lines):
            error_message = result.stderr_lines[-1]
          if len(result.stderr_lines) > 20:
            error_output = '\n'.join(result.stderr_lines[-20:])
          tc.add_failure_info(message=error_message, output=error_output)
        test_cases.append(tc)
      ts = TestSuite("libtbx.run_tests_parallel", test_cases=test_cases)
      with open('output.xml', 'wb') as f:
        print >> f, TestSuite.to_xml_string([ts], prettyprint=True)

    # Run time distribution.
    if (libtbx.env.has_module("scitbx")) :
      from scitbx.array_family import flex
      print >> self.out, "Distribution of test runtimes:"
      hist = flex.histogram(flex.double([result.wall_time for result in self.results]), n_slots=10)
      hist.show(f=self.out, prefix="  ", format_cutoffs="%.1fs")
      print >> self.out, ""

    # Long job warning.
    if longjobs:
      print >> self.out, ""
      print >> self.out, "Warning: the following jobs took at least %d seconds:"%MAX_TIME
      for result in sorted(longjobs, key=lambda result:result.wall_time):
        print >> self.out, "  %s: %.1fs"%(result.command, result.wall_time)
      print >> self.out, "Please try to reduce overall runtime - consider splitting up these tests."

    # Failures.
    if failures:
      print >> self.out, ""
      print >> self.out, "Error: the following jobs returned non-zero exit codes or suspicious stderr output:"
      print >> self.out, ""
      for result in warnings:
        self.display_result(result, alert=1, out=self.out, log_return=self.out, log_stderr=self.out)
      for result in failures:
        self.display_result(result, alert=2, out=self.out, log_return=self.out, log_stderr=self.out)
      print >> self.out, ""
      print >> self.out, "Please verify these tests manually."
      print >> self.out, ""

    # Summary
    print >> self.out, "Summary:"
    print >> self.out, "  Tests run                    :",self.finished
    print >> self.out, "  Failures                     :",self.failure
    print >> self.out, "  Warnings (possible failures) :",self.warning
    print >> self.out, "  Stderr output (discouraged)  :",extra_stderr
    if (self.finished != len(self.cmd_list)) :
      print >> self.out, "*" * 80
      print >> self.out, "  WARNING: NOT ALL TESTS FINISHED!"
      print >> self.out, "*" * 80
Code example #52
    def run(self):
        # Clean up result file names
        results = []
        for target in self.targets:
            results.append(target.replace('\\', '/'))

        # Dig through each result file, looking for details on pass/fail:
        for result_file in results:
            with open(result_file, "r") as result_fh:
                lines = [line.rstrip() for line in result_fh.read().split('\n')]
            if len(lines) == 0:
                raise Exception("Empty test result file: %s" % result_file)

            # define an expression for your file reference
            entry_one = Combine(
                oneOf(list(alphas)) + ':/' +
                Word(alphanums + '_-./'))

            entry_two = Word(printables + ' ', excludeChars=':')
            entry = entry_one | entry_two

            delimiter = Literal(':').suppress()
            tc_result_line = Group(entry.setResultsName('tc_file_name') + delimiter + entry.setResultsName(
                'tc_line_nr') + delimiter + entry.setResultsName('tc_name') + delimiter + entry.setResultsName(
                'tc_status') + Optional(
                delimiter + entry.setResultsName('tc_msg'))).setResultsName("tc_line")

            eol = LineEnd().suppress()
            sol = LineStart().suppress()
            blank_line = sol + eol

            tc_summary_line = Group(Word(nums).setResultsName("num_of_tests") + "Tests" + Word(nums).setResultsName(
                "num_of_fail") + "Failures" + Word(nums).setResultsName("num_of_ignore") + "Ignored").setResultsName(
                "tc_summary")
            tc_end_line = Or(Literal("FAIL"), Literal('Ok')).setResultsName("tc_result")

            # run it and see...
            pp1 = tc_result_line | Optional(tc_summary_line | tc_end_line)
            pp1.ignore(blank_line | OneOrMore("-"))

            result = list()
            for l in lines:
                result.append((pp1.parseString(l)).asDict())
            # delete empty results
            result = filter(None, result)

            tc_list = list()
            for r in result:
                if 'tc_line' in r:
                    tmp_tc_line = r['tc_line']

                    # get only the file name which will be used as the classname
                    file_name = tmp_tc_line['tc_file_name'].split('\\').pop().split('/').pop().rsplit('.', 1)[0]
                    tmp_tc = TestCase(name=tmp_tc_line['tc_name'], classname=file_name)
                    if 'tc_status' in tmp_tc_line:
                        if str(tmp_tc_line['tc_status']) == 'IGNORE':
                            if 'tc_msg' in tmp_tc_line:
                                tmp_tc.add_skipped_info(message=tmp_tc_line['tc_msg'],
                                                        output=r'[File]={0}, [Line]={1}'.format(
                                                            tmp_tc_line['tc_file_name'], tmp_tc_line['tc_line_nr']))
                            else:
                                tmp_tc.add_skipped_info(message=" ")
                        elif str(tmp_tc_line['tc_status']) == 'FAIL':
                            if 'tc_msg' in tmp_tc_line:
                                tmp_tc.add_failure_info(message=tmp_tc_line['tc_msg'],
                                                        output=r'[File]={0}, [Line]={1}'.format(
                                                            tmp_tc_line['tc_file_name'], tmp_tc_line['tc_line_nr']))
                            else:
                                tmp_tc.add_failure_info(message=" ")

                    tc_list.append((str(result_file), tmp_tc))

            for k, v in tc_list:
                try:
                    self.test_suites[k].append(v)
                except KeyError:
                    self.test_suites[k] = [v]
        ts = []
        for suite_name in self.test_suites:
            ts.append(TestSuite(suite_name, self.test_suites[suite_name]))

        with open('result.xml', 'w') as f:
            TestSuite.to_file(f, ts, prettyprint=True, encoding='utf-8')

        return self.report