Example #1
def output_run_results_junit_xml(passing_tests: List[TestRunResult],
                                 failing_tests: List[TestRunResult],
                                 junit_dest: TextIO,
                                 junit_merged_dest: TextIO):
    '''Write results to JUnit XML

    Two versions are produced: a normal version and a merged version. In the
    normal version there is a test suite per unique test name with a different
    test case per seed run. In the merged version there is a single test case
    under the test suite with information for the individual runs merged
    together. This is to aid use of the Azure Pipelines JUnit dashboard, which
    doesn't neatly handle the test suite/test case hierarchy
    '''

    all_tests = passing_tests + failing_tests

    test_suite_info = {}
    for trr in all_tests:
        # test_case_info contains a tuple per unique test name. The first
        # element is a list of junit_xml.TestCase, one per test run with that
        # name. The other merges together all of the test outputs to produce
        # the merged output.
        unmerged, merged = \
            test_suite_info.setdefault(trr.name, ([], {'stdout': '',
                                                       'failures': ''}))
        result_text = gen_test_run_result_text(trr)

        # Create a test case for the TestRunResult. stdout holds the text
        # describing the run. Add the same text to failures if the test failed.
        test_case = junit_xml.TestCase(f'{trr.name}.{trr.seed}')
        test_case.stdout = result_text

        merged['stdout'] += result_text + '\n'

        if not trr.passed:
            test_case.add_failure_info(output=result_text)
            merged['failures'] += result_text

        unmerged.append(test_case)

    # Output the normal JUnit XML
    test_suites = [
        junit_xml.TestSuite(name, test_cases)
        for name, (test_cases, _) in test_suite_info.items()
    ]

    junit_dest.write(junit_xml.to_xml_report_string(test_suites))

    # Output the merged version of the JUnit XML
    merged_test_suites = []

    for name, (_, merged_test_info) in test_suite_info.items():
        test_case = junit_xml.TestCase(name)
        test_case.stdout = merged_test_info['stdout']
        test_case.add_failure_info(output=merged_test_info['failures'])

        merged_test_suites.append(junit_xml.TestSuite(name, [test_case]))

    junit_merged_dest.write(junit_xml.to_xml_report_string(merged_test_suites))
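
The same two-report idea can be boiled down to a short, self-contained sketch that uses only plain junit_xml calls; the test name, seeds, and output strings below are invented for illustration.

import junit_xml

# Hypothetical data: one test name run with two seeds, the second failing.
runs = [("my_rand_test", 1, True, "seed 1: passed"),
        ("my_rand_test", 2, False, "seed 2: failed")]

# Normal report: one suite per test name, one case per seed.
per_seed_cases = []
for name, seed, passed, text in runs:
    case = junit_xml.TestCase(f"{name}.{seed}")
    case.stdout = text
    if not passed:
        case.add_failure_info(output=text)
    per_seed_cases.append(case)
print(junit_xml.to_xml_report_string(
    [junit_xml.TestSuite("my_rand_test", per_seed_cases)]))

# Merged report: one case per suite with the run outputs concatenated.
merged = junit_xml.TestCase("my_rand_test")
merged.stdout = "\n".join(text for _, _, _, text in runs)
merged.add_failure_info(output="seed 2: failed")
print(junit_xml.to_xml_report_string(
    [junit_xml.TestSuite("my_rand_test", [merged])]))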
Example #2
 def create_test_case(self, test):
     # type: (Dict[Text, Any]) -> junit_xml.TestCase
     doc = test.get(u'doc', 'N/A').strip()
     return junit_xml.TestCase(doc,
                               elapsed_sec=self.duration,
                               stdout=self.standard_output,
                               stderr=self.error_output)
Example #3
def format_baseline_for_junit_xml(baseline):
    """
    :type baseline: dict
    :rtype: str
    """
    all_secrets = {}

    for filename, secret_list in baseline['results'].items():
        for secret in secret_list:
            test_case = junit_xml.TestCase(
                name="{}:{}".format(filename, secret["line_number"]))
            test_case.add_failure_info(
                message="Found secret of type {} on line {} in file {}".format(
                    secret["type"], secret["line_number"], filename),
                failure_type=secret["type"])
            if secret["type"] in all_secrets:
                all_secrets[secret["type"]].append(test_case)
            else:
                all_secrets[secret["type"]] = [test_case]

    test_suits = map(
        lambda secret: junit_xml.TestSuite(name=secret[0],
                                           test_cases=secret[1]),
        all_secrets.items())

    return junit_xml.to_xml_report_string(test_suits)
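
A hedged usage sketch: the shape of the baseline dict is inferred from the loop above (a "results" mapping of filename to secrets carrying "type" and "line_number"); the concrete filename and secret types are invented.

baseline = {
    "results": {
        "config/settings.py": [
            {"type": "Basic Auth Credentials", "line_number": 42},
            {"type": "Hex High Entropy String", "line_number": 108},
        ],
    },
}
print(format_baseline_for_junit_xml(baseline))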
Example #4
def create_junit_output(tests):
    """ Creates an output file for generating a junit test report

    args:
        tests: All tests completed + skipped

    returns:
        boolean: False if file generation failed - NOT YET
    """

    # Populate junit objects for all performed tests
    junit_tests = []

    # Integration tests
    for test in tests:
        junit_object = jx.TestCase(test.name_, classname="IncludeOS.{}.{}".format(test.type_, test.category_))

        # If test is skipped add skipped info
        if test.skip_:
            junit_object.add_skipped_info(message = test.skip_reason_, output = test.skip_reason_)
        elif test.proc_.returncode != 0:
            junit_object.add_failure_info(output = test.output_[0])
        else:
            junit_object.stdout = test.output_[0]
            junit_object.stderr = test.output_[1]

        # Add to list of all test objects
        junit_tests.append(junit_object)

    # Stress and misc tests
    ts = jx.TestSuite("IncludeOS tests", junit_tests)
    with open('output.xml', 'w') as f:
        jx.TestSuite.to_file(f, [ts], prettyprint=False)
Example #5
    def to_junit(
        self,
        suite_name="all_tests",
    ):
        """
        Convert the tests to JUnit XML.

        Returns a junit_xml.TestSuite containing all of the test cases. One test suite will be
        generated with the name given in suite_name. Unity Fixture test groups are mapped to the
        classname attribute of test cases; for basic Unity output there will be one class named
        "default".

        Optional arguments:
        suite_name -- The name to use for the "name" and "package" attributes of the testsuite element.

        Sample output:
        <testsuite disabled="0" errors="0" failures="1" name="[suite_name]" package="[suite_name]" skipped="0" tests="8" time="0">
            <testcase classname="test_group_1" name="group_1_test" />
            <testcase classname="test_group_2" name="group_2_test" />
        </testsuite>
        """
        test_case_list = []

        for test in self._tests:
            if test.result() == "PASS":
                test_case_list.append(
                    junit_xml.TestCase(name=test.name(),
                                       classname=test.group()))
            else:
                junit_tc = junit_xml.TestCase(
                    name=test.name(),
                    classname=test.group(),
                    file=test.file(),
                    line=test.line(),
                )
                if test.result() == "FAIL":
                    junit_tc.add_failure_info(message=test.message(),
                                              output=test.full_line())
                elif test.result() == "IGNORE":
                    junit_tc.add_skipped_info(message=test.message(),
                                              output=test.full_line())
                test_case_list.append(junit_tc)

        return junit_xml.TestSuite(name=suite_name,
                                   package=suite_name,
                                   test_cases=test_case_list)
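
One possible way to serialize the returned suite, assuming results is an instance of the parser class above; the output filename is arbitrary, and to_xml_report_file is the same junit_xml helper used in other examples here.

import junit_xml

suite = results.to_junit(suite_name="unity_tests")
with open("unity_results.xml", "w") as report_file:
    junit_xml.to_xml_report_file(report_file, [suite])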
Example #6
File: report.py Project: spoorthik/tcf
    def _mkreport_junit(self, _tc, kws, header, output, tag_info,
                        reproduction):

        for hook in self.junit_hooks:
            hook(self, _tc, kws, output)
        jtc = junit_xml.TestCase(self.junit_name % kws,
                                 classname=self.junit_classname % kws,
                                 elapsed_sec=123.456,
                                 stdout=header + tag_info + reproduction,
                                 stderr=None)

        # FIXME: nail down the exception (error/failure/blockage/skipped)
        # or cause, put only that in the message, and always put the whole
        # output in stdout, with the rest of the info on stderr
        msg_tag = kws['msg_tag']
        if msg_tag == "FAIL":
            jtc.add_failure_info(message="Failed", output=output)
        elif msg_tag == "ERRR":
            jtc.add_error_info(message="Error", output=output)
        elif msg_tag == "BLCK":
            jtc.add_error_info(message="Infrastructure", output=output)
        elif msg_tag == "SKIP":
            if self.junit_report_skip:
                jtc.add_skipped_info(message="Skipped", output=output)
            else:
                jtc.add_skipped_info(message="Skipped")
                jtc.stdout = None
                jtc.stderr = None
        elif msg_tag == "PASS":
            if self.junit_report_pass:
                jtc.stderr = output
            elif self.junit_report_pass is None:
                # we don't want *anything*
                jtc.stderr = None
                jtc.stdout = None
            else:  # False
                jtc.stderr = "<inclusion of output disabled by " \
                             "configuration setting of " \
                             "tcfl.report.junit_report_pass>"
                jtc.stdout = "<inclusion of output disabled by " \
                             "configuration setting of " \
                             "tcfl.report.junit_report_pass>"

        # Write the JUNIT to a pickle file, as we'll join it later
        # with the rest in _finalize. We can't put it in a
        # global because this testcase might be running in a separate
        # thread or process.  later == when the global testcase
        # reporter (tcfl.tc.tc_global) emits a COMPLETION message,
        # then we call _finalize()
        domain = commonl.file_name_make_safe(self.junit_domain % kws)
        # use the core keywords, so it is not modified
        tc_hash = _tc.kws['tc_hash']
        # Note we store it in the common
        pathname = os.path.join(tcfl.tc.tc_c.tmpdir, "junit", domain)
        commonl.makedirs_p(pathname)
        # Binary mode: protocol-2 pickles are binary data.
        with open(os.path.join(pathname, tc_hash + ".pickle"), "wb") as picklef:
            cPickle.dump(jtc, picklef, protocol=2)
Example #7
 def create_test_case(self, test):
     # type: (Dict[Text, Any]) -> junit_xml.TestCase
     doc = test.get(u'doc', 'N/A').strip()
     case = junit_xml.TestCase(doc,
                               elapsed_sec=self.duration,
                               stdout=self.standard_output,
                               stderr=self.error_output)
     if self.return_code > 0:
         case.failure_message = self.message
     return case
Example #8
    def handle_composition(self, args: argparse.Namespace,
                           composition: mzcompose.Composition) -> None:
        if args.workflow not in composition.workflows:
            # Restart any dependencies whose definitions have changed. This is
            # Docker Compose's default behavior for `up`, but not for `run`,
            # which is a constant irritation that we paper over here. The trick,
            # taken from Buildkite's Docker Compose plugin, is to run an `up`
            # command that requests zero instances of the requested service.
            if args.workflow:
                composition.invoke(
                    "up",
                    "-d",
                    "--scale",
                    f"{args.workflow}=0",
                    args.workflow,
                )
            super().handle_composition(args, composition)
        else:
            # The user has specified a workflow rather than a service. Run the
            # workflow instead of Docker Compose.
            if args.unknown_args:
                bad_arg = args.unknown_args[0]
            elif args.unknown_subargs[0].startswith("-"):
                bad_arg = args.unknown_subargs[0]
            else:
                bad_arg = None
            if bad_arg:
                raise UIError(
                    f"unknown option {bad_arg!r}",
                    hint=f"if {bad_arg!r} is a valid Docker Compose option, "
                    f"it can't be used when running {args.workflow!r}, because {args.workflow!r} "
                    "is a custom mzcompose workflow, not a Docker Compose service",
                )

            # Run the workflow inside of a test case so that we get some basic
            # test analytics, even if the workflow doesn't define more granular
            # test cases.
            with composition.test_case(f"workflow-{args.workflow}"):
                composition.workflow(args.workflow, *args.unknown_subargs[1:])

            # Upload test report to Buildkite Test Analytics.
            junit_suite = junit_xml.TestSuite(composition.name)
            for (name, result) in composition.test_results.items():
                test_case = junit_xml.TestCase(name, composition.name,
                                               result.duration)
                if result.error:
                    test_case.add_error_info(message=result.error)
                junit_suite.test_cases.append(test_case)
            junit_report = ci_util.junit_report_filename("mzcompose")
            with junit_report.open("w") as f:
                junit_xml.to_xml_report_file(f, [junit_suite])
            ci_util.upload_junit_report("mzcompose", junit_report)

            if any(result.error
                   for result in composition.test_results.values()):
                raise UIError("at least one test case failed")
Example #9
def testcase_to_junit(
    toplevel: str,
    i: int,
    cls_path: str,
    testcase: TestcaseExecution,
    is_toplevel: bool = False,
) -> typing.Tuple[int, typing.List[junit_xml.TestCase]]:
    """Convert a testcase to junit testcases."""
    testcases = []
    my_cls_path = cls_path if is_toplevel else f"{cls_path} -> {testcase.name}"
    tc = junit_xml.TestCase(
        f"99999 - Summary {testcase.name}",
        classname=f"{toplevel}.{i:05} - {my_cls_path}",
    )
    if not testcase.success:
        if testcase.exc is not None:
            tc.add_error_info(f'Testcase failed with "{testcase.exc}"',
                              testcase.trace)
        else:
            tc.add_error_info(
                "Testcase failed because of sub testcase failure")
    testcases.append(tc)
    old_i = i
    i += 1
    for step_id, step in enumerate(testcase.sub_steps):
        if isinstance(step, TestcaseExecution):
            tc = junit_xml.TestCase(
                f"{step_id:05} - Testcase: {step.name}",
                classname=f"{toplevel}.{old_i:05} - {my_cls_path}",
            )
            testcases.append(tc)
            i_new, testcases_new = testcase_to_junit(toplevel, i, my_cls_path,
                                                     step)
            i = i_new
            testcases += testcases_new
        elif isinstance(step, ShellStep):
            tc = junit_xml.TestCase(
                f"{step_id:05} - Shell: {step.command}",
                classname=f"{toplevel}.{old_i:05} - {my_cls_path}",
                stdout=step.output,
            )
            testcases.append(tc)
    return i, testcases
Example #10
def emit_junit_xml(cluster_launch_attempts, frameworks):
    """Write out all the test actions failures to a junit file for jenkins or
    similar"""
    if not ok():
        return
    import junit_xml
    launch_fake_testcases = []
    for launch_attempt in cluster_launch_attempts:
        attempt_duration = launch_attempt.end_time - launch_attempt.start_time
        fake_test = junit_xml.TestCase(launch_attempt.name,
                                       elapsed_sec=attempt_duration)
        if launch_attempt.launch_succeeded:
            fake_test.stdout = "Launch worked"
        else:
            fake_test.add_failure_info("Launch failed")
        launch_fake_testcases.append(fake_test)

    launch_suite = junit_xml.TestSuite("Cluster launches",
                                       launch_fake_testcases)

    fake_suites = []
    fake_suites.append(launch_suite)
    for framework in frameworks:
        framework_testcases = []
        for action_name, action in framework.actions.items():
            action_duration = action['finish'] - action['start']
            fake_test = junit_xml.TestCase(action_name,
                                           elapsed_sec=action_duration,
                                           stdout=action['stdout'],
                                           stderr=action['stderr'])
            if not action['ok']:
                message = action['error_message']
                if not message:
                    message = "%s failed" % action_name
                fake_test.add_failure_info(message, action['error_output'])
            framework_testcases.append(fake_test)
        framework_suite = junit_xml.TestSuite("%s actions" % framework.name,
                                              framework_testcases)
        fake_suites.append(framework_suite)

    with open("junit_testpy.xml", "w") as f:
        junit_xml.TestSuite.to_file(f, fake_suites)
Example #11
File: sync.py Project: TechAbib/git-repo-1
    def create_junit_xml_file(projects, xml_file_path):
        """Create JUnit XML report file that can be used in Jenkins."""

        synced_projects = []
        for project in projects:
            for k, v in project.items():
                tc = junit_xml.TestCase(k)
                tc.add_error_info(message=v[1])
                synced_projects.append(tc)
        ts = junit_xml.TestSuite("Sync", synced_projects)
        with open(xml_file_path, 'w') as f:
            junit_xml.TestSuite.to_file(f, [ts])
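
A usage sketch with input shaped the way the loop above reads it (each project dict maps a name to a tuple whose second element becomes the message); the project name, message, and report path are invented, and the method is assumed to be a static helper.

projects = [
    {"platform/build": ("sync", "fetch failed: connection timed out")},
]
create_junit_xml_file(projects, "sync-report.xml")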
Example #12
    def create_test_case(cls, name):
        """
        Extend ``junit_xml.TestCase`` with:

        1. save create test case so it can be get by ``get_current_test_case``
        2. log create timestamp, so ``elapsed_sec`` can be auto updated in ``test_case_finish``.

        :param name: test case name
        :return: instance of ``junit_xml.TestCase``
        """
        # set stdout to empty string, so we can always append string to stdout.
        # It won't affect output logic. If stdout is empty, it won't be put to report.
        test_case = junit_xml.TestCase(name, stdout="")
        cls.JUNIT_CURRENT_TEST_CASE = test_case
        cls._TEST_CASE_CREATED_TS = time.time()
        return test_case
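
The docstring mentions a companion test_case_finish that auto-updates elapsed_sec; it is not shown here, so the sketch below only guesses at how it might use the recorded timestamp (the class name, method signature, and failure handling are assumptions).

import time
import junit_xml

class JunitReport:
    # Hypothetical holder mirroring the class the method above belongs to.
    JUNIT_CURRENT_TEST_CASE = None
    _TEST_CASE_CREATED_TS = 0.0

    @classmethod
    def create_test_case(cls, name):
        test_case = junit_xml.TestCase(name, stdout="")
        cls.JUNIT_CURRENT_TEST_CASE = test_case
        cls._TEST_CASE_CREATED_TS = time.time()
        return test_case

    @classmethod
    def test_case_finish(cls, failed=False):
        # Auto-fill elapsed_sec from the creation timestamp recorded above.
        test_case = cls.JUNIT_CURRENT_TEST_CASE
        test_case.elapsed_sec = time.time() - cls._TEST_CASE_CREATED_TS
        if failed:
            test_case.add_failure_info(output=test_case.stdout)
        return test_case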
Example #13
 def _end_test(
         self, example, type='Passed', err_msg='', out_msg='', last=False
 ):
     if not last:
         last = self.current_startedAt
     self.current_startedAt = time.time()
     self.junitxml_tests[self.current_group]['tests'].append(
         junit_xml.TestCase(
             name=example.name,
             classname=self.current_group,
             status=type,
             elapsed_sec=self.current_startedAt - last,
             stderr=err_msg,
             stdout=out_msg
         )
     )
Example #14
 def create_test_case(self, test):
     # type: (Dict[Text, Any]) -> junit_xml.TestCase
     doc = test.get(u"doc", "N/A").strip()
     if test.get("tags"):
         category = ", ".join(test["tags"])
     else:
         category = REQUIRED
     short_name = test.get(u"short_name")
     case = junit_xml.TestCase(
         doc,
         elapsed_sec=self.duration,
         file=short_name,
         category=category,
         stdout=self.standard_output,
         stderr=self.error_output,
     )
     if self.return_code > 0:
         case.failure_message = self.message
     return case
Example #15
File: test.py Project: smartree/elbe
    def addSubTest(self, test, subtest, err):
        """Called at the end of a subtest.
           'err' is None if the subtest ended successfully, otherwise it's a
           tuple of values as returned by sys.exc_info().
        """

        self.current_case = junit.TestCase(name=str(subtest))
        self.cases.append(self.current_case)

        if err is not None:
            if issubclass(err[0], test.failureException):
                self.current_case.add_failure_info(
                    message=self._exc_info_to_string(err, test))
            else:
                self.current_case.add_error_info(
                    message=self._exc_info_to_string(err, test))

        super().addSubTest(test, subtest, err)
Example #16
    def __init__(self, board, riotdir, appdir, resultdir, junit=False):
        self.board = board
        self.riotdir = riotdir
        self.appdir = appdir
        self.resultdir = os.path.join(resultdir, appdir)
        if junit:
            if not junit_xml:
                raise ImportError("`junit-xml` required for --report-xml")
            self.testcase = junit_xml.TestCase(name=self.appdir,
                                               stdout='', stderr='')
            self.log_stream = io.StringIO()
            logging.basicConfig(stream=self.log_stream)
        else:
            self.testcase = None
        self.logger = logging.getLogger(f'{board}.{appdir}')

        # Currently not handling absolute directories or outside of RIOT
        assert is_in_directory(self.resultdir, resultdir), \
            "Application result directory is outside main result directory"
Example #17
def write_JUnit_XML(results, output_filename="output.xml"):
    """Write a JUnit XML test report to a file if junit_xml is available."""
    try:
        import junit_xml
    except ImportError:
        return

    test_cases = []
    for result in results:
        test_name = reconstruct_test_name(result.command)
        tc = junit_xml.TestCase(classname=test_name[0],
                                name=test_name[1],
                                elapsed_sec=result.wall_time,
                                stdout='\n'.join(result.stdout_lines),
                                stderr='\n'.join(result.stderr_lines))
        if result.return_code == 0:
            # Identify skipped tests
            output = '\n'.join(result.stdout_lines + result.stderr_lines)
            if re.search('skip', output, re.IGNORECASE):
                # find first line including word 'skip' and use it as message
                skipline = re.search('^((.*)skip(.*))$', output,
                                     re.IGNORECASE | re.MULTILINE).group(1)
                tc.add_skipped_info(skipline)
        elif result.alert_status == Status.EXPECTED_FAIL:
            tc.add_skipped_info("Expected test failure")
        elif result.alert_status == Status.EXPECTED_UNSTABLE:
            tc.add_skipped_info("Expected test instability")
        else:
            # Test failed. Extract error message and stack trace if possible
            error_message = 'exit code %d' % result.return_code
            error_output = '\n'.join(result.stderr_lines)
            if result.stderr_lines:
                error_message = result.stderr_lines[-1]
                if len(result.stderr_lines) > 20:
                    error_output = '\n'.join(result.stderr_lines[-20:])
            tc.add_failure_info(message=error_message, output=error_output)
        test_cases.append(tc)
    ts = junit_xml.TestSuite("libtbx.run_tests_parallel",
                             test_cases=test_cases)
    with codecs.open(output_filename, "w", encoding="utf-8") as f:
        ts.to_file(f, [ts], prettyprint=True, encoding="utf-8")
Example #18
    def wrapper(cfg):
        global retcode
        if cfg.get('junit'):
            tstart = time.time()
            tc = junit_xml.TestCase(func.__name__, classname="skt")

            try:
                func(cfg)
            except Exception:
                logging.error("Exception caught: %s", traceback.format_exc())
                tc.add_failure_info(traceback.format_exc())
                retcode = 1

            # No exception but retcode != 0, probably tests failed
            if retcode != 0 and not tc.is_failure():
                tc.add_failure_info("Step finished with retcode: %d" % retcode)

            tc.stdout = json.dumps(cfg, default=str)
            tc.elapsed_sec = time.time() - tstart
            cfg['_testcases'].append(tc)
        else:
            func(cfg)
Example #19
    def go(self):
        """ Read executed tests and write junit """
        super().go()

        import_junit_xml()

        suite = junit_xml.TestSuite(self.step.plan.name)
        for result in self.step.plan.execute.results():
            try:
                main_log = self.step.plan.execute.read(result.log[0])
            except (IndexError, AttributeError):
                main_log = None
            case = junit_xml.TestCase(result.name,
                                      classname=None,
                                      elapsed_sec=duration_to_seconds(
                                          result.duration),
                                      stdout=main_log)
            # Map tmt OUTCOME to JUnit states
            if result.result == "error":
                case.add_error_info(result.result)
            elif result.result == "fail":
                case.add_failure_info(result.result)
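            # Plausible continuation (not part of the original snippet): attach
            # the case to the suite, then serialize the report after the loop.
            suite.test_cases.append(case)

        # The output path is an assumption, not tmt's actual behaviour.
        with open("junit.xml", "w") as report_file:
            report_file.write(junit_xml.to_xml_report_string([suite]))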
Example #20
 def _end_test(self, test, type='Success', err_data=None, out_data=''):
     test_index = self.testsRun - 1
     # If already ended, do nothing
     if (len(self.ran_tests) == self.testsRun and
             self.ran_tests[test_index]):
         return
     start_time = self._time_tests[test_index]['start']
     endtime = self._time_tests[test_index]['end'] or time.time()
     if not self._time_tests[test_index]['end']:
         self._time_tests[test_index]['end'] = endtime
     test_name = str(test).split()[0]
     test_classname = str(test).split()[-1][1:-1]
     err_text = ''
     if err_data:
         err_text = self._exc_info_to_string(err_data, test)
     testcase = junit_xml.TestCase(
         name=test_name,
         classname=test_classname,
         elapsed_sec=(endtime - start_time),
         stdout=out_data,
         status=type
     )
     if testcase.status == 'Error':
         testcase.add_error_info(
             message='Error At {}'.format(test_name),
             output=err_text
         )
     elif testcase.status == 'Failure':
         testcase.add_failure_info(
             message='Failure At {}'.format(test_name),
             output=err_text
         )
     elif testcase.status == 'Skip':
         testcase.add_skipped_info(
             message='Skipped {}'.format(test_name),
             output=out_data
         )
     self.ran_tests.append(testcase)
Example #21
    def wrapper(cfg):
        """
        Outer wrapper of a @junit function.
        Args:
            cfg: A dictionary of skt configuration

        """
        # pylint: disable=broad-except
        global retcode
        if cfg.get('junit'):
            tstart = time.time()
            testcase = junit_xml.TestCase(func.__name__, classname="skt")

            try:
                func(cfg)
            except Exception:
                logging.error(
                    "Unexpected exception caught, probably an "
                    "infrastructure failure or skt bug: %s",
                    traceback.format_exc())
                testcase.add_error_info(traceback.format_exc())
                retcode = SKT_ERROR

            if retcode == SKT_FAIL:
                # Tests failed
                testcase.add_failure_info("Step finished with retcode: %d" %
                                          retcode)
            elif retcode >= SKT_ERROR:
                testcase.add_error_info(
                    "Infrastructure issue or skt bug detected, retcode: %d" %
                    retcode)

            testcase.stdout = json.dumps(cfg, default=str)
            testcase.elapsed_sec = time.time() - tstart
            cfg['_testcases'].append(testcase)
        else:
            func(cfg)
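
Both skt wrappers above accumulate their cases in cfg['_testcases']; a minimal sketch of how that list might later become a report (the suite name and output path are assumptions, not skt's actual behaviour):

import junit_xml

def write_junit_report(cfg, path="skt-report.xml"):
    suite = junit_xml.TestSuite("skt", cfg.get('_testcases', []))
    with open(path, "w") as report_file:
        junit_xml.to_xml_report_file(report_file, [suite])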
Example #22
def results(request):
    from pyresttest.resttest import RestTest
    params = request.GET
    test_groups = []

    try:
        test_file = params['file']
        #lock.acquire()
        try:
            test_groups = RestTest().main({
                "url": "",
                "test": test_file,
                "absolute_urls": True,
                "log": "debug",
                "interactive": False,
                "ssl_insecure": True
            }).items()
        finally:
            pass
            #lock.release()
    except OSError as e:
        print e
        traceback.print_exc()
    except IOError:
        return HttpResponseServerError("Supplied test file was not found.")
    except MultiValueDictKeyError:
        return HttpResponseBadRequest(
            "File was not supplied in the input params.")
    # execute automated tests and validate responses and generate result json
    passSteps = warnSteps = failSteps = passCount = warnCount = failCount = 0

    import junit_xml as jx
    try:
        res = []
        testSuites = []
        for test_group in test_groups:
            group_name = test_group[0]
            tests = []
            for test in test_group[1]:
                if not test.step:
                    testCase = jx.TestCase(test.test.name, test.test._url, 0,
                                           test.body)
                    if not test.passed:
                        testCase.add_failure_info(
                            output=str(test.failures[0].message))
                    tests.append(testCase)

                if test.passed:
                    res.append({
                        "passed": "passed",
                        "name": test.test.name,
                        "group": group_name,
                        "response_code": test.response_code,
                        "step": test.step
                    })
                    if not test.step:
                        passCount += 1
                    else:
                        passSteps += 1
                else:
                    if test.response_code == 204:
                        res.append({
                            "passed": "warning",
                            "name": test.test.name,
                            "group": group_name,
                            "response_code": test.response_code,
                            "failures": test.failures[0].message,
                            "step": test.step
                        })
                        if not test.step:
                            warnCount += 1
                        else:
                            warnSteps += 1
                    else:
                        res.append({
                            "passed": "failed",
                            "name": test.test.name,
                            "group": group_name,
                            "response_code": test.response_code,
                            "failures": test.failures[0].message,
                            "step": test.step
                        })
                        if not test.step:
                            failCount += 1
                        else:
                            failSteps += 1

            testSuites.append(jx.TestSuite(group_name, tests))
        res.append({
            "pass": passCount,
            "warn": warnCount,
            "fail": failCount,
            "passSteps": passSteps,
            "warnSteps": warnSteps,
            "failSteps": failSteps
        })
    except Exception as e:
        print e
        traceback.print_exc()
        return HttpResponseServerError(
            "Some unknown error occurred; please verify that the test file is correctly structured."
        )

    if params['format'] == 'json':
        return HttpResponse(json.dumps(res))  # return results as json string.
    elif params['format'] == 'xml':
        return HttpResponse(jx.TestSuite.to_xml_string(testSuites))
Example #23
File: test.py Project: smartree/elbe
 def startTest(self, test):
     self.current_case = junit.TestCase(name=str(test))
     self.cases.append(self.current_case)
     super().startTest(test)
Example #24
def run_command(argv):

    # pylint: disable=too-many-locals

    this_dir = os.path.dirname(os.path.realpath(__file__))
    top_dir = os.path.join(this_dir, "..", "..")

    oparser = optparse.OptionParser(usage="usage: %prog [options]")

    oparser.add_option("-f",
                       "--filter",
                       dest="filter",
                       metavar="REGEX",
                       type="string",
                       default=".*",
                       help="Run specific test according to a filter rule")

    oparser.add_option("-l",
                       "--level",
                       dest="level",
                       type="string",
                       default="BASE",
                       help="Set test level threshold")

    oparser.add_option("-i",
                       "--invert",
                       dest="invert_re",
                       action="store_true",
                       default=False,
                       help="Invert the matching of --filter")

    oparser.add_option(
        "-d",
        "--dry-run",
        dest="dry_run",
        action="store_true",
        default=False,
        help="List tests that would have been executed and exit")

    oparser.add_option("-p",
                       "--parallel",
                       dest="parallel",
                       type="string",
                       default="0,1",
                       help="Run every thest where test_ID % N == node_ID")

    oparser.add_option("-o",
                       "--output",
                       dest="output",
                       type="string",
                       default=None,
                       help="Write XML output to file")

    (opt, _) = oparser.parse_args(argv)

    # Set test level threshold
    if opt.level not in ElbeTestLevel.__members__:
        print("Invalid level value '%s'. Valid values are: %s" %
              (opt.level, ", ".join(key for key in ElbeTestLevel.__members__)))
        os.sys.exit(20)

    ElbeTestCase.level = ElbeTestLevel[opt.level]

    # Find all tests
    loader = unittest.defaultTestLoader
    loader.suiteClass = ElbeTestSuite
    suite = loader.discover(top_dir)

    # then filter them
    suite.filter_test(opt.parallel, opt.filter, opt.invert_re)

    # Dry run? Just exit gently
    if opt.dry_run:
        suite.ls()
        print(
            "======================================================================\n"
            "This was a dry run. No tests were executed")
        os.sys.exit(0)

    cases = []

    err_cnt = 0
    fail_cnt = 0

    for test in suite:

        print(test)

        result = unittest.TestResult()

        test.run(result)

        case = junit.TestCase(name=str(test))

        for error in result.errors:
            case.add_error_info(message=error[1])
            err_cnt += 1

        for failure in result.failures:
            case.add_failure_info(message=failure[1])
            fail_cnt += 1
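        # Plausible continuation (not shown in the original): collect the case.
        cases.append(case)

    # Write the XML report when --output is given; the suite name here is an
    # assumption.
    if opt.output:
        with open(opt.output, "w") as f:
            junit.TestSuite.to_file(f, [junit.TestSuite("elbe tests", cases)],
                                    prettyprint=True)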
Example #25
def run_tests(args):
    """
    Run the user defined test scenario.
    """
    TestImage.testRuns = args.testRepeats
    TestImage.warmupRuns = args.testWarmups

    # Create output location
    if not os.path.exists("TestOutput"):
        os.mkdir("TestOutput")

    # Load test resources
    binary = get_test_binary()
    reference = get_test_reference_scores()
    testList = get_test_listing(reference)

    # Run tests
    suites = []
    suite = None
    suiteFormat = None

    # Run tests
    maxCount = len(args.testBlockSize) * len(testList)
    curCount = 0

    statRun = 0
    statSkip = 0
    statPass = 0

    for blockSize in args.testBlockSize:
        for test in testList:
            curCount += 1

            # Skip tests not enabled for the current testing thoroughness level
            if args.testLevel not in test.useLevel:
                statSkip += 1
                continue

            # Skip tests not enabled for the current dynamic range level
            if args.testRange not in test.useRange:
                statSkip += 1
                continue

            # Skip tests not enabled for the current data format
            if args.testFormat not in test.useFormat:
                statSkip += 1
                continue

            # Start a new suite if the format changes
            dat = (test.dynamicRange, test.format, blockSize)
            testFormat = "%s.%s.%s" % dat
            if (not suite) or (suiteFormat != testFormat):
                suiteFormat = testFormat
                suite = juxml.TestSuite("Image %s test suite" % suiteFormat)
                suites.append(suite)
                print("Running suite: %s" % suiteFormat)

            # Run the test
            test.run(binary, blockSize)
            dat = (curCount, maxCount, test.name, blockSize,
                   test.runPSNR[blockSize], test.runTime[blockSize],
                   test.status[blockSize])

            # Log results
            statRun += 1
            if "pass" in test.status[blockSize]:
                statPass += 1

            log = "Ran test %u/%u: %s %s, %0.3f dB, %0.3f s, %s" % dat
            print(" + %s" % log)

            # Generate JUnit result
            caseName = "%s.%s" % (test.name, blockSize)
            case = juxml.TestCase(caseName,
                                  elapsed_sec=test.runTime[blockSize],
                                  stdout=log)
            suite.test_cases.append(case)

            if test.status[blockSize] == "fail":
                dat = (test.runPSNR[blockSize], test.referencePSNR[blockSize])
                msg = "PSNR fail %0.3f dB is worse than %s dB" % dat
                case.add_failure_info(msg)

    # Print summary results
    print("\nSummary")
    if statRun == statPass:
        print("+ PASS (%u ran)" % statRun)
    else:
        print("+ FAIL (%u ran, %u failed)" % (statRun, statRun - statPass))

    # Write the JUnit results file
    with open("TestOutput/results.xml", "w") as fileHandle:
        juxml.TestSuite.to_file(fileHandle, suites)
Example #26
             if not sim_started:
                 sim_started = line.find('## run')==0
             elif not sim_completed:
                 if 'Error:' in line:
                     sim_failed = True
                 if 'PASSED' in line:
                     sim_passed = True
                 # Check for exit, possibly might have tons of errors and clipped file
                 if 'Exiting xsim' in line:
                     sim_completed = True
 except Exception as ex:
     print('Failed parsing %s: %s'%(log,ex))
     sim_error = True
 # Translate results for JUnit.
 test_name = os.path.basename(log).replace('simulate_','').replace('.log','')
 case = junit_xml.TestCase(test_name, classname='pico_ethernet', timestamp=time.time())
 if sim_error:
     print('error')
     case.add_error_info('Error: Failed parsing simulation results')
 elif not sim_completed:
     print('error')
     case.add_error_info('Error: Simulation results appear to be truncated (could not find Exit message)')
 elif sim_failed:
     print('failed')
     case.add_failure_info('Failure: Error detected in simulation')
 elif sim_passed or IGNORE_MISSING_PASS_MSG:
     print('passed')
     pass # Success!
 else:
     case.add_error_info('Error: Invalid simulation log - unsure of result')
     print('invalid')
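 # Hedged continuation (not in the fragment): wrap the case in a suite and
 # write the report; the suite name and filename are invented.
 ts = junit_xml.TestSuite('pico_ethernet simulation', [case])
 with open('results.xml', 'w') as f:
     f.write(junit_xml.to_xml_report_string([ts]))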
Example #27
 def __call__(self):
     path = self.node.et.text
     test = junit.TestCase(name=path, classname=self.tag)
     if not self.target.exists(path):
         test.add_failure_info(message="FAILED")
     return test
Example #28
def make(args):
    # Keep skip_rules defined even when no rules file is given, so the
    # failure-handling path below cannot hit a NameError.
    skip_rules = SkipRules(args.skip_rules) if args.skip_rules else None

    params = get_params(args)

    package = params.path.partition(os.path.sep)[0]
    suite = junit_xml.TestSuite(
        params.path,
        timestamp=time.time(),
        file=params.path,
        package=package,
    )

    cmd = ['/usr/bin/env', 'make']
    if params.directory:
        cmd.extend(['-C', params.directory])
    if params.makefile:
        cmd.extend(['-f', params.makefile])
    if args.jobs > 1:
        cmd.extend(['-j', str(args.jobs)])

    jobserver_auth_fds = get_jobserver_auth_fds()
    targets = get_targets(args, params.path)

    for target in targets:
        if not target:
            continue

        cmd_target = cmd + [target]

        timestamp = time.time()
        start = time.monotonic()
        # To better control the use of resources by a subprocess make, pass
        # jobserver auth file descriptors in the subprocess, so it will
        # participate in the jobserver protocol and schedule its jobs
        # accordingly.
        ret = subprocess.run(cmd_target,
                             pass_fds=jobserver_auth_fds,
                             capture_output=True,
                             text=True)
        end = time.monotonic()

        case_name = 'make ' + target
        case_log = ' '.join(cmd_target)
        case_classname = params.path
        case = junit_xml.TestCase(
            case_name,
            classname=case_classname,
            file=params.path,
            log=case_log,
            timestamp=timestamp,
            elapsed_sec=(end - start),
            stdout=ret.stdout,
            stderr=ret.stderr,
        )
        suite.test_cases.append(case)

        if ret.returncode != 0:
            if skip_rules:
                skipped, msg = skip_rules.match(params.path, ret.stderr)
                if skipped:
                    case.add_skipped_info(msg)
                    continue

            msg = 'Ended with non-zero exit code: {}'.format(ret.returncode)
            case.add_failure_info(msg)
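    # Plausible wrap-up (not part of the original snippet): write the suite
    # out; the report filename is an assumption.
    with open('make-report.xml', 'w') as report_file:
        junit_xml.to_xml_report_file(report_file, [suite])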