Example #1
def output_run_results_junit_xml(passing_tests: List[TestRunResult],
                                 failing_tests: List[TestRunResult],
                                 junit_dest: TextIO,
                                 junit_merged_dest: TextIO):
    '''Write results to JUnit XML

    Two versions are produced: a normal version and a merged version. In the
    normal version there is a test suite per unique test name with a different
    test case per seed run. In the merged version there is a single test case
    under the test suite with information for the individual runs merged
    together. This is to aid use of the Azure Pipelines JUnit dashboard, which
    doesn't neatly handle the test suite/test case hierarchy
    '''

    all_tests = passing_tests + failing_tests

    test_suite_info = {}
    for trr in all_tests:
        # test_suite_info maps each unique test name to a tuple. The first
        # element is a list of junit_xml.TestCase objects, one per test run
        # with that name. The second element merges all of those runs' outputs
        # to produce the merged version.
        unmerged, merged = \
            test_suite_info.setdefault(trr.name, ([], {'stdout': '',
                                                       'failures': ''}))
        result_text = gen_test_run_result_text(trr)

        # Create a test case for the TestRunResult. stdout holds the text
        # describing the run. Add the same text to failures if the test failed.
        test_case = junit_xml.TestCase(f'{trr.name}.{trr.seed}')
        test_case.stdout = result_text

        merged['stdout'] += result_text + '\n'

        if not trr.passed:
            test_case.add_failure_info(output=result_text)
            merged['failures'] += result_text

        unmerged.append(test_case)

    # Output the normal JUnit XML
    test_suites = [
        junit_xml.TestSuite(name, test_cases)
        for name, (test_cases, _) in test_suite_info.items()
    ]

    junit_dest.write(junit_xml.to_xml_report_string(test_suites))

    # Output the merged version of the JUnit XML
    merged_test_suites = []

    for name, (_, merged_test_info) in test_suite_info.items():
        test_case = junit_xml.TestCase(name)
        test_case.stdout = merged_test_info['stdout']
        test_case.add_failure_info(output=merged_test_info['failures'])

        merged_test_suites.append(junit_xml.TestSuite(name, [test_case]))

    junit_merged_dest.write(junit_xml.to_xml_report_string(merged_test_suites))
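The example above relies on a small junit_xml surface: TestCase collects per-run stdout and failure text, TestSuite groups the cases under one test name, and to_xml_report_string serializes the suites. A minimal, self-contained sketch of that flow (hypothetical test name and seeds, and assuming a junit-xml release that provides to_xml_report_string):

import junit_xml

# One passing and one failing run of the same hypothetical test.
passing = junit_xml.TestCase('my_test.1234', stdout='seed 1234: PASSED')
failing = junit_xml.TestCase('my_test.5678', stdout='seed 5678: FAILED')
failing.add_failure_info(output='seed 5678: FAILED')

suite = junit_xml.TestSuite('my_test', [passing, failing])
print(junit_xml.to_xml_report_string([suite]))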
Example #2
def main():
    """This is the main entry point used by setup.cfg."""
    # pylint: disable=protected-access
    try:
        global retcode

        parser = setup_parser()
        args = parser.parse_args()
        check_args(parser, args)

        setup_logging(args.verbose)

        # We are gradually migrating away from messing with cfg and passing
        # it everywhere.
        cfg = None
        if args.func in ['cmd_merge', 'cmd_build']:
            args.func(args)
        else:
            cfg = load_config(args)
            args.func(cfg)

        if cfg and cfg.get('junit'):
            testsuite = junit_xml.TestSuite("skt", cfg.get('_testcases'))
            with open("%s/%s.xml" % (cfg.get('junit'), args._name),
                      'w') as fileh:
                junit_xml.TestSuite.to_file(fileh, [testsuite])

        sys.exit(retcode)
    except KeyboardInterrupt:
        print("\nExited at user request.")
        sys.exit(130)
Example #3
def format_baseline_for_junit_xml(baseline):
    """
    :type baseline: dict
    :rtype: str
    """
    all_secrets = {}

    for filename, secret_list in baseline['results'].items():
        for secret in secret_list:
            test_case = junit_xml.TestCase(
                name="{}:{}".format(filename, secret["line_number"]))
            test_case.add_failure_info(
                message="Found secret of type {} on line {} in file {}".format(
                    secret["type"], secret["line_number"], filename),
                failure_type=secret["type"])
            if secret["type"] in all_secrets:
                all_secrets[secret["type"]].append(test_case)
            else:
                all_secrets[secret["type"]] = [test_case]

    test_suites = [
        junit_xml.TestSuite(name=secret_type, test_cases=secret_cases)
        for secret_type, secret_cases in all_secrets.items()
    ]

    return junit_xml.to_xml_report_string(test_suites)
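As a usage illustration, the function expects the baseline to carry a 'results' mapping of filename to a list of findings, each with at least 'type' and 'line_number' keys; a hypothetical minimal call could look like this:

baseline = {
    'results': {
        'app/settings.py': [
            {'type': 'Basic Auth Credentials', 'line_number': 42},
        ],
    },
}
print(format_baseline_for_junit_xml(baseline))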
Example #4
def main() -> None:
    """Generate a JUnit XML file."""

    try:
        log = logparser.logfile(sys.argv[1])
    except IndexError:
        sys.stderr.write(f"""\
\x1B[1mUsage: {sys.argv[0]} <logfile>\x1B[0m
""")
        sys.exit(1)
    except OSError:
        sys.stderr.write(f"""\
\x1B[31mopen failed!\x1B[0m
\x1B[1mUsage: {sys.argv[0]} <logfile>\x1B[0m
""")
        sys.exit(1)

    toplevels = parse_log(log)
    testcases: typing.List[junit_xml.TestCase] = []
    for i, toplevel in enumerate(toplevels):
        testcases += toplevel_to_junit(i, toplevel)
    print(
        json.dumps(list(map(lambda x: f"{x.classname}", testcases)), indent=4),
        file=sys.stderr,
    )

    print(
        junit_xml.TestSuite.to_xml_string(
            [junit_xml.TestSuite("TBot", testcases)]))
Example #5
def create_junit_output(tests):
    """ Creates an output file for generating a junit test report

    args:
        tests: All tests completed + skipped

    returns:
        boolean: False if file generation failed (not yet implemented)
    """

    # Populate junit objects for all performed tests
    junit_tests = []

    # Integration tests
    for test in tests:
        junit_object = jx.TestCase(test.name_, classname="IncludeOS.{}.{}".format(test.type_, test.category_))

        # If test is skipped add skipped info
        if test.skip_:
            junit_object.add_skipped_info(message = test.skip_reason_, output = test.skip_reason_)
        elif test.proc_.returncode != 0:
            junit_object.add_failure_info(output = test.output_[0])
        else:
            junit_object.stdout = test.output_[0]
            junit_object.stderr = test.output_[1]

        # Add to list of all test objects
        junit_tests.append(junit_object)

    # Bundle all test cases into a single suite and write the report
    ts = jx.TestSuite("IncludeOS tests", junit_tests)
    with open('output.xml', 'w') as f:
        jx.TestSuite.to_file(f, [ts], prettyprint=False)
Example #6
    def get_xml(self):
        ts = junit.TestSuite(name="test", test_cases=self.cases)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            results = junit.TestSuite.to_xml_string([ts], encoding="utf-8")

        return results
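The warnings filter above is presumably silencing the deprecation warning that newer junit-xml releases emit for TestSuite.to_xml_string. Where the module-level serializer is available, an equivalent sketch (assuming the same list of cases) can avoid the suppression entirely:

import junit_xml as junit

def get_xml(cases):
    # Module-level serializer, available in newer junit-xml releases.
    ts = junit.TestSuite(name="test", test_cases=cases)
    return junit.to_xml_report_string([ts], encoding="utf-8")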
Example #7
    def summary(self, duration, example_count, failed_count, pending_count):
        all_tests = []
        for testgroup in self.junitxml_tests.keys():
            for test in self.junitxml_tests[testgroup]['tests']:
                all_tests.append(test)
        return junit_xml.TestSuite(
            name='{}.{}'.format('mamba', self.modulename),
            test_cases=all_tests
        )
Example #8
    def handle_composition(self, args: argparse.Namespace,
                           composition: mzcompose.Composition) -> None:
        if args.workflow not in composition.workflows:
            # Restart any dependencies whose definitions have changed. This is
            # Docker Compose's default behavior for `up`, but not for `run`,
            # which is a constant irritation that we paper over here. The trick,
            # taken from Buildkite's Docker Compose plugin, is to run an `up`
            # command that requests zero instances of the requested service.
            if args.workflow:
                composition.invoke(
                    "up",
                    "-d",
                    "--scale",
                    f"{args.workflow}=0",
                    args.workflow,
                )
            super().handle_composition(args, composition)
        else:
            # The user has specified a workflow rather than a service. Run the
            # workflow instead of Docker Compose.
            if args.unknown_args:
                bad_arg = args.unknown_args[0]
            elif args.unknown_subargs[0].startswith("-"):
                bad_arg = args.unknown_subargs[0]
            else:
                bad_arg = None
            if bad_arg:
                raise UIError(
                    f"unknown option {bad_arg!r}",
                    hint=f"if {bad_arg!r} is a valid Docker Compose option, "
                    f"it can't be used when running {args.workflow!r}, because {args.workflow!r} "
                    "is a custom mzcompose workflow, not a Docker Compose service",
                )

            # Run the workflow inside of a test case so that we get some basic
            # test analytics, even if the workflow doesn't define more granular
            # test cases.
            with composition.test_case(f"workflow-{args.workflow}"):
                composition.workflow(args.workflow, *args.unknown_subargs[1:])

            # Upload test report to Buildkite Test Analytics.
            junit_suite = junit_xml.TestSuite(composition.name)
            for (name, result) in composition.test_results.items():
                test_case = junit_xml.TestCase(name, composition.name,
                                               result.duration)
                if result.error:
                    test_case.add_error_info(message=result.error)
                junit_suite.test_cases.append(test_case)
            junit_report = ci_util.junit_report_filename("mzcompose")
            with junit_report.open("w") as f:
                junit_xml.to_xml_report_file(f, [junit_suite])
            ci_util.upload_junit_report("mzcompose", junit_report)

            if any(result.error
                   for result in composition.test_results.values()):
                raise UIError("at least one test case failed")
Example #9
    def __call__(self):
        test_cases = []
        for test in self.node:
            try:
                test_cases.append(self.do_test(test, self.target))
            except TestException:
                pass  # TODO - Handle me!
        ts = junit.TestSuite(name=self.node.et.attrib["name"],
                             test_cases=test_cases)
        return ts
Example #10
class JunitReport(object):
    # wrapper for junit test report
    # TODO: methods are not thread-safe (although not likely to be used this way).

    JUNIT_FILE_NAME = "XUNIT_RESULT.xml"
    JUNIT_DEFAULT_TEST_SUITE = "test-suite"
    JUNIT_TEST_SUITE = junit_xml.TestSuite(JUNIT_DEFAULT_TEST_SUITE)
    JUNIT_CURRENT_TEST_CASE = None
    _TEST_CASE_CREATED_TS = 0

    @classmethod
    def output_report(cls, junit_file_path):
        """ Output current test result to file. """
        with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME),
                  "w") as f:
            cls.JUNIT_TEST_SUITE.to_file(f, [cls.JUNIT_TEST_SUITE],
                                         prettyprint=False)

    @classmethod
    def get_current_test_case(cls):
        """
        By default, the test framework handles the junit test report automatically.
        Some test cases, however, may want to add extra information to the report;
        they can use this method to get the current test case created by the test framework.

        :return: current junit test case instance created by ``JunitTestReport.create_test_case``
        """
        return cls.JUNIT_CURRENT_TEST_CASE

    @classmethod
    def test_case_finish(cls, test_case):
        """
        Append the test case to the test suite so it can be written to the report file.
        The elapsed time is updated automatically, measured from the matching
        ``create_test_case`` call.
        """
        test_case.elapsed_sec = time.time() - cls._TEST_CASE_CREATED_TS
        cls.JUNIT_TEST_SUITE.test_cases.append(test_case)

    @classmethod
    def create_test_case(cls, name):
        """
        Extend ``junit_xml.TestCase`` with:

        1. saves the created test case so it can be retrieved via ``get_current_test_case``
        2. records the creation timestamp so ``elapsed_sec`` can be updated automatically
           in ``test_case_finish``.

        :param name: test case name
        :return: instance of ``junit_xml.TestCase``
        """
        # Set stdout to an empty string so we can always append text to it.
        # This does not affect the output logic: an empty stdout is not written to the report.
        test_case = junit_xml.TestCase(name, stdout="")
        cls.JUNIT_CURRENT_TEST_CASE = test_case
        cls._TEST_CASE_CREATED_TS = time.time()
        return test_case
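A short sketch of the lifecycle the docstrings describe, with a hypothetical test body and output directory: create_test_case before the test runs, test_case_finish afterwards, and output_report once all cases are done.

def run_one_test():
    case = JunitReport.create_test_case('flash_and_boot')
    try:
        pass  # ... hypothetical test body goes here ...
    except Exception as exc:
        case.add_failure_info(message=str(exc))
    finally:
        JunitReport.test_case_finish(case)

run_one_test()
JunitReport.output_report('.')  # writes ./XUNIT_RESULT.xml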
Example #11
def emit_junit_xml(cluster_launch_attempts, frameworks):
    """Write out all the test actions failures to a junit file for jenkins or
    similar"""
    if not ok():
        return
    import junit_xml
    launch_fake_testcases = []
    for launch_attempt in cluster_launch_attempts:
        attempt_duration = launch_attempt.end_time - launch_attempt.start_time
        fake_test = junit_xml.TestCase(launch_attempt.name,
                                       elapsed_sec=attempt_duration)
        if launch_attempt.launch_succeeded:
            fake_test.stdout = "Launch worked"
        else:
            fake_test.add_failure_info("Launch failed")
        launch_fake_testcases.append(fake_test)

    launch_suite = junit_xml.TestSuite("Cluster launches",
                                       launch_fake_testcases)

    fake_suites = []
    fake_suites.append(launch_suite)
    for framework in frameworks:
        framework_testcases = []
        for action_name, action in framework.actions.items():
            action_duration = action['finish'] - action['start']
            fake_test = junit_xml.TestCase(action_name,
                                           elapsed_sec=action_duration,
                                           stdout=action['stdout'],
                                           stderr=action['stderr'])
            if not action['ok']:
                message = action['error_message']
                if not message:
                    message = "%s failed" % action_name
                fake_test.add_failure_info(message, action['error_output'])
            framework_testcases.append(fake_test)
        framework_suite = junit_xml.TestSuite("%s actions" % framework.name,
                                              framework_testcases)
        fake_suites.append(framework_suite)

    with open("junit_testpy.xml", "w") as f:
        junit_xml.TestSuite.to_file(f, fake_suites)
Example #12
    def create_junit_xml_file(projects, xml_file_path):
        """Create JUnit XML report file that can be used in Jenkins."""

        synced_projects = []
        for project in projects:
            for k, v in project.items():
                tc = junit_xml.TestCase(k)
                tc.add_error_info(message=v[1])
                synced_projects.append(tc)
        ts = junit_xml.TestSuite("Sync", synced_projects)
        with open(xml_file_path, 'w') as f:
            junit_xml.TestSuite.to_file(f, [ts])
Example #13
def main():
    global retcode

    parser = setup_parser()
    args = parser.parse_args()

    setup_logging(args.verbose)
    cfg = load_config(args)

    args.func(cfg)
    if cfg.get('junit'):
        ts = junit_xml.TestSuite("skt", cfg.get('_testcases'))
        with open("%s/%s.xml" % (cfg.get('junit'), args._name), 'w') as fileh:
            junit_xml.TestSuite.to_file(fileh, [ts])

    sys.exit(retcode)
Example #14
def write_JUnit_XML(results, output_filename="output.xml"):
    """Write a JUnit XML test report to a file if junit_xml is available."""
    try:
        import junit_xml
    except ImportError:
        return

    def _decode_string(string):
        try:
            return string.encode('ascii', 'xmlcharrefreplace')
        except Exception:
            return unicode(string,
                           errors='ignore').encode('ascii',
                                                   'xmlcharrefreplace')

    test_cases = []
    for result in results:
        test_name = reconstruct_test_name(result.command)
        plain_stdout = map(_decode_string, result.stdout_lines)
        plain_stderr = map(_decode_string, result.stderr_lines)
        output = '\n'.join(plain_stdout + plain_stderr)
        tc = junit_xml.TestCase(classname=test_name[0],
                                name=test_name[1],
                                elapsed_sec=result.wall_time,
                                stdout='\n'.join(plain_stdout),
                                stderr='\n'.join(plain_stderr))
        if result.return_code == 0:
            # Identify skipped tests
            if re.search('skip', output, re.IGNORECASE):
                # find first line including word 'skip' and use it as message
                skipline = re.search('^((.*)skip(.*))$', output,
                                     re.IGNORECASE | re.MULTILINE).group(1)
                tc.add_skipped_info(skipline)
        else:
            # Test failed. Extract error message and stack trace if possible
            error_message = 'exit code %d' % result.return_code
            error_output = '\n'.join(plain_stderr)
            if plain_stderr:
                error_message = plain_stderr[-1]
                if len(plain_stderr) > 20:
                    error_output = '\n'.join(plain_stderr[-20:])
            tc.add_failure_info(message=error_message, output=error_output)
        test_cases.append(tc)
    ts = junit_xml.TestSuite("libtbx.run_tests_parallel",
                             test_cases=test_cases)
    with open(output_filename, 'wb') as f:
        ts.to_file(f, [ts], prettyprint=True)
Example #15
    def to_junit(
        self,
        suite_name="all_tests",
    ):
        """
        Convert the tests to JUnit XML.

        Returns a junit_xml.TestSuite containing all of the test cases. One test suite will be
        generated with the name given in suite_name. Unity Fixture test groups are mapped to the
        classname attribute of test cases; for basic Unity output there will be one class named
        "default".

        Optional arguments:
        suite_name -- The name to use for the "name" and "package" attributes of the testsuite element.

        Sample output:
        <testsuite disabled="0" errors="0" failures="1" name="[suite_name]" package="[suite_name]" skipped="0" tests="8" time="0">
            <testcase classname="test_group_1" name="group_1_test" />
            <testcase classname="test_group_2" name="group_2_test" />
        </testsuite>
        """
        test_case_list = []

        for test in self._tests:
            if test.result() == "PASS":
                test_case_list.append(
                    junit_xml.TestCase(name=test.name(),
                                       classname=test.group()))
            else:
                junit_tc = junit_xml.TestCase(
                    name=test.name(),
                    classname=test.group(),
                    file=test.file(),
                    line=test.line(),
                )
                if test.result() == "FAIL":
                    junit_tc.add_failure_info(message=test.message(),
                                              output=test.full_line())
                elif test.result() == "IGNORE":
                    junit_tc.add_skipped_info(message=test.message(),
                                              output=test.full_line())
                test_case_list.append(junit_tc)

        return junit_xml.TestSuite(name=suite_name,
                                   package=suite_name,
                                   test_cases=test_case_list)
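For completeness, serializing the suite returned by to_junit follows the same pattern as the other examples; a hedged sketch, assuming results is an instance of the class above and a junit-xml release that provides to_xml_report_file:

suite = results.to_junit(suite_name="unity_tests")
with open("unity_results.xml", "w") as f:
    junit_xml.to_xml_report_file(f, [suite], prettyprint=True)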
Example #16
def main() -> None:
    """Generate a JUnit XML file."""

    log = logparser.from_argv()

    toplevels = parse_log(log)
    testcases: typing.List[junit_xml.TestCase] = []
    for i, toplevel in enumerate(toplevels):
        testcases += toplevel_to_junit(i, toplevel)
    print(
        json.dumps(list(map(lambda x: f"{x.classname}", testcases)), indent=4),
        file=sys.stderr,
    )

    print(
        junit_xml.TestSuite.to_xml_string(
            [junit_xml.TestSuite("tbot", testcases)]))
Example #17
def write_JUnit_XML(results, output_filename="output.xml"):
    """Write a JUnit XML test report to a file if junit_xml is available."""
    try:
        import junit_xml
    except ImportError:
        return

    test_cases = []
    for result in results:
        test_name = reconstruct_test_name(result.command)
        tc = junit_xml.TestCase(classname=test_name[0],
                                name=test_name[1],
                                elapsed_sec=result.wall_time,
                                stdout='\n'.join(result.stdout_lines),
                                stderr='\n'.join(result.stderr_lines))
        if result.return_code == 0:
            # Identify skipped tests
            output = '\n'.join(result.stdout_lines + result.stderr_lines)
            if re.search('skip', output, re.IGNORECASE):
                # find first line including word 'skip' and use it as message
                skipline = re.search('^((.*)skip(.*))$', output,
                                     re.IGNORECASE | re.MULTILINE).group(1)
                tc.add_skipped_info(skipline)
        elif result.alert_status == Status.EXPECTED_FAIL:
            tc.add_skipped_info("Expected test failure")
        elif result.alert_status == Status.EXPECTED_UNSTABLE:
            tc.add_skipped_info("Expected test instability")
        else:
            # Test failed. Extract error message and stack trace if possible
            error_message = 'exit code %d' % result.return_code
            error_output = '\n'.join(result.stderr_lines)
            if result.stderr_lines:
                error_message = result.stderr_lines[-1]
                if len(result.stderr_lines) > 20:
                    error_output = '\n'.join(result.stderr_lines[-20:])
            tc.add_failure_info(message=error_message, output=error_output)
        test_cases.append(tc)
    ts = junit_xml.TestSuite("libtbx.run_tests_parallel",
                             test_cases=test_cases)
    with codecs.open(output_filename, "w", encoding="utf-8") as f:
        ts.to_file(f, [ts], prettyprint=True, encoding="utf-8")
Example #18
    def _finalize_junit_domain(self, _tc, domain):
        # Find all the junit-*.pickle files dropped by _mkreport()
        # above and collect them into a testsuite, writing an XML in
        # the CWD called junit.xml.

        reports = []
        domain_path_glob = os.path.join(tcfl.tc.tc_c.tmpdir, "junit", domain,
                                        "*.pickle")
        for filename in glob.glob(domain_path_glob):
            with open(filename) as f:
                jtc = cPickle.load(f)
                reports.append(jtc)

        ts = junit_xml.TestSuite(
            self.junit_suite_name % _tc.kws,
            reports,
            # I'd like:
            # hostname = _tc.kws['target_group_info'],
            # but it can't, because each TC in the suite is run in a
            # different target group. Maybe at some point TestCase
            # will support hostname?
            hostname=None,
            id=_tc.kws['runid'],
            package=self.junit_suite_package % _tc.kws,
            timestamp=time.time(),
            properties=self.junit_suite_properties,  # Dictionary
        )
        del reports

        if domain == "default":
            junit_filename = "junit.xml"
        else:
            junit_filename = commonl.file_name_make_safe("junit-%s.xml" %
                                                         domain,
                                                         extra_chars="")
        with codecs.open(junit_filename,
                         'w',
                         encoding='utf-8',
                         errors='ignore') as f:
            junit_xml.TestSuite.to_file(f, [ts], prettyprint=True)
Example #19
    def go(self):
        """ Read executed tests and write junit """
        super().go()

        import_junit_xml()

        suite = junit_xml.TestSuite(self.step.plan.name)
        for result in self.step.plan.execute.results():
            try:
                main_log = self.step.plan.execute.read(result.log[0])
            except (IndexError, AttributeError):
                main_log = None
            case = junit_xml.TestCase(result.name,
                                      classname=None,
                                      elapsed_sec=duration_to_seconds(
                                          result.duration),
                                      stdout=main_log)
            # Map tmt OUTCOME to JUnit states
            if result.result == "error":
                case.add_error_info(result.result)
            elif result.result == "fail":
                case.add_failure_info(result.result)
Example #20
def run_fuzzer(fuzzer_name, fuzzer_dir, fuzzer_logdir, logger, will_retry):
    def log(msg, *a, **k):
        logger.log(msg, a, k, flush=True)

    make_cmd = os.environ.get('MAKE', 'make')
    make_flags = os.environ.get('MAKEFLAGS', '')
    # Should run things?
    if not should_run_submake(make_flags):
        return 0

    fuzzer_runok = os.path.join(fuzzer_dir, "run.ok")
    if os.path.exists(fuzzer_runok):
        last_modified = datetime.fromtimestamp(os.stat(fuzzer_runok).st_mtime)

        log("Skipping as run.ok exists (updated @ {})",
            last_modified.isoformat())

        return 0

    time_start = datetime.utcnow()
    log("Starting @ {}", time_start.isoformat())

    running_msg = "Running {} -C {} run (with MAKEFLAGS='{}')".format(
        make_cmd,
        fuzzer_dir,
        make_flags,
    )
    log(running_msg)

    log_suffix = ".{}.log".format(time_start.isoformat())
    fuzzer_stdout = os.path.join(fuzzer_logdir, "stdout" + log_suffix)
    fuzzer_stderr = os.path.join(fuzzer_logdir, "stderr" + log_suffix)

    # Write header to stdout/stderr to make sure they match.
    for fname in [fuzzer_stdout, fuzzer_stderr]:
        with open(fname, "w") as fd:
            fd.write("Build starting @ {}\n".format(time_start.isoformat()))
            fd.write(running_msg)
            fd.write("\n")
            fd.write("-" * 75)
            fd.write("\n")
            fd.flush()
            os.fsync(fd)

    # Open the log files for appending
    stdout_fd = open(fuzzer_stdout, "a")
    stderr_fd = open(fuzzer_stderr, "a")

    # Play nice with make's jobserver.
    # See https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html#POSIX-Jobserver  # noqa
    job_fds = []
    if '--jobserver-fds' in make_flags:
        job_re = re.search('--jobserver-fds=([0-9]+),([0-9]+)', make_flags)
        assert job_re, make_flags
        job_rd, job_wr = job_re.groups()

        # Make copies of jobserver FDs in case a retry is needed.

        job_rd = int(job_rd)
        job_wr = int(job_wr)
        assert job_rd > 2, (job_rd, job_wr, make_flags)
        assert job_wr > 2, (job_rd, job_wr, make_flags)
        job_fds.append(job_rd)
        job_fds.append(job_wr)

    p = None
    try:
        p = subprocess.Popen(
            [make_cmd, '-C', fuzzer_dir, 'run'],
            stdin=None,
            stdout=stdout_fd,
            stderr=stderr_fd,
            pass_fds=job_fds,
        )
        while True:
            try:
                retcode = p.wait(timeout=10)
                p = None
            except subprocess.TimeoutExpired:
                retcode = None

            if retcode is not None:
                break
            mem = get_memory()['mem']
            log(
                "Still running (1m:{:0.2f}%, 5m:{:0.2f}%, 15m:{:0.2f}% Mem:{:0.1f}Gi used, {:0.1f}Gi free).\n{}",
                *get_load(),
                mem['used'] / 1e9,
                mem['available'] /
                1e9,  # Using available so the numbers add up.
                PsTree.get(p.pid),
            )
    except (Exception, KeyboardInterrupt, SystemExit):
        retcode = -1
        tb = io.StringIO()
        traceback.print_exc(file=tb)
        log(tb.getvalue())

    # Prevent Ctrl-C so we exit properly...
    old_sigint_handler = signal.getsignal(signal.SIGINT)

    def log_die(sig, frame):
        logger.log("Dieing!")

    signal.signal(signal.SIGINT, log_die)

    # Cleanup child process if they haven't already died.
    try:
        if p is not None:
            try:
                retcode = p.wait(1)
            except subprocess.TimeoutExpired:
                retcode = -1
                p.kill()
                p.wait()
                log("Warning: Killed program which should have been dead!")
    except Exception:
        tb = io.StringIO()
        traceback.print_exc(file=tb)
        log(tb.getvalue())

    # Wait for all children to finish.
    try:
        while True:
            log("Child finished: {}", os.waitpid(-1, 0))
    except ChildProcessError:
        pass

    log("Finishing ({}).", get_usage())

    time_end = datetime.utcnow()

    error_log = "\n".join(last_lines(open(fuzzer_stderr), 10000))
    success_log = "\n".join(last_lines(open(fuzzer_stdout), 100))

    # Find the next X_sponge_log.xml file name...
    for i in range(0, 100):
        tsfilename = os.path.join(fuzzer_logdir, '{}_sponge_log.xml'.format(i))
        if not os.path.exists(tsfilename):
            break

    test_case = junit_xml.TestCase(
        name=fuzzer_name,
        timestamp=time_start.timestamp(),
        elapsed_sec=(time_end - time_start).total_seconds(),
        stdout=success_log,
        stderr=error_log,
    )

    if retcode != 0:
        test_case.add_failure_info(
            'Fuzzer failed with exit code: {}'.format(retcode))

    with open(tsfilename, 'w') as f:
        ts = junit_xml.TestSuite(fuzzer_name, [test_case])
        junit_xml.TestSuite.to_file(f, [ts])

    if retcode != 0:

        # Log the last 10,000 lines of stderr on a failure
        log("""\
Failed @ {time_end} with exit code: {retcode}
--------------------------------------------------------------------------
{error_log}
--------------------------------------------------------------------------
Failed @ {time_end} with exit code: {retcode}
""",
            retcode=retcode,
            error_log=error_log,
            time_end=time_end.isoformat())
    else:
        # Log the last 100 lines of a successful run
        log(
            """\
Succeeded! @ {}
--------------------------------------------------------------------------
{}
--------------------------------------------------------------------------
Succeeded! @ {}
""", time_end.isoformat(), success_log, time_end.isoformat())

    logger.flush()
    signal.signal(signal.SIGINT, old_sigint_handler)
    return retcode
Example #21
def main(args):
    """For one board, compile all examples and tests and run test on board."""
    logger = logging.getLogger(args.board)
    if args.loglevel:
        loglevel = logging.getLevelName(args.loglevel.upper())
        logger.setLevel(loglevel)

    logger.addHandler(LOG_HANDLER)

    logger.info('Saving toolchain')
    save_toolchain(args.riot_directory, args.result_directory)

    board = check_is_board(args.riot_directory, args.board)
    logger.debug('board: %s', board)

    # Expand application directories: allows use of glob in application names
    apps_dirs = _expand_apps_directories(args.applications,
                                         args.riot_directory)
    apps_dirs_skip = _expand_apps_directories(args.applications_exclude,
                                              args.riot_directory, skip=True)

    app_dirs = apps_directories(args.riot_directory, apps_dirs=apps_dirs,
                                apps_dirs_skip=apps_dirs_skip)

    logger.debug('app_dirs: %s', app_dirs)
    logger.debug('resultdir: %s', args.result_directory)
    board_result_directory = os.path.join(args.result_directory, args.board)

    # Overwrite the compile/test targets from command line arguments
    RIOTApplication.COMPILE_TARGETS = args.compile_targets
    RIOTApplication.FLASH_TARGETS = args.flash_targets
    RIOTApplication.TEST_TARGETS = args.test_targets
    RIOTApplication.TEST_AVAILABLE_TARGETS = args.test_available_targets

    # List of applications for board
    applications = [RIOTApplication(board, args.riot_directory, app_dir,
                                    board_result_directory,
                                    junit=args.report_xml)
                    for app_dir in app_dirs]

    # Execute tests
    errors = [app.run_compilation_and_test(clean_after=args.clean_after,
                                           runtest=not args.no_test,
                                           incremental=args.incremental,
                                           jobs=args.jobs,
                                           with_test_only=args.with_test_only)
              for app in applications]
    errors = [e for e in errors if e is not None]
    num_errors = len(errors)

    summary = _test_failed_summary(errors, relpathstart=board_result_directory)
    save_failure_summary(board_result_directory, summary)

    if args.report_xml:
        if not junit_xml:
            raise ImportError("`junit-xml` required for --report-xml")
        report_file = os.path.join(board_result_directory, "report.xml")
        with open(report_file, "w+", encoding="utf-8") as report:
            junit_xml.TestSuite.to_file(
                report,
                [junit_xml.TestSuite(f'compile_and_test_for_{board}',
                                     [app.testcase for app in applications])]
            )
    if num_errors:
        logger.error('Tests failed: %d', num_errors)
        print(summary, end='')
    else:
        logger.info('Tests successful')
    sys.exit(num_errors)
Example #22
def main():  # type: () -> int

    args = arg_parser().parse_args(sys.argv[1:])
    if '--' in args.args:
        args.args.remove('--')

    # Remove test arguments with wrong syntax
    if args.testargs is not None:
        args.testargs = [
            testarg for testarg in args.testargs if testarg.count('==') == 1
        ]

    if not args.test:
        arg_parser().print_help()
        return 1

    with open(args.test) as f:
        tests = yaml.load(f, Loader=yaml.SafeLoader)

    failures = 0
    unsupported = 0
    passed = 0
    suite_name, _ = os.path.splitext(os.path.basename(args.test))
    report = junit_xml.TestSuite(suite_name, [])

    if args.only_tools:
        alltests = tests
        tests = []
        for t in alltests:
            loader = schema_salad.ref_resolver.Loader({"id": "@id"})
            cwl = loader.resolve_ref(t["tool"])[0]
            if isinstance(cwl, dict):
                if cwl["class"] == "CommandLineTool":
                    tests.append(t)
            else:
                raise Exception("Unexpected code path.")

    if args.l:
        for i, t in enumerate(tests):
            if t.get("short_name"):
                print(u"[%i] %s: %s" %
                      (i + 1, t["short_name"], t["doc"].strip()))
            else:
                print(u"[%i] %s" % (i + 1, t["doc"].strip()))

        return 0

    if args.n is not None or args.s is not None:
        ntest = []
        if args.n is not None:
            for s in args.n.split(","):
                sp = s.split("-")
                if len(sp) == 2:
                    ntest.extend(list(range(int(sp[0]) - 1, int(sp[1]))))
                else:
                    ntest.append(int(s) - 1)
        if args.s is not None:
            for s in args.s.split(","):
                test_number = get_test_number_by_key(tests, "short_name", s)
                if test_number:
                    ntest.append(test_number)
                else:
                    _logger.error('Test with short name "%s" not found ', s)
                    return 1
    else:
        ntest = list(range(0, len(tests)))

    total = 0
    with ThreadPoolExecutor(max_workers=args.j) as executor:
        jobs = [
            executor.submit(run_test, args, tests[i], i + 1, len(tests),
                            args.timeout) for i in ntest
        ]
        try:
            for i, job in zip(ntest, jobs):
                test_result = job.result()
                test_case = test_result.create_test_case(tests[i])
                total += 1
                return_code = test_result.return_code
                category = test_case.category
                if return_code == 0:
                    passed += 1
                elif return_code != 0 and return_code != UNSUPPORTED_FEATURE:
                    failures += 1
                    test_case.add_failure_info(output=test_result.message)
                elif return_code == UNSUPPORTED_FEATURE and category == REQUIRED:
                    failures += 1
                    test_case.add_failure_info(output=test_result.message)
                elif category != REQUIRED and return_code == UNSUPPORTED_FEATURE:
                    unsupported += 1
                    test_case.add_skipped_info("Unsupported")
                else:
                    raise Exception(
                        "This is impossible, return_code: {}, category: "
                        "{}".format(return_code, category))
                report.test_cases.append(test_case)
        except KeyboardInterrupt:
            for job in jobs:
                job.cancel()
            _logger.error("Tests interrupted")

    if args.junit_xml:
        with open(args.junit_xml, 'w') as xml:
            junit_xml.TestSuite.to_file(xml, [report])

    if failures == 0 and unsupported == 0:
        _logger.info("All tests passed")
        return 0
    if failures == 0 and unsupported > 0:
        _logger.warning("%i tests passed, %i unsupported features",
                        total - unsupported, unsupported)
        return 0
    _logger.warning("%i tests passed, %i failures, %i unsupported features",
                    total - (failures + unsupported), failures, unsupported)
    return 1
Example #23
        for error in result.errors:
            case.add_error_info(message=error[1])
            err_cnt += 1

        for failure in result.failures:
            case.add_failure_info(message=failure[1])
            fail_cnt += 1

        for us in result.unexpectedSuccesses:
            case.add_failure_info(message=us)
            err_cnt += 1

        for skip in result.skipped:
            case.add_skipped_info(message=skip[1])

        cases.append(case)

    ts = junit.TestSuite(name="test", test_cases=cases)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        results = junit.TestSuite.to_xml_string([ts], encoding="utf-8")

    if opt.output is None:
        print(results)
    else:
        with open(opt.output, "w") as f:
            f.write(results)

    os.sys.exit(err_cnt | fail_cnt)
Example #24
def main():  # type: () -> int

    args = arg_parser().parse_args(sys.argv[1:])
    if "--" in args.args:
        args.args.remove("--")

    # Remove test arguments with wrong syntax
    if args.testargs is not None:
        args.testargs = [
            testarg for testarg in args.testargs if testarg.count("==") == 1
        ]

    if not args.test:
        arg_parser().print_help()
        return 1

    schema_resource = pkg_resources.resource_stream(__name__,
                                                    "cwltest-schema.yml")
    cache = {
        "https://w3id.org/cwl/cwltest/cwltest-schema.yml":
        schema_resource.read().decode("utf-8")
    }  # type: Optional[Dict[str, Union[str, Graph, bool]]]
    (document_loader, avsc_names, schema_metadata,
     metaschema_loader) = schema_salad.schema.load_schema(
         "https://w3id.org/cwl/cwltest/cwltest-schema.yml", cache=cache)

    if not isinstance(avsc_names, schema_salad.avro.schema.Names):
        print(avsc_names)
        return 1

    tests, metadata = schema_salad.schema.load_and_validate(
        document_loader, avsc_names, args.test, True)

    failures = 0
    unsupported = 0
    passed = 0
    suite_name, _ = os.path.splitext(os.path.basename(args.test))
    report = junit_xml.TestSuite(suite_name, [])

    # the number of total tests, failured tests, unsupported tests and passed tests for each tag
    ntotal = defaultdict(int)  # type: Dict[str, int]
    nfailures = defaultdict(int)  # type: Dict[str, int]
    nunsupported = defaultdict(int)  # type: Dict[str, int]
    npassed = defaultdict(int)  # type: Dict[str, int]

    if args.only_tools:
        alltests = tests
        tests = []
        for t in alltests:
            loader = schema_salad.ref_resolver.Loader({"id": "@id"})
            cwl = loader.resolve_ref(t["tool"])[0]
            if isinstance(cwl, dict):
                if cwl["class"] == "CommandLineTool":
                    tests.append(t)
            else:
                raise Exception("Unexpected code path.")

    if args.tags:
        alltests = tests
        tests = []
        tags = args.tags.split(",")
        for t in alltests:
            ts = t.get("tags", [])
            if any((tag in ts for tag in tags)):
                tests.append(t)

    for t in tests:
        if t.get("label"):
            t["short_name"] = t["label"]

    if args.show_tags:
        alltags = set()  # type: Set[str]
        for t in tests:
            ts = t.get("tags", [])
            alltags |= set(ts)
        for tag in alltags:
            print(tag)
        return 0

    if args.l:
        for i, t in enumerate(tests):
            if t.get("short_name"):
                print(u"[%i] %s: %s" %
                      (i + 1, t["short_name"], t.get("doc", "").strip()))
            else:
                print(u"[%i] %s" % (i + 1, t.get("doc", "").strip()))

        return 0

    if args.n is not None or args.s is not None:
        ntest = []
        if args.n is not None:
            for s in args.n.split(","):
                sp = s.split("-")
                if len(sp) == 2:
                    ntest.extend(list(range(int(sp[0]) - 1, int(sp[1]))))
                else:
                    ntest.append(int(s) - 1)
        if args.s is not None:
            for s in args.s.split(","):
                test_number = get_test_number_by_key(tests, "short_name", s)
                if test_number:
                    ntest.append(test_number)
                else:
                    _logger.error('Test with short name "%s" not found ', s)
                    return 1
    else:
        ntest = list(range(0, len(tests)))

    total = 0
    with ThreadPoolExecutor(max_workers=args.j) as executor:
        jobs = [
            executor.submit(
                run_test,
                args,
                tests[i],
                i + 1,
                len(tests),
                args.timeout,
                args.junit_verbose,
            ) for i in ntest
        ]
        try:
            for i, job in zip(ntest, jobs):
                test_result = job.result()
                test_case = test_result.create_test_case(tests[i])
                test_case.url = "cwltest:{}#{}".format(suite_name, i + 1)
                total += 1
                tags = tests[i].get("tags", [])
                for t in tags:
                    ntotal[t] += 1

                return_code = test_result.return_code
                category = test_case.category
                if return_code == 0:
                    passed += 1
                    for t in tags:
                        npassed[t] += 1
                elif return_code != 0 and return_code != UNSUPPORTED_FEATURE:
                    failures += 1
                    for t in tags:
                        nfailures[t] += 1
                    test_case.add_failure_info(output=test_result.message)
                elif return_code == UNSUPPORTED_FEATURE and category == REQUIRED:
                    failures += 1
                    for t in tags:
                        nfailures[t] += 1
                    test_case.add_failure_info(output=test_result.message)
                elif category != REQUIRED and return_code == UNSUPPORTED_FEATURE:
                    unsupported += 1
                    for t in tags:
                        nunsupported[t] += 1
                    test_case.add_skipped_info("Unsupported")
                else:
                    raise Exception(
                        "This is impossible, return_code: {}, category: "
                        "{}".format(return_code, category))
                report.test_cases.append(test_case)
        except KeyboardInterrupt:
            for job in jobs:
                job.cancel()
            _logger.error("Tests interrupted")

    if args.junit_xml:
        with open(args.junit_xml, "w") as xml:
            junit_xml.TestSuite.to_file(xml, [report])

    if args.badgedir:
        os.mkdir(args.badgedir)
        for t, v in ntotal.items():
            percent = int((npassed[t] / float(v)) * 100)
            if npassed[t] == v:
                color = "green"
            elif t == "required":
                color = "red"
            else:
                color = "yellow"

            with open("{}/{}.json".format(args.badgedir, t), "w") as out:
                out.write(
                    json.dumps({
                        "subject": "{}".format(t),
                        "status": "{}%".format(percent),
                        "color": color,
                    }))

    if failures == 0 and unsupported == 0:
        _logger.info("All tests passed")
        return 0
    if failures == 0 and unsupported > 0:
        _logger.warning("%i tests passed, %i unsupported features",
                        total - unsupported, unsupported)
        return 0
    _logger.warning(
        "%i tests passed, %i failures, %i unsupported features",
        total - (failures + unsupported),
        failures,
        unsupported,
    )
    return 1
Example #25
def main():  # type: () -> int
    parser = argparse.ArgumentParser(
        description='Compliance tests for cwltool')
    parser.add_argument("--test",
                        type=str,
                        help="YAML file describing test cases",
                        required=True)
    parser.add_argument("--basedir",
                        type=str,
                        help="Basedir to use for tests",
                        default=".")
    parser.add_argument("-l", action="store_true", help="List tests then exit")
    parser.add_argument("-n",
                        type=str,
                        default=None,
                        help="Run a specific tests, format is 1,3-6,9")
    parser.add_argument(
        "--tool",
        type=str,
        default="cwl-runner",
        help="CWL runner executable to use (default 'cwl-runner'")
    parser.add_argument("--only-tools",
                        action="store_true",
                        help="Only test CommandLineTools")
    parser.add_argument("--junit-xml",
                        type=str,
                        default=None,
                        help="Path to JUnit xml file")
    parser.add_argument("--test-arg",
                        type=str,
                        help="Additional argument given in test cases and "
                        "required prefix for tool runner.",
                        metavar="cache==--cache-dir",
                        action="append",
                        dest="testargs")
    parser.add_argument("args",
                        help="arguments to pass first to tool runner",
                        nargs=argparse.REMAINDER)
    parser.add_argument(
        "-j",
        type=int,
        default=1,
        help="Specifies the number of tests to run simultaneously "
        "(defaults to one).")
    parser.add_argument("--verbose",
                        action="store_true",
                        help="More verbose output during test run.")

    args = parser.parse_args()
    if '--' in args.args:
        args.args.remove('--')

    # Remove test arguments with wrong syntax
    if args.testargs is not None:
        args.testargs = [
            testarg for testarg in args.testargs if testarg.count('==') == 1
        ]

    if not args.test:
        parser.print_help()
        return 1

    with open(args.test) as f:
        tests = yaml.load(f, Loader=yaml.SafeLoader)

    failures = 0
    unsupported = 0
    passed = 0
    suite_name, _ = os.path.splitext(os.path.basename(args.test))
    report = junit_xml.TestSuite(suite_name, [])

    if args.only_tools:
        alltests = tests
        tests = []
        for t in alltests:
            loader = schema_salad.ref_resolver.Loader({"id": "@id"})
            cwl = loader.resolve_ref(t["tool"])[0]
            if isinstance(cwl, dict):
                if cwl["class"] == "CommandLineTool":
                    tests.append(t)
            else:
                raise Exception("Unexpected code path.")

    if args.l:
        for i, t in enumerate(tests):
            print u"[%i] %s" % (i + 1, t["doc"].strip())
        return 0

    if args.n is not None:
        ntest = []
        for s in args.n.split(","):
            sp = s.split("-")
            if len(sp) == 2:
                ntest.extend(range(int(sp[0]) - 1, int(sp[1])))
            else:
                ntest.append(int(s) - 1)
    else:
        ntest = range(0, len(tests))

    total = 0
    with ThreadPoolExecutor(max_workers=args.j) as executor:
        jobs = [executor.submit(run_test, args, i, tests) for i in ntest]
        try:
            for i, job in zip(ntest, jobs):
                test_result = job.result()
                test_case = test_result.create_test_case(tests[i])
                total += 1
                if test_result.return_code == 1:
                    failures += 1
                    test_case.add_failure_info(output=test_result.message)
                elif test_result.return_code == UNSUPPORTED_FEATURE:
                    unsupported += 1
                    test_case.add_skipped_info("Unsupported")
                else:
                    passed += 1
                report.test_cases.append(test_case)
        except KeyboardInterrupt:
            for job in jobs:
                job.cancel()
            _logger.error("Tests interrupted")

    if args.junit_xml:
        with open(args.junit_xml, 'w') as fp:
            junit_xml.TestSuite.to_file(fp, [report])

    if failures == 0 and unsupported == 0:
        _logger.info("All tests passed")
        return 0
    elif failures == 0 and unsupported > 0:
        _logger.warn("%i tests passed, %i unsupported features",
                     total - unsupported, unsupported)
        return 0
    else:
        _logger.warn("%i tests passed, %i failures, %i unsupported features",
                     total - (failures + unsupported), failures, unsupported)
        return 1
Example #26
                    classname=case.suite.name,
                    timestamp=case.logged,
                )
                if case.result == TestCase.RESULT_FAIL:
                    logs = None
                    # TODO: is this of any use? (yaml inside xml!)
                    if (case.start_log_line is not None
                            and case.end_log_line is not None):
                        logs = logs_instance.read(self.get_object(),
                                                  case.start_log_line,
                                                  case.end_log_line)
                    tc.add_error_info("failed", output=logs)
                elif case.result == TestCase.RESULT_SKIP:
                    tc.add_skipped_info("skipped")
                cases.append(tc)
            suites.append(junit_xml.TestSuite(suite.name, test_cases=cases))

        data = junit_xml.TestSuite.to_xml_string(suites, encoding="utf-8")
        response = HttpResponse(data, content_type="application/xml")
        response["Content-Disposition"] = ("attachment; filename=job_%d.xml" %
                                           self.get_object().id)
        return response

    @detail_route(methods=["get"], suffix="logs")
    def logs(self, request, **kwargs):
        start = safe_str2int(request.query_params.get("start", 0))
        end = safe_str2int(request.query_params.get("end", None))
        try:
            if start == 0 and end is None:
                data = logs_instance.open(self.get_object())
                response = FileResponse(data, content_type="application/yaml")
Example #27
def run_tests(args):
    """
    Run the user defined test scenario.
    """
    TestImage.testRuns = args.testRepeats
    TestImage.warmupRuns = args.testWarmups

    # Create output location
    if not os.path.exists("TestOutput"):
        os.mkdir("TestOutput")

    # Load test resources
    binary = get_test_binary()
    reference = get_test_reference_scores()
    testList = get_test_listing(reference)

    # Run tests
    suites = []
    suite = None
    suiteFormat = None

    # Run tests
    maxCount = len(args.testBlockSize) * len(testList)
    curCount = 0

    statRun = 0
    statSkip = 0
    statPass = 0

    for blockSize in args.testBlockSize:
        for test in testList:
            curCount += 1

            # Skip tests not enabled for the current testing thoroughness level
            if args.testLevel not in test.useLevel:
                statSkip += 1
                continue

            # Skip tests not enabled for the current dynamic range level
            if args.testRange not in test.useRange:
                statSkip += 1
                continue

            # Skip tests not enabled for the current data format
            if args.testFormat not in test.useFormat:
                statSkip += 1
                continue

            # Start a new suite if the format changes
            dat = (test.dynamicRange, test.format, blockSize)
            testFormat = "%s.%s.%s" % dat
            if (not suite) or (suiteFormat != testFormat):
                suiteFormat = testFormat
                suite = juxml.TestSuite("Image %s test suite" % suiteFormat)
                suites.append(suite)
                print("Running suite: %s" % suiteFormat)

            # Run the test
            test.run(binary, blockSize)
            dat = (curCount, maxCount, test.name, blockSize,
                   test.runPSNR[blockSize], test.runTime[blockSize],
                   test.status[blockSize])

            # Log results
            statRun += 1
            if "pass" in test.status[blockSize]:
                statPass += 1

            log = "Ran test %u/%u: %s %s, %0.3f dB, %0.3f s, %s" % dat
            print(" + %s" % log)

            # Generate JUnit result
            caseName = "%s.%s" % (test.name, blockSize)
            case = juxml.TestCase(caseName,
                                  elapsed_sec=test.runTime[blockSize],
                                  stdout=log)
            suite.test_cases.append(case)

            if test.status[blockSize] == "fail":
                dat = (test.runPSNR[blockSize], test.referencePSNR[blockSize])
                msg = "PSNR fail %0.3f dB is worse than %s dB" % dat
                case.add_failure_info(msg)

    # Print summary results
    print("\nSummary")
    if statRun == statPass:
        print("+ PASS (%u ran)" % statRun)
    else:
        print("+ FAIL (%u ran, %u failed)" % (statRun, statRun - statPass))

    # Write the JUnit results file
    with open("TestOutput/results.xml", "w") as fileHandle:
        juxml.TestSuite.to_file(fileHandle, suites)
Example #28
                        sim_passed = True
                    # Check for exit, possibly might have tons of errors and clipped file
                    if 'Exiting xsim' in line:
                        sim_completed = True
    except Exception as ex:
        print('Failed parsing %s: %s'%(log,ex))
        sim_error = True
    # Translate results for JUnit.
    test_name = os.path.basename(log).replace('simulate_','').replace('.log','')
    case = junit_xml.TestCase(test_name, classname='pico_ethernet', timestamp=time.time())
    if sim_error:
        print('error')
        case.add_error_info('Error: Failed parsing simulation results')
    elif not sim_completed:
        print('error')
        case.add_error_info('Error: Simulation results appear to be truncated (could not find Exit message)')
    elif sim_failed:
        print('failed')
        case.add_failure_info('Failure: Error detected in simulation')
    elif sim_passed or IGNORE_MISSING_PASS_MSG:
        print('passed')
        pass # Success!
    else:
        case.add_error_info('Error: Invalid simulation log - unsure of result')
        print('invalid')
    test_cases.append(case)

test_suite = junit_xml.TestSuite('pico_ethernet', test_cases)
with open('sim_results.xml','wt') as fh:
    junit_xml.TestSuite.to_file(fh, [test_suite])
Example #29
    def get_test_suite(self, module_name):
        return junit_xml.TestSuite(
            name=module_name,
            test_cases=self.ran_tests
        )
Example #30
class JunitReport(object):
    # wrapper for junit test report
    # TODO: JunitReport methods are not thread safe (although not likely to be used this way).

    JUNIT_FILE_NAME = 'XUNIT_RESULT.xml'
    JUNIT_DEFAULT_TEST_SUITE = 'test-suite'
    JUNIT_TEST_SUITE = junit_xml.TestSuite(JUNIT_DEFAULT_TEST_SUITE,
                                           hostname=socket.gethostname(),
                                           timestamp=datetime.utcnow().isoformat())
    JUNIT_CURRENT_TEST_CASE = None
    _TEST_CASE_CREATED_TS = 0

    @classmethod
    def output_report(cls, junit_file_path):
        """ Output current test result to file. """
        with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), 'w') as f:
            junit_xml.to_xml_report_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)

    @classmethod
    def get_current_test_case(cls):
        """
        By default, the test framework handles the junit test report automatically.
        Some test cases, however, may want to add extra information to the report;
        they can use this method to get the current test case created by the test framework.

        :return: current junit test case instance created by ``JunitTestReport.create_test_case``
        """
        return cls.JUNIT_CURRENT_TEST_CASE

    @classmethod
    def test_case_finish(cls, test_case):
        """
        Append the test case to the test suite so it can be written to the report file.
        The elapsed time is updated automatically, measured from the matching
        ``create_test_case`` call.
        """
        test_case.elapsed_sec = time.time() - cls._TEST_CASE_CREATED_TS
        cls.JUNIT_TEST_SUITE.test_cases.append(test_case)

    @classmethod
    def create_test_case(cls, name):
        """
        Extend ``junit_xml.TestCase`` with:

        1. saves the created test case so it can be retrieved via ``get_current_test_case``
        2. records the creation timestamp so ``elapsed_sec`` can be updated automatically
           in ``test_case_finish``.

        :param name: test case name
        :return: instance of ``junit_xml.TestCase``
        """
        # Set stdout to an empty string so we can always append text to it.
        # This does not affect the output logic: an empty stdout is not written to the report.
        test_case = junit_xml.TestCase(name, stdout='')
        cls.JUNIT_CURRENT_TEST_CASE = test_case
        cls._TEST_CASE_CREATED_TS = time.time()
        return test_case

    @classmethod
    def update_performance(cls, performance_items):
        """
        Update performance results to ``stdout`` of current test case.

        :param performance_items: a list of performance items; each item is a key-value pair.
        """
        assert cls.JUNIT_CURRENT_TEST_CASE

        for item in performance_items:
            cls.JUNIT_CURRENT_TEST_CASE.stdout += '[{}]: {}\n'.format(item[0], item[1])
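A brief, hypothetical illustration of update_performance: it appends one "[key]: value" line per item to the current test case's stdout, so it must be called between create_test_case and test_case_finish.

case = JunitReport.create_test_case('wifi_throughput')
JunitReport.update_performance([('tx_mbps', 43.2), ('rx_mbps', 38.7)])
JunitReport.test_case_finish(case)
JunitReport.output_report('.')  # writes ./XUNIT_RESULT.xml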