Example 1
def _parse_suite_results(
    test_suite_tuple: Tuple[str, List[Tuple[Decoder, TestSuite]]]
) -> List[junitp.TestSuite]:
    jsuites = []

    test_suite_name, test_suite_results = test_suite_tuple

    for decoder, suite_result in test_suite_results:
        timeouts = 0

        jsuite = junitp.TestSuite(test_suite_name)
        jsuite.add_property("decoder", decoder.name)

        for vector in suite_result.test_vectors.values():
            jcase = junitp.TestCase(vector.name)
            if vector.test_result not in [
                    TestVectorResult.SUCCESS,
                    TestVectorResult.REFERENCE,
            ]:
                jcase.result = _parse_vector_errors(vector)

            jsuite.add_testcase(jcase)

            # With a single job, a timed-out vector stalls the run for the
            # full timeout, so subtract it from the measured wall-clock time
            # below. `ctx` comes from the enclosing scope.
            if vector.test_result is TestVectorResult.TIMEOUT and ctx.jobs == 1:
                timeouts += ctx.timeout

        jsuite.time = round(suite_result.time_taken - timeouts, 3)

        jsuites.append(jsuite)

    return jsuites
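
The function produces one suite per decoder and returns them as a list, so a caller still has to aggregate them into a single report. A minimal consumer sketch, assuming `junitp` is the imported `junitparser` module; `name`, `results`, and the output path are illustrative placeholders:

xml = junitp.JUnitXml()
for suite in _parse_suite_results((name, results)):  # hypothetical inputs
    xml.add_testsuite(suite)
xml.update_statistics()
xml.write("report.xml")  # illustrative output path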
Example 2
def dump_junit_report(suite_res: 'MiniHwResult', artifacts: Path):
    try:
        import junitparser
    except ImportError:
        LOG.warning("No JUNIT generated - junit parser is not installed")
        return
    reportpath = artifacts / 'junit_report.xml'
    LOG.info(f"[REPORT] Generating JUNIT report: {reportpath}")
    suites = junitparser.JUnitXml()
    for task in suite_res.tasks:
        jsuite = junitparser.TestSuite(name=task.task.name)
        for case in task.cases:
            jcase = junitparser.TestCase(name=case.case.name,
                                         classname='/'.join(
                                             case.case.namespace),
                                         time=case.cmd_result.elapsed /
                                         1000000.0 if case.cmd_result else 0)
            if case.is_fail():
                jcase.result = [
                    junitparser.Failure(c.fail_msg())
                    for c in case.checks.values() if c.is_fail
                ]
                if case.cmd_result:
                    jcase.system_out = str(case.cmd_result.stdout)
                    jcase.system_err = str(case.cmd_result.stderr)
            elif case.is_skip():
                jcase.result = [junitparser.Skipped()]
            jsuite.add_testcase(jcase)
        # The original snippet is truncated here; attaching each suite and
        # writing the report completes it, mirroring Example 3 below.
        suites.add_testsuite(jsuite)

    suites.write(str(reportpath))
Example 3
def dump_junit_report(suite_res: 'SuiteRunResult',
                      artifacts: Path) -> Optional[Path]:
    try:
        import junitparser
    except ImportError:
        LOG.warning("No JUNIT generated - junit parser is not installed")
        return None
    report_path = artifacts / 'junit_report.xml'
    LOG.info(f"[REPORT] Generating JUNIT report: {report_path}")
    junit_suites = junitparser.JUnitXml(suite_res.df.name)
    for unit_res in suite_res.units:
        unit_suite = junitparser.TestSuite(name=unit_res.df.name)
        for test_res in unit_res.tests:
            junit_case = junitparser.TestCase(
                name=test_res.df.desc,
                classname=test_res.df.unit.name + '/' + test_res.df.name,
                time=test_res.cmd_result.elapsed /
                1000000.0 if test_res.cmd_result else 0)
            unit_suite.add_testcase(junit_case)
            if test_res.kind.is_pass():
                continue
            fails = []
            for c in test_res.checks:
                fail = junitparser.Failure(c.message)
                fail.text = "\n" + c.fail_msg()
                fails.append(fail)
            junit_case.result = fails
            if test_res.cmd_result:
                junit_case.system_out = str(test_res.cmd_result.stdout)
                junit_case.system_err = str(test_res.cmd_result.stderr)
        junit_suites.add_testsuite(unit_suite)

    junit_suites.write(str(report_path))
    return report_path
Example 4

def add_junit_failure(xml: junitparser.JUnitXml, test: Path, message: str,
                      starttime: datetime.datetime):
    # Record one failed test as its own single-case suite on the report.
    t = junitparser.TestCase(name=test.name)
    t.result = junitparser.Failure(message=str(message))
    t.time = (datetime.datetime.utcnow() - starttime).total_seconds()
    suite = junitparser.TestSuite(name=test.name)
    suite.add_testcase(t)
    xml.add_testsuite(suite)
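
A minimal usage sketch for the helper above; the report object, test path, and message are illustrative assumptions, not taken from the original code:

xml = junitparser.JUnitXml()
start = datetime.datetime.utcnow()
add_junit_failure(xml, Path("tests/smoke.py"),  # hypothetical test path
                  "process exited non-zero", start)
xml.write("junit_report.xml")  # illustrative output path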
Example 5
# Nested helper: the `nonlocal` names live in the enclosing function
# (see Example 11 for the full context).
def add_test_result(junit_result=None):
    nonlocal current_test
    nonlocal current_output
    if current_test is None:
        current_test = junitparser.TestCase("UNKNOWN TEST CASE")
    if junit_result is not None:
        current_test.result = junit_result
    current_test.time = (datetime.datetime.utcnow() -
                         starttime).total_seconds()
    if current_output:
        current_test.system_out = "".join(current_output)
    testsuite.add_testcase(current_test)
    current_test = None
    current_output = []
Example 6
def _to_junit(result, platform=""):
    """
    Convert result to junit format.
    """
    report = junit.JUnitXml()
    if not result:
        return report

    # Append a "." separator only when a platform prefix was given.
    platform += platform and "."

    for target, target_result in result.items():
        suite = junit.TestSuite(platform + target)
        for case_name, case_result in target_result.items():
            case = junit.TestCase(case_name)
            if case_result["status"] == "FAIL":
                case.result = junit.Failure(case_result.get("details", ""))
            if case_result["status"] == "IGNORE":
                case.result = junit.Skipped(case_result.get("details", ""))
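
A short usage sketch for the completed converter; the `results` dict and the output path are illustrative assumptions:

report = _to_junit(results, platform="linux")  # hypothetical result dict
report.update_statistics()
report.write("junit.xml", pretty=True)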
Example 7

def createJunitTestResults(boardToResults, fileName):
    """Create a junit xml test result file.

    Args:
        boardToResults (dict[str, list[OtaTestResult]]): Maps each board name
            to its list of OtaTestResult objects.
        fileName: The name of the junit test file to create.
    """
    report = junit.JUnitXml()
    for board, otaTestResults in boardToResults.items():
        group_suite = junit.TestSuite(board + '.OTAEndToEndTests')
        for otaTestResult in otaTestResults:
            test_case = junit.TestCase(otaTestResult.testName)
            if otaTestResult.result == OtaTestResult.FAIL:
                test_case.result = junit.Failure(otaTestResult.summary)
            elif otaTestResult.result == OtaTestResult.ERROR:
                test_case.result = junit.Skipped(otaTestResult.summary)
            group_suite.add_testcase(test_case)
        report.add_testsuite(group_suite)

    report.update_statistics()
    report.write(fileName, pretty=True)
Example 8
def run_tests(module):
	eprint(" starting " + module["name"] + " test")

	test_cases = []
	for case in module["cases"]:
		# default value is False
		is_inverted = case.get("invert", False)

		# print header line
		# normalise to str in case the number is stored as an int
		test_case_name = str(case["number"])
		if "title" in case:
			test_case_name += " " + case["title"]

		# start line
		start_line = "  starting \"" + test_case_name + "\""
		if is_inverted:
			start_line += " expecting failure"
		eprint(start_line)

		args = make_args(module, case)

		# start subprocess
		completed = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
		expected = (completed.returncode == 0) != is_inverted # != is xor

		print(end_line(test_case_name, completed.returncode, expected))

		# fill test case data
		test_case = junit.TestCase(test_case_name)
		test_case.system_err = '\n'.join(completed.stderr.decode("utf-8").split('\0'))
		test_case.system_out = '\n'.join(completed.stdout.decode("utf-8").split('\0'))
		if not expected:
			if completed.returncode == 0:
				test_case.result = junit.Failure("succeeded unexpectedly")
			else:
				test_case.result = junit.Failure("failed unexpectedly")
		test_cases.append(test_case)

	eprint(" all tests run for module " + module["name"])
	return test_cases
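
`run_tests` returns the per-module cases without writing anything, so report assembly belongs to the caller. A minimal sketch under that assumption; `modules` and the output filename are illustrative:

report = junit.JUnitXml()
for module in modules:  # hypothetical parsed configuration
	suite = junit.TestSuite(module["name"])
	for test_case in run_tests(module):
		suite.add_testcase(test_case)
	report.add_testsuite(suite)
report.update_statistics()
report.write("results.xml", pretty=True)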
Example 9
    def run(self, persist_cluster=False, startup_time=1):
        LOGGER.info("Running test {!r} on cluster {!r}".format(
            self.id, self.cluster_name))

        # Step-0: sanity-check the cluster configuration.
        self.verify_cluster_configuration_matches("initial")

        # Start the test timer.
        timer = Timer()
        timer.start()

        # Step-1: load test data.
        test_data = self.spec.driverWorkload.get('testData')
        if test_data:
            LOGGER.info("Loading test data on cluster {!r}".format(
                self.cluster_name))
            connection_string = self.get_connection_string()
            load_test_data(connection_string, self.spec.driverWorkload)
            LOGGER.info("Successfully loaded test data on cluster {!r}".format(
                self.cluster_name))

        # Step-2: run driver workload.
        self.workload_runner.spawn(
            workload_executor=self.config.workload_executor,
            connection_string=self.get_connection_string(),
            driver_workload=self.spec.driverWorkload,
            startup_time=startup_time)

        # Step-3: begin maintenance routine.
        final_config = self.spec.maintenancePlan.final
        cluster_config = final_config.clusterConfiguration
        process_args = final_config.processArgs

        if not cluster_config and not process_args:
            raise RuntimeError("invalid maintenance plan")

        if cluster_config:
            LOGGER.info("Pushing cluster configuration update")
            self.cluster_url.patch(**cluster_config)

        if process_args:
            LOGGER.info("Pushing process arguments update")
            self.cluster_url.processArgs.patch(**process_args)

        # Sleep before polling to give Atlas time to update cluster.stateName.
        sleep(3)

        # Step-4: wait until maintenance completes (cluster is IDLE).
        selector = BooleanCallablePoller(
            frequency=self.config.polling_frequency,
            timeout=self.config.polling_timeout)
        LOGGER.info("Waiting for cluster maintenance to complete")
        selector.poll([self],
                      attribute="is_cluster_state",
                      args=("IDLE", ),
                      kwargs={})
        self.verify_cluster_configuration_matches("final")
        LOGGER.info("Cluster maintenance complete")

        # Step-5: interrupt driver workload and capture streams
        stats = self.workload_runner.terminate()

        # Stop the timer
        timer.stop()

        # Step-6: compute xunit entry.
        junit_test = junitparser.TestCase(self.id)
        junit_test.time = timer.elapsed

        if (stats['numErrors'] != 0 or stats['numFailures'] != 0
                or stats['numSuccesses'] == 0):
            LOGGER.info("FAILED: {!r}".format(self.id))
            self.failed = True
            # Write xunit logs for failed tests.
            junit_test.result = junitparser.Failure(str(stats))
        else:
            LOGGER.info("SUCCEEDED: {!r}".format(self.id))
            # Directly log output of successful tests as xunit output
            # is only visible for failed tests.

        LOGGER.info("Workload Statistics: {}".format(stats))

        # Step 7: download logs asynchronously and delete cluster.
        # TODO: https://github.com/mongodb-labs/drivers-atlas-testing/issues/4
        if not persist_cluster:
            self.cluster_url.delete()
            LOGGER.info("Cluster {!r} marked for deletion.".format(
                self.cluster_name))

        return junit_test
Example 10
    def run(self, persist_cluster=False, startup_time=1):
        LOGGER.info("Running test {!r} on cluster {!r}".format(
            self.id, self.cluster_name))

        # Step-1: sanity-check the cluster configuration.
        self.verify_cluster_configuration_matches(
            self.spec.initialConfiguration)

        # Start the test timer.
        timer = Timer()
        timer.start()

        # Step-2: run driver workload.
        self.workload_runner.spawn(
            workload_executor=self.config.workload_executor,
            connection_string=self.get_connection_string(),
            driver_workload=self.spec.driverWorkload,
            startup_time=startup_time)

        for operation in self.spec.operations:
            if len(operation) != 1:
                raise ValueError("Operation must have exactly one key: %s" %
                                 operation)

            op_name, op_spec = list(operation.items())[0]

            if op_name == 'setClusterConfiguration':
                # Step-3: begin maintenance routine.
                final_config = op_spec
                cluster_config = final_config.clusterConfiguration
                process_args = final_config.processArgs

                if not cluster_config and not process_args:
                    raise RuntimeError("invalid maintenance plan")

                if cluster_config:
                    LOGGER.info("Pushing cluster configuration update")
                    self.cluster_url.patch(**cluster_config)

                if process_args:
                    LOGGER.info("Pushing process arguments update")
                    self.cluster_url.processArgs.patch(**process_args)

                # Step-4: wait until maintenance completes (cluster is IDLE).
                self.wait_for_idle()
                self.verify_cluster_configuration_matches(final_config)
                LOGGER.info("Cluster maintenance complete")

            elif op_name == 'testFailover':
                # Use a dedicated timer so the overall test timer started
                # above keeps measuring the whole run.
                failover_timer = Timer()
                failover_timer.start()
                timeout = 90

                # DRIVERS-1585: failover may fail due to the cluster not being
                # ready. Retry failover up to a timeout if the
                # CLUSTER_RESTART_INVALID error is returned from the call
                while True:
                    try:
                        self.cluster_url['restartPrimaries'].post()
                    except AtlasApiError as exc:
                        if exc.error_code != 'CLUSTER_RESTART_INVALID':
                            raise
                    else:
                        break

                    if failover_timer.elapsed > timeout:
                        raise PollingTimeoutError(
                            "Could not test failover as cluster wasn't ready")
                    else:
                        sleep(5)

                self.wait_for_idle()

            elif op_name == 'sleep':
                _time.sleep(op_spec)

            elif op_name == 'waitForIdle':
                self.wait_for_idle()

            elif op_name == 'restartVms':
                # Private API endpoint; the response body is not used.
                self.admin_client.nds.groups[self.project.id].clusters[
                    self.cluster_name].reboot.post(api_version='private')

                self.wait_for_idle()

            elif op_name == 'assertPrimaryRegion':
                region = op_spec['region']

                cluster_config = self.cluster_url.get().data
                # Again, use a dedicated timer to avoid clobbering the
                # overall test timer.
                region_timer = Timer()
                region_timer.start()
                timeout = op_spec.get('timeout', 90)

                with mongo_client(self.get_connection_string()) as mc:
                    while True:
                        rsc = mc.admin.command('replSetGetConfig')
                        member = [
                            m for m in rsc['config']['members']
                            if m['horizons']['PUBLIC'] == '%s:%s' % mc.primary
                        ][0]
                        member_region = member['tags']['region']

                        if region == member_region:
                            break

                        if region_timer.elapsed > timeout:
                            raise Exception(
                                "Primary in cluster not in expected region '%s' (actual region '%s')"
                                % (region, member_region))
                        else:
                            sleep(5)

            else:
                raise Exception('Unrecognized operation %s' % op_name)

        # Wait 10 seconds to ensure that the driver is not experiencing any
        # errors after the maintenance has concluded.
        sleep(10)

        # Step-5: interrupt driver workload and capture streams
        stats = self.workload_runner.stop()

        # Stop the timer
        timer.stop()

        # Step-6: compute xunit entry.
        junit_test = junitparser.TestCase(self.id)
        junit_test.time = timer.elapsed

        if (stats['numErrors'] != 0 or stats['numFailures'] != 0
                or stats['numSuccesses'] == 0):
            LOGGER.info("FAILED: {!r}".format(self.id))
            self.failed = True
            # Write xunit logs for failed tests.
            junit_test.result = junitparser.Failure(str(stats))
        else:
            LOGGER.info("SUCCEEDED: {!r}".format(self.id))
            # Directly log output of successful tests as xunit output
            # is only visible for failed tests.

        LOGGER.info("Workload Statistics: {}".format(stats))

        get_logs(admin_client=self.admin_client,
                 project=self.project,
                 cluster_name=self.cluster_name)

        # Step 7: download logs asynchronously and delete cluster.
        # TODO: https://github.com/mongodb-labs/drivers-atlas-testing/issues/4
        if not persist_cluster:
            self.cluster_url.delete()
            LOGGER.info("Cluster {!r} marked for deletion.".format(
                self.cluster_name))

        return junit_test
Example 11
async def run_testrig(args: argparse.Namespace, remaining_args: list,
                      output_dir: str):
    trace_rootdir = Path(args.trace_dir)
    if not trace_rootdir.is_dir():
        sys.exit(str(trace_rootdir) + " does not exist!")
    xunit_output = Path(args.xunit_output)
    if not xunit_output.parent.is_dir():
        sys.exit("invalid xunit output file: " + str(xunit_output))
    global DEBUG
    if args.debug:
        DEBUG = True
    command = [
        str(Path(__file__).parent / "runTestRIG.py"),
        "--trace-dir",
        str(trace_rootdir),
        "--save-dir",
        output_dir,
        "-a",
        args.reference_impl,
        "-b",
        args.test_impl,
        "--no-shrink",
    ] + remaining_args + ["-v", "1"]  # verbosity 1 required for output
    if args.timeout:
        command += ["--timeout", str(args.timeout)]

    info("Running '", " ".join(shlex.quote(s) for s in command), "'", sep="")
    process = await asyncio.create_subprocess_exec(
        command[0],
        *command[1:],
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT)
    xml = junitparser.JUnitXml(name="Regression test " + args.test_impl +
                               " vs " + args.reference_impl)
    testsuite = junitparser.TestSuite(name=args.trace_dir)
    xml.add_testsuite(testsuite)
    current_test = None
    current_output = []  # type: typing.List[str]
    starttime = datetime.datetime.utcnow()

    def add_test_result(junit_result=None):
        nonlocal current_test
        nonlocal current_output
        if current_test is None:
            current_test = junitparser.TestCase("UNKNOWN TEST CASE")
        if junit_result is not None:
            current_test.result = junit_result
        current_test.time = (datetime.datetime.utcnow() -
                             starttime).total_seconds()
        if current_output:
            current_test.system_out = "".join(current_output)
        testsuite.add_testcase(current_test)
        current_test = None
        current_output = []

    async def fatal_error(error_message):
        process.terminate()
        process.kill()
        remaining_stdout = await process.stdout.read()
        debug("Remaining output: ", remaining_stdout)
        current_output.append(remaining_stdout.decode("utf-8"))
        add_test_result(junit_result=junitparser.Error(message=error_message))
        error(error_message)

    while True:
        # No output for N seconds means something went wrong...
        output = b""
        try:
            output = await asyncio.wait_for(process.stdout.readline(), 60)
        except asyncio.TimeoutError:
            try:
                output = await asyncio.wait_for(
                    process.stdout.readuntil(separator=b'\r'), 10)
            except asyncio.TimeoutError:
                await fatal_error("TIMEOUT!")
                break
        if not output and process.stdout.at_eof():
            info("EOF")
            break
        debug("==>TR: \x1b[1;33m",
              output.decode("utf-8").rstrip(),
              "\x1b[0m",
              sep="")
        if output.startswith(b'Reading trace from '):
            # start of testcase
            assert current_test is None, "Reading new test before last one finished?"
            starttime = datetime.datetime.utcnow()
            trace_file = output[len(b'Reading trace from '):].rstrip().decode(
                "utf-8")
            relpath = Path(trace_file).relative_to(trace_rootdir)
            debug("Starting test", relpath, " from", trace_file)
            info("==== Testing:", relpath, "... ", end="\n" if DEBUG else "")
            current_test = junitparser.TestCase(str(relpath))
            current_output = []
            continue
        if output.startswith(b'+++ OK, passed'):
            # End of testcase
            if current_test.result is not None:
                if isinstance(current_test.result, junitparser.Failure):
                    print_coloured("ERROR, but TestRIG reported OK",
                                   colour=AnsiColour.red)
                    continue
                assert isinstance(
                    current_test.result,
                    junitparser.Skipped), "unexpected test result"
                print_coloured("SKIPPED", colour=AnsiColour.yellow)
            else:
                print_coloured("OK", colour=AnsiColour.green)
            assert output == b"+++ OK, passed 1 test.\n", b"output format changed? " + output
            assert current_test is not None
            add_test_result(current_test.result)
            continue
        elif output == b'Failure.\n':
            assert isinstance(
                current_test.result,
                junitparser.Failure), "Didn't see '*** Failed!' message?"
            add_test_result(current_test.result)
            error("FAILED!")

        # Not a marker message -> add to current test output
        if current_output is not None:
            current_output.append(output.decode("utf-8"))

        # Check if test failed:
        if output.startswith(b"*** Failed!"):
            assert current_test is not None
            current_test.result = junitparser.Failure(
                message=output.strip().decode("utf-8"))
        elif output.startswith(b"Error:"):
            error(output.decode("utf-8"))
            assert current_test is not None
            current_test.result = junitparser.Failure(
                message=output.strip().decode("utf-8"))
            if output.startswith(
                    b"Error: implementation A timeout.") or output.startswith(
                        b"Error: implementation B timeout."):
                if current_test.result is None:
                    current_test.result = junitparser.Error(
                        message=output.strip().decode("utf-8"))
                continue
            else:
                await fatal_error("Unknown error: " + output.decode("utf-8"))
                break
        elif output.startswith(b"Warning:"):
            if output.startswith(
                    b"Warning: reporting success since implementations not running"
            ):
                debug("implementations not running!")
                if current_test.result is None:
                    current_test.result = junitparser.Skipped(
                        message=output.strip().decode("utf-8"))
                continue
            await fatal_error("Unknown warning!")
            break

    await process.wait()
    xml.update_statistics()
    print("SUMMARY:")
    print("Total tests:", xml.tests)
    print("Successful: ", xml.tests - xml.failures - xml.errors - xml.skipped)
    print("Failed:     ", xml.failures)
    print("ERRORS:     ", xml.errors)
    print(xml)
    if xml.failures != 0:
        print("Minimized cases: ")
        subprocess.check_call(["find", str(output_dir)], cwd=str(output_dir))
    if str(xunit_output) != "/dev/null":
        xml.write(filepath=str(xunit_output), pretty=True)
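
To sanity-check what was written, junitparser can read the report back; a minimal sketch, assuming a real `xunit_output` path was used:

check = junitparser.JUnitXml.fromfile(str(xunit_output))
print("re-read:", check.tests, "tests,", check.failures, "failures,",
      check.errors, "errors,", check.skipped, "skipped")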