Beispiel #1
0
 def test_console_pass_with_retries(self):
     """Console report for one retried failure plus one pass.

     Checks the report line count and that the summary shows a PASS
     verdict, a 50.00 % raw passrate and a 100.00 % passrate when
     retried results are excluded.
     """
     saved_stdout = sys.stdout
     results = ResultList()
     res1 = Result()
     res1.set_verdict("fail", 1001, 1)
     res1.retries_left = 1
     res2 = Result()
     res2.set_verdict("pass", 0, 1)
     res2.retries_left = 0
     results.append(res1)
     results.append(res2)
     try:
         out = StringIO()
         sys.stdout = out
         report = ReportConsole(results)
         report.generate()
         output = out.getvalue().strip()
         lines = output.split("\n")
         self.assertEqual(len(lines), 17)
         # Fix: pass the asserted line itself as the failure message
         # (was off by one: lines[4]/lines[5]).
         self.assertRegexpMatches(lines[3], "Yes", lines[3])
         self.assertRegexpMatches(lines[4], "No", lines[4])
         self.assertRegexpMatches(lines[9], "Final Verdict.*PASS", lines[9])
         self.assertRegexpMatches(lines[10], "count.*2", lines[10])
         self.assertRegexpMatches(lines[11], r"passrate.*50.00 \%",
                                  lines[11])
         self.assertRegexpMatches(lines[12],
                                  r"passrate excluding retries.*100.00 \%",
                                  lines[12])
     finally:
         sys.stdout = saved_stdout
Beispiel #2
0
 def test_html_report(self):
     """Generated HTML report contains as many pass rows as fail rows."""
     pass_result = Result({
         "testcase": "test-case-A1",
         "verdict": "PASS",
         "duration": 20
     })
     failres = Result({
         "testcase": "test-case-A4",
         "verdict": "FAIL",
         "reason": "unknown",
         "duration": 120
     })
     results = ResultList()
     results.append(pass_result)
     results.append(failres)
     html_report = ReportHtml(results)
     # pylint: disable=protected-access
     raw_html = html_report._create(title='Test Results',
                                    heads={
                                        'Build': '',
                                        'Branch': ""
                                    },
                                    refresh=None)
     doc = html.document_fromstring(raw_html)
     body = doc.get_element_by_id("body")
     self.assertEqual(len(body.find_class("item_pass")),
                      len(body.find_class("item_fail")))
Beispiel #3
0
    def test_junit_hex_escape_support(self):
        """Non-printable failure reasons appear hex-escaped in junit output.

        The raw byte reason must be rendered in the failure message exactly
        as produced by hex_escape_str().
        """
        reprstring = hex_escape_str(b'\x00\x00\x00\x00\x00\x00\x01\xc8')
        str_should_be = '<testsuite failures="1" tests="1" errors="0" skipped="0">\n\
            <testcase classname="test-case-A1" name="unknown" time="20">\n\
                <failure message="' + reprstring + '"></failure>\n\
            </testcase>\n\
        </testsuite>'

        results = ResultList()
        results.append(
            Result({
                "testcase": "test-case-A1",
                "verdict": "FAIL",
                "reason": b'\x00\x00\x00\x00\x00\x00\x01\xc8',
                "duration": 20
            }))
        junit = ReportJunit(results)
        str_report = junit.to_string()
        xml_report = ET.fromstring(str_report)
        shouldbe_report = ET.fromstring(str_should_be)
        self.assertDictEqual(xml_report.attrib, shouldbe_report.attrib)
        reported_tc = xml_report.find("testcase")
        failure_reason = reported_tc.find("failure")
        required_tc = shouldbe_report.find("testcase")
        required_reason = required_tc.find("failure")
        # assertEqual gives a useful diff on failure, unlike assertTrue(a == b).
        self.assertEqual(required_reason.attrib["message"],
                         failure_reason.attrib["message"])
Beispiel #4
0
    def parse_unity_result(self, dut):
        """
        Parses given DUTs stdout for Unity test results.
        :param dut: icetea DUT to inspect the results from
        """
        re_unity_test = re.compile(
            r".*\<\*\*\*UnityTest\*\*\*\>TEST\((?P<suite>.*?), (?P<test>.*?)\)\<\/\*\*\*UnityTest\*\*\*\>")  # noqa: E501 # pylint: disable=line-too-long
        re_unity_result = re.compile(
            r".*\<\*\*\*UnityResult\*\*\*\>(?P<result>.*?)\<\/\*\*\*UnityResult\*\*\*\>")  # noqa: E501 # pylint: disable=line-too-long

        test_output = ""
        test_active = False
        test_results = ResultList()
        for line in dut.traces:
            line = line.strip()

            # Activate test output logging
            match = re_unity_test.match(line)
            if match:
                test_active = True
                test_output = ""
                unity_name = match.group("test")
                unity_suite = match.group("suite")
                self.logger.info("parsing %s.%s", unity_suite, unity_name)

            # Log test output
            if test_active:
                test_output += line + "\n"

            # Check if test is over
            match = re_unity_result.match(line)
            if match and test_active:
                unity_result = match.group("result")

                # Create icetea Result()
                test_result = Result(
                    {
                        "testcase": unity_suite + "." + unity_name,
                        "stdout": test_output,
                        "reason": line if unity_result == "FAIL" else ""
                    })
                test_result.build_result_metadata({"toolchain": unity_name})
                # Would need to do runtime analysis to get duration
                test_result.set_verdict(unity_result, 0, 0.0)
                test_results.append(test_result)
                test_active = False
                self.logger.info("finished %s", unity_name)
        return test_results
Beispiel #5
0
 def test_reportconsole_one_results(self):
     """Console report of a single default result is INCONCLUSIVE."""
     saved_stdout = sys.stdout
     results = ResultList()
     results.append(Result())
     try:
         buffer = StringIO()
         sys.stdout = buffer
         ReportConsole(results).generate()
         lines = buffer.getvalue().strip().split("\n")
         self.assertEqual(len(lines), 15)
         self.assertRegexpMatches(lines[8], r"Final Verdict.*INCONCLUSIVE",
                                  lines[8])
         self.assertRegexpMatches(lines[9], r"count.*1", lines[9])
         self.assertRegexpMatches(lines[10], r"passrate.*0.00 \%",
                                  lines[10])
     finally:
         sys.stdout = saved_stdout
Beispiel #6
0
 def test_console_multiple_results_with_retries(self):
     """Console summary for a retried fail, a pass and three inconclusives.

     The overall verdict must be INCONCLUSIVE with a 50.00 % passrate
     (inconclusive results do not count toward the rate).
     """
     saved_stdout = sys.stdout
     results = ResultList()
     res1 = Result()
     res1.set_verdict("fail", 1001, 1)
     res1.retries_left = 1
     res2 = Result()
     res2.set_verdict("pass", 0, 1)
     res2.retries_left = 0
     results.append(res1)
     results.append(res2)
     results.append(Result())
     results.append(Result())
     results.append(Result())
     try:
         out = StringIO()
         sys.stdout = out
         report = ReportConsole(results)
         report.generate()
         output = out.getvalue().strip()
         lines = output.split("\n")
         self.assertEqual(len(lines), 21)
         # Fix: pass the asserted line itself as the failure message
         # (was off by one: lines[4]/lines[5]).
         self.assertRegexpMatches(lines[3], "Yes", lines[3])
         self.assertRegexpMatches(lines[4], "No", lines[4])
         self.assertRegexpMatches(lines[12], "Final Verdict.*INCONCLUSIVE",
                                  lines[12])
         self.assertRegexpMatches(lines[13], "count.*5", lines[13])
         self.assertRegexpMatches(lines[14], r"passrate.*50.00 \%",
                                  lines[14])
     finally:
         sys.stdout = saved_stdout
Beispiel #7
0
    def test_junit_default(self):
        """A single passing result is reported as one junit testcase."""
        expected_xml = '<testsuite failures="0" tests="1" errors="0" skipped="0">\n\
            <testcase classname="test-case-A1" name="unknown" time="20"></testcase>\n\
        </testsuite>'

        single = Result({
            "testcase": "test-case-A1",
            "verdict": "PASS",
            "duration": 20
        })
        results = ResultList()
        results.append(single)
        junit = ReportJunit(results)
        xml_shouldbe = ET.fromstring(expected_xml)
        report_xml = ET.fromstring(junit.to_string())
        self.assertDictEqual(xml_shouldbe.attrib, report_xml.attrib)
        self.assertDictEqual(
            xml_shouldbe.find("testcase").attrib,
            report_xml.find("testcase").attrib)
        self.assertEqual(len(xml_shouldbe.findall("testcase")),
                         len(report_xml.findall("testcase")))
Beispiel #8
0
    def test_reportconsole_multiple_results(self):  # pylint: disable=invalid-name
        """Console report of three default results is INCONCLUSIVE overall."""
        saved_stdout = sys.stdout
        results = ResultList()
        for _ in range(3):
            results.append(Result())
        try:
            buffer = StringIO()
            sys.stdout = buffer
            ReportConsole(results).generate()
            lines = buffer.getvalue().strip().split("\n")
            self.assertEqual(len(lines), 17)
            self.assertRegexpMatches(lines[10], r"Final Verdict.*INCONCLUSIVE",
                                     lines[10])
            self.assertRegexpMatches(lines[11], r"count.*3", lines[11])
            self.assertRegexpMatches(lines[12], r"passrate.*0.00 \%",
                                     lines[12])
        finally:
            sys.stdout = saved_stdout
Beispiel #9
0
 def test_get_summary(self):
     """get_summary() aggregates verdict counts, retries and duration."""
     res = Result(kwargs={"retcode": 0})
     res2 = Result(kwargs={"retcode": 1})
     res2.set_verdict(verdict="fail", retcode=1, duration=5)
     res3 = Result()
     res3.set_verdict("inconclusive", 4, 5)
     res3.retries_left = 1
     resultlist = ResultList()
     for item in (res, res2, res3):
         resultlist.append(item)
     expected = {
         "count": 3,
         "pass": 1,
         "fail": 1,
         "skip": 0,
         "inconclusive": 1,
         "retries": 1,
         "duration": 10
     }
     self.assertDictEqual(resultlist.get_summary(), expected)
Beispiel #10
0
    def test_append(self):
        """append() accepts Result and ResultList, rejects anything else."""
        rlist = ResultList()
        result1 = Result()
        rlist.append(result1)
        self.assertListEqual(rlist.data, [result1])

        # Appending a ResultList merges its entries into this list.
        result2 = Result()
        other = ResultList()
        other.append(result2)
        rlist.append(other)
        self.assertListEqual(rlist.data, [result1, result2])

        # Anything that is neither Result nor ResultList raises TypeError.
        with self.assertRaises(TypeError):
            rlist.append(["test"])
Beispiel #11
0
 def test_reportconsole_skip(self):
     """Console report for a single skipped testcase.

     A skipped-only run reports an overall PASS, a 0.00 % passrate and
     one skip in the summary.
     """
     saved_stdout = sys.stdout
     results = ResultList()
     res = Result()
     res.skip_reason = "Skip_reason"
     res.set_verdict("skip", -1, -1)
     results.append(res)
     try:
         out = StringIO()
         sys.stdout = out
         report = ReportConsole(results)
         report.generate()
         output = out.getvalue().strip()
         lines = output.split("\n")
         self.assertEqual(len(lines), 15)
         self.assertRegexpMatches(lines[3], r"skip.*Skip_reason")
         self.assertRegexpMatches(lines[8], r"Final Verdict.*PASS",
                                  lines[8])
         self.assertRegexpMatches(lines[9], r"count.*1", lines[9])
         # Fix: failure messages now quote the asserted line
         # (both previously passed lines[10]).
         self.assertRegexpMatches(lines[11], r"passrate.*0.00 \%",
                                  lines[11])
         self.assertRegexpMatches(lines[12], r"skip.*1", lines[12])
     finally:
         sys.stdout = saved_stdout
Beispiel #12
0
    def test_reportconsole_decodefail(self):
        """Console report copes with a fail reason holding raw control bytes."""
        saved_stdout = sys.stdout
        failing_message = "\x00\x00\x00\x00\x00\x00\x01\xc8"
        res = Result()
        res.set_verdict("fail", 1001, 0)
        res.fail_reason = failing_message
        results = ResultList()
        results.append(res)
        try:
            buffer = StringIO()
            sys.stdout = buffer
            ReportConsole(results).generate()
            lines = buffer.getvalue().strip().split("\n")

            self.assertEqual(len(lines), 15)
            self.assertRegexpMatches(lines[8], r"Final Verdict.*FAIL",
                                     lines[8])
            self.assertRegexpMatches(lines[9], r"count.*1", lines[9])
            self.assertRegexpMatches(lines[10], r"passrate.*0.00 \%",
                                     lines[10])
        finally:
            sys.stdout = saved_stdout
Beispiel #13
0
 def test_get_verdict(self):
     """get_verdict() aggregates list verdicts.

     A fail dominates passes; inconclusive is reported only when no
     fail is present.
     """
     res = Result(kwargs={"retcode": 0})
     reslist = ResultList()
     reslist.append(res)
     # assertEquals is a deprecated alias (removed in Python 3.12);
     # use assertEqual.
     self.assertEqual(reslist.get_verdict(), "pass")
     res2 = Result(kwargs={"retcode": 1})
     res2.set_verdict(verdict="fail", retcode=1, duration=10)
     reslist.append(res2)
     self.assertEqual(reslist.get_verdict(), "fail")
     res3 = Result()
     res3.set_verdict("inconclusive", 4, 1)
     reslist = ResultList()
     reslist.append(res3)
     self.assertEqual(reslist.get_verdict(), "inconclusive")
     reslist.append(res2)
     self.assertEqual(reslist.get_verdict(), "fail")
Beispiel #14
0
 def test_retries_count(self):
     """retry_count() counts only results that still have retries left."""
     res = Result(kwargs={"retcode": 0})
     res2 = Result(kwargs={"retcode": 1})
     res2.set_verdict(verdict="fail", retcode=1, duration=5)
     res3 = Result()
     res3.set_verdict("inconclusive", 4, 5)
     res3.retries_left = 1
     resultlist = ResultList()
     for item in (res, res2, res3):
         resultlist.append(item)
     self.assertEqual(resultlist.retry_count(), 1)
     # A result with default retries_left does not add to the count.
     res4 = Result()
     res4.set_verdict("inconclusive", 4, 5)
     resultlist.append(res4)
     self.assertEqual(resultlist.retry_count(), 1)
Beispiel #15
0
 def test_pass_rate(self):
     """pass_rate() formatting with inconclusive/skip inclusion flags."""
     res = Result(kwargs={"retcode": 0})
     res.set_verdict(verdict="pass", retcode=0, duration=0)
     res2 = Result(kwargs={"retcode": 1})
     res2.set_verdict(verdict="fail", retcode=1, duration=5)
     res3 = Result()
     res3.set_verdict("inconclusive", 4, 5)
     res4 = Result(kwargs={"retcode": 1})
     res4.set_verdict(verdict="skip", retcode=1, duration=5)
     resultlist = ResultList()
     resultlist.append(res)
     resultlist.append(res2)
     resultlist.append(res3)
     # assertEquals is a deprecated alias (removed in Python 3.12);
     # use assertEqual.
     self.assertEqual(resultlist.pass_rate(), "50.00 %")
     self.assertEqual(resultlist.pass_rate(include_inconclusive=True),
                      "33.33 %")
     self.assertEqual(resultlist.pass_rate(include_skips=True), "50.00 %")
     resultlist.append(res4)
     self.assertEqual(
         resultlist.pass_rate(include_skips=True,
                              include_inconclusive=True), "25.00 %")
Beispiel #16
0
class Testcase(Bench):
    """
    Testcase class implementing PAL Unity test runner

    Waits for a PAL Unity test run to complete on a single DUT and
    converts the Unity trace markers into icetea Result objects.
    """
    def __init__(self):
        # One DUT, no extra CLI commands before or after the run.
        Bench.__init__(self,
                       name="pal_test_runner",
                       type="smoke",
                       requirements={
                           "duts": {
                               '*': {
                                   "count": 1,
                                   "application": {
                                       "init_cli_cmds": [],
                                       "post_cli_cmds": []
                                   }
                               }
                           }
                       })
        # Index of the first DUT trace line not yet echoed by
        # print_dut_log_to_trace().
        self.workaround_dut_lineno = 0

    def parse_unity_result(self, dut):
        """
        Parses given DUTs stdout for Unity test results.
        :param dut: icetea DUT to inspect the results from
        :return: ResultList with one Result per completed Unity test
        """
        re_unity_test = re.compile(
            r".*\<\*\*\*UnityTest\*\*\*\>TEST\((?P<suite>.*?), (?P<test>.*?)\)\<\/\*\*\*UnityTest\*\*\*\>")  # noqa: E501 # pylint: disable=line-too-long
        re_unity_result = re.compile(
            r".*\<\*\*\*UnityResult\*\*\*\>(?P<result>.*?)\<\/\*\*\*UnityResult\*\*\*\>")  # noqa: E501 # pylint: disable=line-too-long

        test_output = ""
        test_active = False
        test_results = ResultList()
        for line in dut.traces:
            line = line.strip()

            # Activate test output logging
            match = re_unity_test.match(line)
            if match:
                test_active = True
                test_output = ""
                unity_name = match.group("test")
                unity_suite = match.group("suite")
                self.logger.info("parsing %s.%s", unity_suite, unity_name)

            # Log test output (includes the TEST(...) marker line itself)
            if test_active:
                test_output += line + "\n"

            # Check if test is over
            match = re_unity_result.match(line)
            if match and test_active:
                unity_result = match.group("result")

                # Create icetea Result()
                test_result = Result(
                    {
                        "testcase": unity_suite + "." + unity_name,
                        "stdout": test_output,
                        # On FAIL the raw result line is stored as the reason.
                        "reason": line if unity_result == "FAIL" else ""
                    })
                test_result.build_result_metadata({"toolchain": unity_name})
                # Would need to do runtime analysis to get duration
                test_result.set_verdict(unity_result, 0, 0.0)
                test_results.append(test_result)
                test_active = False
                self.logger.info("finished %s", unity_name)
        return test_results

    def print_dut_log_to_trace(self):
        """
        Workaround to print all DUT log without all icetea logs.

        Only lines added since the previous call are printed; the
        position is kept in self.workaround_dut_lineno.
        """
        for line in self.duts[0].traces[self.workaround_dut_lineno:]:
            self.logger.info(line)
            self.workaround_dut_lineno += 1

    def check_execution_errors(self):
        """
        Checks that there are no execution errors during PAL Unity tests.

        :raises TestStepError: when a known fatal error string is found
            in the DUT trace.
        """
        errors = ["MbedOS Fault Handler", "CMSIS-RTOS error"]
        for error in errors:
            if self.verify_trace(0, error, False):
                raise TestStepError(error)

    def wait_trace(self, stop_match_str, duration):
        """
        Waits a given trace for the given time.
        :param stop_match_str: Stop waiting when this string is found
        :param duration: How long to wait for a string
        :raises TestStepTimeout: when the string is not seen in time
        :raises TestStepError: via check_execution_errors() on fatal traces
        """
        # Wait tests to pass (set timeout on suite level)
        wait_start = time.time()
        while (time.time()-wait_start) < duration:
            if self.verify_trace(0, stop_match_str, False):
                return
            self.logger.info("waiting \"%s\"...%.2fs",
                             stop_match_str, duration-(time.time()-wait_start))
            self.print_dut_log_to_trace()
            self.check_execution_errors()
            time.sleep(1)
        raise TestStepTimeout(
            "Didn't get \"{}\" in {}".format(stop_match_str, duration))

    def fail_testcase(self, error):
        """
        Workaround that overwrites icetea's result value in order to
        store the test binary name in the results. Without this workaround
        the testcase name would be "pal_test_runner" which is not descriptive.
        :param error: Python error that failed the test case
        """
        # Overwrite test case result
        self._results = ResultList()
        self._results.append(
            Result({
                "testcase": (self.config["requirements"]
                             ["duts"]["*"]["application"]["bin"]),
                "reason": repr(error),
                "verdict": "FAIL"
            })
        )

    def setup(self):  # pylint: disable=method-hidden
        """
        Pre-test activities.
        """
        pass

    def case(self):
        """
        Testcase activities.
        """
        try:
            # Wait test case
            self.wait_trace("PAL_TEST_START", 60)
            self.wait_trace("PAL_TEST_END", 600)
        except (TestStepError, TestStepTimeout) as error:
            self.fail_testcase(error)
            return

        # Overwrite previously written test case result.
        #   If no errors during execution, parse errors.
        #   Don't put this into teardown() as it is executed even if
        #   the case raises TestStepFail. This then causes test to "pass"
        #   even with stack overflow on DUT
        self._results = self.parse_unity_result(self.duts[0])

    def teardown(self):  # pylint: disable=method-hidden
        """
        Post-test activities
        """
        pass
Beispiel #17
0
class TestSuite(object):
    def __init__(self, logger=None, cloud_module=None, args=None):
        """Collect suite configuration and build the internal testcase list.

        :param logger: logger to use; a default console logger is created
            when None is given.
        :param cloud_module: optional cloud client used for result uploads.
        :param args: parsed command line arguments object.
        """
        if logger is not None:
            self.logger = logger
        else:
            import logging
            self.logger = logging.getLogger("TestSuite")
            if not self.logger.handlers:
                self.logger.addHandler(logging.StreamHandler())
                self.logger.setLevel(logging.INFO)
        self.args = args
        self.cloud_module = cloud_module
        self._testcases = []
        self._default_configs = {}
        self.status = TestStatus.PENDING
        self._results = ResultList()
        self._create_tc_list()

    def __len__(self):
        return len(self._testcases)

    def get_testcases(self):
        """
        Return internal list of TestcaseContainers

        :return: list of TestcaseContainer objects held by this suite.
        """
        return self._testcases

    def get_tcnames(self):
        """
        Return list of names of all test cases in this Suite.

        :return: list
        """
        return [tc.get_name() for tc in self._testcases]

    def run(self):
        """
        Test runner

        Runs every test case in the suite, honouring the suite-level
        repeat count (--repeat) and the per-suite default iteration and
        retry configuration.

        :return: ResultList of all collected results.
        """
        self.status = TestStatus.RUNNING
        self.logger.info("Starting suite.")
        i = 0
        # --repeat below 2 (or unset) means a single pass over the suite.
        repeats = int(self.args.repeat) if self.args.repeat and int(
            self.args.repeat) >= 2 else 1
        repeat = 1
        self.logger.debug("Test suite repeats: %i", repeats)
        while repeat <= repeats:

            self.logger.info("Starting repeat %i of %i", repeat, repeats)
            repeat += 1
            for test in self._testcases:
                self.logger.debug("Starting next test case: %s",
                                  test.get_name())
                iterations = self.get_default_configs().get('iteration', 1)
                if iterations == 0:
                    # Iteration count 0 means: skip this test case entirely.
                    continue
                iteration = 0
                while iteration < iterations:
                    self.logger.info("Iteration %i of %i", iteration + 1,
                                     iterations)
                    retries = self.get_default_configs().get("retryCount", 0)
                    self.logger.debug("Test case retries: %i", retries)
                    retryreason = self.get_default_configs().get(
                        "retryReason", "inconclusive")
                    iteration += 1
                    if self.args.forceflash_once:
                        # Force a flash only for the very first test run.
                        self.args.forceflash = i == 0
                        self.logger.debug(
                            "Forceflash_once set: Forceflash is %s",
                            self.args.forceflash)
                        i += 1
                    try:
                        # _run_testcase may fast-forward repeat/iteration to
                        # terminate the enclosing loops early.
                        result, retries, repeat, iteration = self._run_testcase(
                            test, retries, repeat, repeats, iteration,
                            iterations, retryreason)
                    except KeyboardInterrupt:
                        self.logger.error("Test run aborted.")
                        self.status = TestStatus.FINISHED
                        return self._results
                    self._upload_results(result)
                # 'result' is the outcome of this test case's last iteration.
                if result and result.get_verdict() not in [
                        'pass', 'skip'
                ] and self.args.stop_on_failure:
                    break
        self.status = TestStatus.FINISHED
        # NOTE(review): this increment has no effect — 'i' is a local and the
        # method returns immediately afterwards.
        i += 1
        return self._results

    def _run_testcase(self, test, retries, repeat, repeats, iteration,
                      iterations, retryreason):
        """
        Internal runner for handling a single test case run in the suite.
        Repeats and iterations are handled outside this function.

        :param test: TestcaseContainer to be run
        :param retries: Amount of retries desired
        :param repeat: Current repeat index
        :param repeats: Total amount of repeats
        :param iteration: Current iteration index
        :param iterations: Total number of iterations
        :param retryreason: suite related parameter for which test verdicts to retry.
        :return: (Result, retries(int), repeat(int), iteration(int))
        """
        result = None
        # NOTE(review): if test.run() ever returned None this would loop
        # forever — presumably it always returns a Result or ResultList.
        while True:
            try:
                self.logger.debug("Starting test case run.")
                result = test.run(forceflash=self.args.forceflash)
                result.retries_left = retries
            except KeyboardInterrupt:
                # Fast-forward the caller's loop counters so the whole
                # suite stops, then re-raise for the caller to handle.
                self.logger.info("User aborted test run")
                iteration = iterations
                repeat = repeats + 1
                raise
            if result is not None:

                # Test had required attributes and ran succesfully or was skipped.
                # Note that a fail *during* a testcase run will still be reported.
                if not isinstance(result, ResultList):
                    result.build_result_metadata(args=self.args)
                self._results.append(result)
                if self.args.stop_on_failure and result.get_verdict() not in [
                        'pass', 'skip'
                ]:
                    # Stopping run on failure,
                    # NOTE(review): iteration is set to iterations + 1 here but
                    # only to iterations in the skip branch — confirm the
                    # asymmetry is intentional.
                    self.logger.info(
                        "Test case %s failed or was inconclusive, "
                        "stopping run.\n", test.get_name())
                    repeat = repeats + 1
                    iteration = iterations + 1
                    break
                if result.get_verdict() == 'pass':
                    self.logger.info("Test case %s passed.\n", test.get_name())
                    break
                if result.get_verdict() == 'skip':
                    iteration = iterations
                    result.retries_left = 0
                    self.logger.info("Test case %s skipped.\n",
                                     test.get_name())
                    break
                elif retries > 0:
                    # Retry only when the suite config asks for this verdict
                    # type to be retried.
                    if retryreason == "includeFailures" or (
                            retryreason == "inconclusive"
                            and result.inconclusive):
                        self.logger.error(
                            "Testcase %s failed, %d "
                            "retries left.\n", test.get_name(), retries)
                        retries -= 1
                        # Upload the failed attempt before retrying.
                        self._upload_results(result)
                        continue
                    else:
                        result.retries_left = 0
                        break
                else:
                    self.logger.error(
                        "Test case %s failed, No retries left.\n",
                        test.get_name())
                    break
        return result, retries, repeat, iteration

    def _upload_results(self, result):
        """
        Upload result to cloud.

        :param result: Result object
        :return: Nothing
        """
        if self.cloud_module:
            self.logger.debug("Uploading results to DB.")
            if isinstance(result, Result):
                self._upload_result(result)
            elif isinstance(result, ResultList):
                for result_item in result:
                    self._upload_result(result_item)

    def _upload_result(self, result_object):
        if not result_object.uploaded:
            response_data = self.cloud_module.send_result(result_object)
            if response_data:
                data = response_data
                self.logger.info("Results sent to the server. ID: %s",
                                 data.get('_id'))
                result_object.uploaded = True

    def get_default_configs(self):
        """
        Get suite default configs

        :return: dict of suite-level default settings (e.g. iteration,
            retryCount, retryReason) parsed from the suite file.
        """
        return self._default_configs

    def get_results(self):
        """
        Return results

        :return: ResultList of results collected so far.
        """
        return self._results

    def list_testcases(self):
        """
        List all test cases in this Suite in a neat table.

        :return: PrettyTable
        """
        testcases = []
        try:
            if self.args.json:
                self._create_json_objects(testcases)
                if self.args.export:
                    self._create_suite_file(testcases, self.args.export)
                return json.dumps(testcases)
            else:
                self._create_rows_for_table(testcases)
                from prettytable import PrettyTable
                table = PrettyTable([
                    "Index", "Name", "Status", "Type", "Subtype", "Group",
                    "Component", "Feature", "Allowed platforms"
                ])
                table.align["Index"] = "l"
                for row in testcases:
                    table.add_row(row)
                return table
        except TypeError:
            self.logger.error(
                "Error, print_list_testcases: error during iteration.")
            return

    def _create_suite_file(self, testcases, suite_name):  # pylint: disable=no-self-use
        base_json = dict()
        base_json["default"] = {}
        base_json["testcases"] = []
        for testcase in testcases:
            base_json["testcases"].append({"name": testcase["name"]})
        with open(suite_name, "w") as filehandle:
            filehandle.write(json.dumps(base_json))

    def _create_json_objects(self, testcases):
        for testcase in self._testcases:
            info = testcase.get_instance_config()
            testcases.append(info)

        return testcases

    def _create_rows_for_table(self, rows):
        index = 0
        for testcase in self._testcases:
            info = testcase.get_infodict()
            try:
                index += 1
                grp = info.get('group')
                if grp:
                    group = os.sep.join(info.get('group').split(os.sep)[1:])
                    if not group:
                        group = "no group"
                else:
                    group = "no group"
                rows.append([
                    index,
                    info.get('name'),
                    info.get('status'),
                    info.get('type'),
                    info.get('subtype'), group,
                    info.get('comp'),
                    info.get('feature'),
                    info.get("allowed_platforms")
                ])
            except KeyError:
                self.logger.error(
                    "Error, printListTestcases: Testcase list item with "
                    "index %d missing attributes.", index)

    def update_testcases(self):
        """
        Update test cases of this Suite from cloud.

        :return: Nothing
        """
        if not self.cloud_module:
            self.logger.error("Cloud module has not been initialized! "
                              "Skipping testcase update.")
            return False
        else:
            for testcase in self._testcases:
                try:
                    tc_instance = testcase.get_instance()
                    self.cloud_module.update_testcase(tc_instance.config)
                except Exception as err:  # pylint: disable=broad-except
                    self.logger.error(err)
                    self.logger.debug("Invalid TC: " + testcase.tcname)

    @staticmethod
    def get_suite_files(path):
        """
        Static method for finding all suite files in path.

        :param path: Search path
        :return: List of json files.
        """
        return_list = []
        if not isinstance(path, str):
            return return_list
        if not os.path.exists(path):
            return return_list
        for _, _, files in os.walk(path):
            for fil in sorted(files):
                _, extension = os.path.splitext(fil)
                if extension != '.json':
                    continue
                return_list.append(fil)
        return return_list

    def _create_tc_list(self):
        """
        Parses testcase metadata from suite file or from testcase list in args.
        Sets TestSuite status to 'parsed' to indicate that it has not yet been prepared.

        :raises: SuiteException
        """
        suite = None
        if self.args.suite:
            if os.path.exists(os.path.abspath(self.args.suite)):
                # If suite can be found using just the suite argument, we use that.
                suitedir, filename = os.path.split(
                    os.path.abspath(self.args.suite))
            elif os.path.exists(self.args.suitedir):
                suitedir = self.args.suitedir
                # We presume that this is just the filename, or a path relative to the suitedir.
                filename = self.args.suite
            else:
                raise SuiteException(
                    "Suite creation from file failed. Unable to determine suite "
                    "directory. Check --suitedir and --suite.")
            suite = self._load_suite_file(filename, suitedir)
            if not suite:
                raise SuiteException(
                    "Suite creation from file failed. "
                    "Check your suite file format, path and access rights.")
            self._default_configs = suite.get("default", {})
            tcnames = []
            for i, testcase in enumerate(suite.get("testcases")):
                tcnames.append(str(testcase.get("name")))
            testcases = self._get_suite_tcs(self.args.tcdir, tcnames)
            if not testcases:
                raise SuiteException(
                    "Suite creation failed: Unable to find or filter testcases."
                )
            self._testcases = testcases
            self._print_search_errors()
            if len(testcases) != len(suite.get("testcases")):
                raise SuiteException(
                    "Suite creation from file failed: "
                    "Number of requested testcases does not match "
                    "amount of found testcases.")

            for i, testcase in enumerate(suite.get("testcases")):
                suiteconfig = testcase.get("config")
                self._testcases.get(i).set_suiteconfig(suiteconfig)
        else:
            tclist = self._load_suite_list()
            if tclist is False:
                raise SuiteException("Suite creation failed.")
            self._testcases = tclist
            if self.args.tc and self.args.tc != "all":
                self._print_search_errors()
            elif self._testcases.search_errors:
                self.logger.error(
                    "Failed import the following modules during test case search:"
                )
                for item in self._testcases.search_errors:
                    self.logger.error("%s: %s", item["module"], item["error"])
        self.logger.info("Suite creation complete.")
        self._prepare_suite()

    def _print_search_errors(self):
        for testcase in self._testcases:
            if isinstance(testcase, DummyContainer):
                self.logger.error("Some test cases were not found.")
                for item in self._testcases.search_errors:
                    self.logger.error("%s: %s", item["module"], item["error"])

    def _prepare_suite(self):
        """
        Prepares parsed testcases for running.

        :raises: SyntaxError, SuiteException
        """
        for i, testcase in enumerate(self._testcases):
            try:
                self._prepare_testcase(testcase)
            except (TypeError, ImportError, ValueError) as err:
                raise SuiteException("Test case preparation failed for "
                                     "test case {}: {}".format(i, err))
            except SyntaxError:
                pass
                if self.args.list:
                    pass
                else:
                    raise
            testcase.status = TestStatus.READY
        self.logger.info("Test cases prepared.")
        self.status = TestStatus.READY

    def _get_suite_tcs(self, tcdir, testcases):
        """
        Generate a TestcaseList from a Suite.

        :param tcdir: Test case directory
        :param testcases: Names of testcases.
        :return: TestcaseList or None
        """
        if not os.path.isdir(tcdir):
            self.logger.error("Test case directory does not exist!")
            return None
        self.logger.info("Importing testcases for filtering")
        abs_tcpath = os.path.abspath(tcdir)
        sys.path.append(abs_tcpath)
        tclist = TestcaseList(logger=self.logger)
        tclist.import_from_path(abs_tcpath)
        if not tclist:
            self.logger.error(
                "Error, runSuite: "
                "Could not find any python files in given testcase dirpath")
            return None
        try:
            filt = TestcaseFilter().tc(testcases)
        except (TypeError, IndexError):
            self.logger.error(
                "Error: Failed to create testcase filter for suite.")
            return None
        self.logger.info("Filtering testcases")
        if testcases == "all":
            testcases = None
        final_tclist = tclist.filter(filt, testcases)
        if not final_tclist:
            self.logger.error(
                "Error, create_suite: "
                "Specified testcases not found in %s.", abs_tcpath)
            return None
        return final_tclist

    def _prepare_testcase(self, testcase):
        """
        Run some preparatory commands on a test case to prep it for running.

        :param testcase: TestcaseContainer
        :return: Nothing
        """
        testcase.validate_tc_instance()
        testcase.merge_tc_config(self._default_configs)
        if testcase.get_suiteconfig():
            testcase.merge_tc_config(testcase.get_suiteconfig())
        testcase.set_final_config()
        testcase.validate_tc_instance()

    def _load_suite_file(self, name, suitedir):
        """
        Load a suite file from json to dict.

        :param name: Name of suite
        :param suitedir: Path to suite
        :return: Dictionary or None
        """
        self.logger.info("Loading suite from file")
        if not isinstance(name, str):
            self.logger.error("Error, load_suite: Suite name not a string")
            return None
        filename = name if name.split('.')[-1] == 'json' else name + '.json'
        filepath = os.path.join(suitedir, filename)

        suite = None
        if not os.path.exists(filepath):
            if self.cloud_module:
                suite = self.cloud_module.get_suite(name)
            else:
                self.logger.error(
                    "Error, load_suite_file: "
                    "Suite file not found and cloud module not defined.")
                return None
            if not suite:
                self.logger.error("Error, load_suite_file: "
                                  "Suite file not found locally or in cloud.")
            return suite
        try:
            with open(filepath) as fil:
                suite = json.load(fil)
                return suite
        except IOError:
            self.logger.error(
                "Error, load_suite_file: "
                "Test suite %s cannot be read.", name)
        except ValueError:
            self.logger.error(
                "Error, load_suite_file: "
                "Could not load test suite. No JSON object could be decoded.")
        return None

    def _load_suite_list(self):
        """
        Generate a TestcaseList from command line filters.

        :return: TestcaseList or False
        """
        self.logger.info("Generating suite from command line.")
        args = self.args
        filt = TestcaseFilter()
        testcase = args.tc if args.tc else "all"
        try:
            filt = TestcaseFilter().tc(testcase)
            filt.status(args.status).group(args.group).testtype(args.testtype)
            filt.subtype(args.subtype).component(args.component).feature(
                args.feature)
            filt.platform(args.platform_filter)
        except (TypeError, IndexError):
            self.logger.exception("Filter creation failed.")
            return False

        self.logger.info("Importing testcases for filtering")
        if not os.path.isdir(args.tcdir):
            self.logger.error("Test case directory does not exist!")
            return False
        abs_tcpath = os.path.abspath(args.tcdir)
        sys.path.append(abs_tcpath)
        tclist = TestcaseList(self.logger)
        tclist.import_from_path(abs_tcpath)
        if not tclist:
            self.logger.error("Could not find any python files in given path")
            return False
        self.logger.info("Filtering testcases")
        if filt.get_filter()["list"] is not False:
            if isinstance(filt.get_filter()["list"], list):
                testcases = filt.get_filter()["list"]
            else:
                testcases = None
        else:
            testcases = None
        final_tclist = tclist.filter(filt, testcases)
        if not final_tclist:
            self.logger.error(
                "Error, create_suite: "
                "Specified testcases not found in %s.", abs_tcpath)
        return final_tclist
Beispiel #18
0
    def test_clean_inconcs(self):
        dictionary = {"retcode": 0}
        res = Result(kwargs=dictionary)
        res.set_verdict(verdict="pass", retcode=0, duration=0)
        dictionary = {"retcode": 1}
        res2 = Result(kwargs=dictionary)
        res2.set_verdict(verdict="fail", retcode=1, duration=5)
        res3 = Result()
        res3.set_verdict("inconclusive", 4, 5)
        resultlist = ResultList()
        resultlist.append(res)
        resultlist.append(res2)
        resultlist.append(res3)
        self.assertTrue(resultlist.clean_inconcs())

        dictionary = {"retcode": 0}
        res = Result(kwargs=dictionary)
        res.set_verdict(verdict="pass", retcode=0, duration=0)
        dictionary = {"retcode": 1}
        res2 = Result(kwargs=dictionary)
        res2.set_verdict(verdict="fail", retcode=1, duration=5)
        res2.retries_left = 1
        res3 = Result()
        res3.set_verdict("inconclusive", 4, 5)
        res3.retries_left = 1
        resultlist = ResultList()
        resultlist.append(res)
        resultlist.append(res2)
        resultlist.append(res3)
        res4 = Result()
        res4.set_verdict("inconclusive", 4, 5)
        resultlist.append(res4)
        self.assertTrue(resultlist.clean_inconcs())

        dictionary = {"retcode": 0}
        res = Result(kwargs=dictionary)
        res.set_verdict(verdict="pass", retcode=0, duration=0)
        dictionary = {"retcode": 1}
        res2 = Result(kwargs=dictionary)
        res2.set_verdict(verdict="inconclusive", retcode=1, duration=5)
        res2.retries_left = 1
        res3 = Result()
        res3.set_verdict("pass", 4, 5)
        res3.retries_left = 0
        resultlist = ResultList()
        resultlist.append(res)
        resultlist.append(res2)
        resultlist.append(res3)
        self.assertFalse(resultlist.clean_inconcs())
Beispiel #19
0
    def test_junit_hides(self):
        str_should_be1 = '<testsuite failures="0" tests="3" errors="0" skipped="0">\n\
            <testcase classname="test-case-A1" name="unknown" time="20"></testcase>\n\
            <testcase classname="test-case-A4" name="unknown" time="120"></testcase>\n\
            <testcase classname="test-case-A6" name="unknown" time="2"></testcase>\n\
        </testsuite>'

        results = ResultList()
        results.append(
            Result({
                "testcase": "test-case-A1",
                "verdict": "PASS",
                "duration": 20
            }))
        failres = Result({
            "testcase": "test-case-A4",
            "verdict": "FAIL",
            "reason": "unknown",
            "duration": 120
        })
        failres.retries_left = 1
        results.append(failres)
        results.append(
            Result({
                "testcase": "test-case-A4",
                "verdict": "PASS",
                "duration": 120
            }))
        incres = Result({
            "testcase": "test-case-A6",
            "verdict": "INCONCLUSIVE",
            "reason": "unknown",
            "duration": 2
        })
        incres.retries_left = 1
        results.append(incres)
        results.append(
            Result({
                "testcase": "test-case-A6",
                "verdict": "PASS",
                "duration": 2
            }))

        junit = ReportJunit(results)
        str_report = junit.to_string()
        report_xml = ET.fromstring(str_report)
        shouldbe_xml = ET.fromstring(str_should_be1)
        self.assertDictEqual(report_xml.attrib, shouldbe_xml.attrib)
        self.assertEqual(len(report_xml.findall("testcase")),
                         len(shouldbe_xml.findall("testcase")))

        str_should_be2 = '<testsuite failures="0" tests="3" errors="1" skipped="0">\n\
            <testcase classname="test-case-A1" name="unknown" time="20"></testcase>\n\
            <testcase classname="test-case-A4" name="unknown" time="12"></testcase>\n\
            <testcase classname="test-case-A6" name="unknown" time="2">\n\
                <error message="unknown"></error>\n\
            </testcase>\n\
        </testsuite>'

        results = ResultList()
        results.append(
            Result({
                "testcase": "test-case-A1",
                "verdict": "PASS",
                "duration": 20
            }))
        failres = Result({
            "testcase": "test-case-A4",
            "verdict": "FAIL",
            "reason": "unknown",
            "duration": 120
        })
        failres.retries_left = 1
        results.append(failres)
        results.append(
            Result({
                "testcase": "test-case-A4",
                "verdict": "PASS",
                "duration": 12
            }))
        results.append(
            Result({
                "testcase": "test-case-A6",
                "verdict": "INCONCLUSIVE",
                "reason": "unknown",
                "duration": 2
            }))
        junit = ReportJunit(results)
        str_report = junit.to_string()
        report_xml = ET.fromstring(str_report)
        shouldbe_xml = ET.fromstring(str_should_be2)
        self.assertDictEqual(report_xml.attrib, shouldbe_xml.attrib)
        self.assertEqual(len(report_xml.findall("testcase")),
                         len(shouldbe_xml.findall("testcase")))
        errors = []
        for elem in report_xml.findall("testcase"):
            if elem.find("error") is not None:
                errors.append(elem)
        self.assertEqual(len(errors), 1)
Beispiel #20
0
    def test_junit_multiple(self):
        str_should_be = '<testsuite failures="2" tests="7" errors="1" skipped="1">\n\
            <testcase classname="test-case-A1" name="unknown" time="20"></testcase>\n\
            <testcase classname="test-case-A2" name="unknown" time="50"></testcase>\n\
            <testcase classname="test-case-A3" name="unknown" time="120"></testcase>\n\
            <testcase classname="test-case-A4" name="unknown" time="120">\n\
                <failure message="unknown"></failure>\n\
            </testcase>\n\
            <testcase classname="test-case-A5" name="unknown" time="1">\n\
                <skipped></skipped>\n\
            </testcase>\n\
            <testcase classname="test-case-A6" name="unknown" time="2">\n\
                <error message="unknown"></error>\n\
            </testcase>\n\
            <testcase classname="test-case-A4" name="unknown" time="1220">\n\
                <failure message="WIN blue screen"></failure>\n\
            </testcase>\n\
        </testsuite>'

        shouldbe_xml = ET.fromstring(str_should_be)
        results = ResultList()
        results.append(
            Result({
                "testcase": "test-case-A1",
                "verdict": "PASS",
                "duration": 20
            }))
        results.append(
            Result({
                "testcase": "test-case-A2",
                "verdict": "PASS",
                "duration": 50
            }))
        results.append(
            Result({
                "testcase": "test-case-A3",
                "verdict": "PASS",
                "duration": 120
            }))
        results.append(
            Result({
                "testcase": "test-case-A4",
                "verdict": "FAIL",
                "reason": "unknown",
                "duration": 120
            }))
        results.append(
            Result({
                "testcase": "test-case-A5",
                "verdict": "SKIP",
                "reason": "unknown",
                "duration": 1
            }))
        results.append(
            Result({
                "testcase": "test-case-A6",
                "verdict": "INCONCLUSIVE",
                "reason": "unknown",
                "duration": 2
            }))
        results.append(
            Result({
                "testcase": "test-case-A4",
                "verdict": "FAIL",
                "reason": "WIN blue screen",
                "duration": 1220
            }))
        junit = ReportJunit(results)
        str_report = junit.to_string()
        is_xml = ET.fromstring(str_report)
        self.assertDictEqual(is_xml.attrib, {
            "failures": "2",
            "tests": "7",
            "errors": "1",
            "skipped": "1"
        })
        self.assertEqual(len(is_xml.findall("testcase")),
                         len(shouldbe_xml.findall("testcase")))
        self.assertEqual(len(is_xml.findall("failure")),
                         len(shouldbe_xml.findall("failure")))
        self.assertEqual(len(is_xml.findall("skipped")),
                         len(shouldbe_xml.findall("skipped")))
        self.assertEqual(len(is_xml.findall("error")),
                         len(shouldbe_xml.findall("error")))
Beispiel #21
0
    def test_run(self, mock_tclist):
        testsuite = TestSuite(args=self.args_tc)
        cont1 = mock.MagicMock()
        pass_result = Result()
        pass_result.set_verdict('pass', 0, 10)
        fail_result = Result()
        fail_result.set_verdict('fail', 1000, 10)
        skipped_result = Result()
        skipped_result.set_verdict('skip', 0, 1)
        resultlist = ResultList()
        resultlist.append(pass_result)
        testsuite._default_configs["retryCount"] = 1
        cont1.run.side_effect = [
            pass_result, fail_result, skipped_result, KeyboardInterrupt,
            fail_result, pass_result
        ]
        cont_reslist = mock.MagicMock()
        cont_reslist.run = mock.MagicMock()
        cont_reslist.run.return_value = resultlist
        # Passing result
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = []
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 1)
        self.assertEqual(testsuite._results[0].get_verdict(), "pass")
        self.assertTrue(self.args_tc.forceflash)

        # ResultList as result
        testsuite._testcases = []
        testsuite._testcases.append(cont_reslist)
        testsuite._results = []
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 1)
        self.assertEqual(testsuite._results[0].get_verdict(), "pass")

        # Failing result, no retry
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = []
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 1)
        self.assertEqual(testsuite._results[0].get_verdict(), "fail")

        # skipped result
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = []
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 1)
        self.assertEqual(testsuite._results[0].get_verdict(), "skip")

        # Interrupt
        cont2 = mock.MagicMock()
        cont2.run = mock.MagicMock()
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._testcases.append(cont2)
        testsuite._results = []
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 0)
        cont2.run.assert_not_called()

        # Failing result, retried
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = []
        testsuite._default_configs["retryReason"] = "includeFailures"
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 2)
        self.assertEqual(testsuite._results[0].get_verdict(), "fail")
        self.assertEqual(testsuite._results[1].get_verdict(), "pass")

        self.args_tc.repeat = 2
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = []
        cont1.run.side_effect = [
            pass_result, pass_result, pass_result, pass_result
        ]
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 2)
        self.assertEqual(testsuite._results[0].get_verdict(), "pass")
        self.assertFalse(self.args_tc.forceflash)

        # Failing result, stop_on_failure
        self.args_tc.stop_on_failure = True
        self.args_tc.repeat = 1
        testsuite._default_configs["retryCount"] = 0
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = []
        cont1.run.side_effect = [pass_result]
        cont2 = mock.MagicMock()
        cont2.run = mock.MagicMock()
        cont2.run.side_effect = [fail_result]
        cont3 = mock.MagicMock()
        cont3.run = mock.MagicMock()
        cont3.run.side_effect = [pass_result]
        testsuite._testcases.append(cont2)
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 2)
        self.assertEqual(testsuite._results[0].get_verdict(), "pass")
        self.assertEqual(testsuite._results[1].get_verdict(), "fail")

        # Skipped result, stop_on_failure
        self.args_tc.stop_on_failure = True
        self.args_tc.repeat = 0
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = []
        cont1.run.side_effect = [skipped_result]
        cont2 = mock.MagicMock()
        cont2.run = mock.MagicMock()
        cont2.run.side_effect = [pass_result]
        testsuite._testcases.append(cont2)
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 2)
        self.assertEqual(testsuite._results[0].get_verdict(), "skip")
        self.assertEqual(testsuite._results[1].get_verdict(), "pass")
Beispiel #22
0
    def test_run(self, mock_tclist):  # pylint: disable=too-many-statements
        testsuite = TestSuite(args=self.args_tc)
        cont1 = mock.MagicMock()
        pass_result = Result()
        pass_result.set_verdict('pass', 0, 10)
        fail_result = Result()
        fail_result.set_verdict('fail', 1000, 10)
        skipped_result = Result()
        skipped_result.set_verdict('skip', 0, 1)
        resultlist = ResultList()
        resultlist.append(pass_result)
        resultlist.save = mock.MagicMock()
        testsuite._default_configs["retryCount"] = 1
        cont1.run.side_effect = [pass_result,
                                 fail_result,
                                 skipped_result,
                                 KeyboardInterrupt,
                                 fail_result,
                                 pass_result]
        cont_reslist = mock.MagicMock()
        cont_reslist.run = mock.MagicMock()
        cont_reslist.run.return_value = resultlist
        # Passing result
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 1)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "pass")
        self.assertTrue(self.args_tc.forceflash)  # pylint: disable=no-member
        self.assertEquals(testsuite._results.save.call_count, 1)

        # ResultList as result
        testsuite._testcases = []
        testsuite._testcases.append(cont_reslist)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 1)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "pass")

        # Failing result, no retry
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 1)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "fail")

        # skipped result
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 1)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "skip")

        # Interrupt
        cont2 = mock.MagicMock()
        cont2.run = mock.MagicMock()
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._testcases.append(cont2)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 0)
        cont2.run.assert_not_called()

        # Failing result, retried
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        testsuite._default_configs["retryReason"] = "includeFailures"
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 2)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "fail")
        self.assertEqual(testsuite._results.get(1).get_verdict(), "pass")

        # TC not exist result, retried
        testsuite._testcases = []
        contx = mock.MagicMock()
        inconc_res = Result()
        inconc_res.set_verdict("inconclusive", 1015, 0)
        contx.run = mock.MagicMock(return_value=inconc_res)
        testsuite._testcases.append(contx)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        testsuite._default_configs["retryReason"] = "includeFailures"
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 1)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "inconclusive")

        # Failing result, retried, from a result list.
        testsuite._testcases = []
        fail_result_2 = Result()
        fail_result_2.set_verdict('fail', 1000, 10)
        reslist = ResultList()
        reslist.append(fail_result_2)
        cont_retry = mock.MagicMock()
        cont_retry.run.side_effect = [reslist, resultlist]
        testsuite._testcases.append(cont_retry)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        testsuite._default_configs["retryReason"] = "includeFailures"
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 2)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "fail")
        self.assertEqual(testsuite._results.get(1).get_verdict(), "pass")
        self.assertEqual(testsuite._results.get(0).retries_left, 1)
        self.assertEqual(testsuite._results.get(1).retries_left, 0)

        # Inconclusive result, retried, from a result list.
        testsuite._testcases = []
        fail_result_3 = Result()
        fail_result_3.set_verdict('inconclusive', 1000, 10)
        reslist = ResultList()
        reslist.append(fail_result_3)
        cont_retry = mock.MagicMock()
        cont_retry.run.side_effect = [reslist, resultlist]
        testsuite._testcases.append(cont_retry)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        testsuite._default_configs["retryReason"] = "includeFailures"
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 2)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "inconclusive")
        self.assertEqual(testsuite._results.get(1).get_verdict(), "pass")
        self.assertEqual(testsuite._results.get(0).retries_left, 1)
        self.assertEqual(testsuite._results.get(1).retries_left, 0)

        self.args_tc.repeat = 2
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        cont1.run.side_effect = [pass_result, pass_result, pass_result, pass_result]
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 2)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "pass")
        self.assertFalse(self.args_tc.forceflash)  # pylint: disable=no-member

        # Failing result, stop_on_failure
        self.args_tc.stop_on_failure = True
        self.args_tc.repeat = 1
        testsuite._default_configs["retryCount"] = 0
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        cont1.run.side_effect = [pass_result]
        cont2 = mock.MagicMock()
        cont2.run = mock.MagicMock()
        cont2.run.side_effect = [fail_result]
        cont3 = mock.MagicMock()
        cont3.run = mock.MagicMock()
        cont3.run.side_effect = [pass_result]
        testsuite._testcases.append(cont2)
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 2)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "pass")
        self.assertEqual(testsuite._results.get(1).get_verdict(), "fail")
        self.assertEquals(testsuite._results.save.call_count, 2)

        # Skipped result, stop_on_failure
        self.args_tc.stop_on_failure = True
        self.args_tc.repeat = 0
        testsuite._testcases = []
        testsuite._testcases.append(cont1)
        testsuite._results = ResultList()
        testsuite._results.save = mock.MagicMock()
        cont1.run.side_effect = [skipped_result]
        cont2 = mock.MagicMock()
        cont2.run = mock.MagicMock()
        cont2.run.side_effect = [pass_result]
        testsuite._testcases.append(cont2)
        testsuite.run()
        self.assertEqual(testsuite.status, TestStatus.FINISHED)
        self.assertEqual(len(testsuite._results), 2)
        self.assertEqual(testsuite._results.get(0).get_verdict(), "skip")
        self.assertEqual(testsuite._results.get(1).get_verdict(), "pass")
        self.assertEquals(testsuite._results.save.call_count, 2)
Beispiel #23
0
class Results(object):
    """
    ResultMixer manage test results and verdicts.
    It provide public API get_result() for TestManagement.
    """
    def __init__(self, logger, resources, configuration, args, **kwargs):
        """
        Set up an empty result collection and store collaborators.

        :param logger: Logger instance used for reporting
        :param resources: Resource management object
        :param configuration: Test configuration object
        :param args: Parsed command line arguments
        :param kwargs: Forwarded to cooperating base classes
        """
        super(Results, self).__init__(**kwargs)
        # Start with a clean list, a successful return code and no failure.
        self._result_list = ResultList()
        self._retcode = ReturnCodes.RETCODE_SUCCESS
        self._failreason = ''
        # Collaborators injected by the caller.
        self._logger = logger
        self._resources = resources
        self._configuration = configuration
        self._args = args

    def init(self, logger=None):
        """
        Reset the internal ResultList, optionally replacing the logger.

        :param logger: Replacement logger; the current one is kept when falsy
        :return: Nothing
        """
        if logger:
            self._logger = logger
        # Any previously collected results are discarded.
        self._result_list = ResultList()

    @staticmethod
    def create_new_result(verdict, retcode, duration, input_data):
        """
        Build a Result object from raw verdict data.

        :param verdict: Verdict as string
        :param retcode: Return code as int
        :param duration: Duration as time
        :param input_data: Input data as dictionary
        :return: Result
        """
        result = Result(input_data)
        result.set_verdict(verdict, retcode, duration)
        return result

    def add_new_result(self, verdict, retcode, duration, input_data):
        """
        Create a Result from the given data and store it in the internal
        ResultList.

        :param verdict: Verdict as string
        :param retcode: Return code as int
        :param duration: Duration as time
        :param input_data: Input data as dict
        :return: The newly created Result
        """
        result = self.create_new_result(verdict, retcode, duration, input_data)
        self._result_list.append(result)
        return result

    @property
    def retcode(self):
        """
        Getter for return code.

        :return: int
        """
        return self._retcode

    @retcode.setter
    def retcode(self, value):
        """
        Setter for retcode.

        :param value: int
        :return: Nothing
        """
        self._retcode = value

    def set_failure(self, retcode, reason):
        """
        Record a test failure: store return code and reason.

        If the resource provider's allocator reports an error status, that
        status is prepended to the reason and the return code is replaced
        with a DUT-connection failure code. Only the first failure is kept;
        subsequent ones are merely logged.

        :param retcode: return code
        :param reason: failure reason as string
        :return: Nothing
        """
        provider = self._resources.resource_provider
        # hasattr checks keep backwards compatibility with older pyclient
        # versions that lack an allocator or its get_status API.
        if provider and hasattr(provider, "allocator") and hasattr(
                provider.allocator, "get_status"):
            allocator_status = provider.allocator.get_status()
            if allocator_status:
                reason = "{}. Other error: {}".format(allocator_status, reason)
                retcode = ReturnCodes.RETCODE_FAIL_DUT_CONNECTION_FAIL

        if self.retcode is None or self.retcode == ReturnCodes.RETCODE_SUCCESS:
            # First failure wins: store it and report at error level.
            self.retcode = retcode
            self._failreason = reason
            self._logger.error("Test Case fails because of: %s", reason)
        else:
            self._logger.info("another fail reasons: %s", reason)

    def get_results(self):
        """
        Getter for internal _results variable.
        """
        return self._result_list

    def get_result(self, tc_file=None):
        """
        Construct a Result for this test case and return the first stored one.

        A fully populated Result is appended to the internal list first; the
        entry at index 0 is then returned.

        :param tc_file: Location of test case file
        :return: Result
        """
        results = self._result_list
        self.append_result(tc_file)
        return results.data[0]

    def set_results(self, value):
        """
        Setter for _result_list.

        :param value: ResultList
        :return: Nothing
        """
        self._result_list = value

    def append_result(self, tc_file=None):
        """
        Build a fully populated Result and append it to the internal list.

        Gathers test case metadata, git revision info, skip/fail reasons,
        log locations and DUT details from the collaborators.

        :param tc_file: Test case file path
        :return: Nothing
        """
        new_result = Result()

        # Test case metadata and repository revision information.
        new_result.set_tc_metadata(self._configuration.config)
        tc_rev = get_git_info(self._configuration.get_tc_abspath(tc_file),
                              verbose=self._args.verbose)
        if self._logger:
            self._logger.debug(tc_rev)
        new_result.set_tc_git_info(tc_rev)
        new_result.component = self._configuration.get_test_component()
        new_result.feature = self._configuration.get_features_under_test()
        new_result.skip_reason = self._configuration.skip_reason() if self._configuration.skip() else ''
        new_result.fail_reason = self._failreason
        # Log locations and the overall return code collected so far.
        new_result.logpath = os.path.abspath(LogManager.get_base_dir())
        new_result.logfiles = LogManager.get_logfiles()
        new_result.retcode = self.retcode
        # Device-under-test details.
        new_result.set_dutinformation(self._resources.dutinformations)
        # pylint: disable=unused-variable
        for platform, serialnumber in zip(self._resources.get_platforms(),
                                          self._resources.get_serialnumbers()):
            #  Zipping keeps platforms and serial numbers aligned in case
            #  some serial numbers are missing.
            new_result.dut_vendor.append('')
            new_result.dut_resource_id.append(serialnumber)
        new_result.dut_count = self._resources.get_dut_count()
        resource_config = self._resources.resource_configuration
        new_result.duts = resource_config.get_dut_configuration()
        if resource_config.count_hardware() > 0:
            new_result.dut_type = 'hw'
        elif resource_config.count_process() > 0:
            new_result.dut_type = 'process'
        else:
            new_result.dut_type = None

        self._result_list.append(new_result)