Example #1
    def __init__(self, job=None):
        """
        Creates an instance of ResultJournal.

        :param job: an instance of :class:`avocado.core.job.Job`.
        """
        Result.__init__(self, job)
        self.journal_initialized = False
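Example #1 only sets the journal_initialized flag; Examples #8 and #15 below consume it through lazy_init_journal(state). A minimal sketch of how such a guard is typically written (an illustration under assumptions, not ResultJournal's actual code):

    def lazy_init_journal(self, state):
        # Create the journal on the first start_test/end_test call, once the
        # test's log location is known; _init_journal is a hypothetical helper.
        if not self.journal_initialized:
            self.journal_initialized = True
            self._init_journal(os.path.dirname(state['logfile']))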
Example #2
class xUnitSucceedTest(unittest.TestCase):

    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        args = argparse.Namespace(base_logdir=self.tmpdir)
        args.xunit_output = self.tmpfile[1]
        self.job = job.Job(args)
        self.test_result = Result(FakeJob(args))
        self.test_result.tests_total = 1
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
        self.test1._Test__status = 'PASS'
        self.test1.time_elapsed = 1.23
        junit_xsd = os.path.join(os.path.dirname(__file__),
                                 os.path.pardir, ".data", 'junit-4.xsd')
        self.junit = os.path.abspath(junit_xsd)

    def tearDown(self):
        errs = []
        cleanups = (lambda: os.close(self.tmpfile[0]),
                    lambda: os.remove(self.tmpfile[1]),
                    lambda: shutil.rmtree(self.tmpdir))
        for cleanup in cleanups:
            try:
                cleanup()
            except Exception as exc:
                errs.append(str(exc))
        self.assertFalse(errs, "Failures occurred during cleanup:\n%s"
                         % "\n".join(errs))

    @unittest.skipUnless(SCHEMA_CAPABLE,
                         'Unable to validate schema due to missing lxml.etree library')
    def test_add_success(self):
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        xunit_result = xunit.XUnitResult()
        xunit_result.render(self.test_result, self.job)
        with open(self.job.args.xunit_output, 'rb') as fp:
            xml = fp.read()
        try:
            dom = minidom.parseString(xml)
        except Exception as details:
            raise ParseXMLError("Error parsing XML: '%s'.\nXML Contents:\n%s" % (details, xml))
        self.assertTrue(dom)
        els = dom.getElementsByTagName('testcase')
        self.assertEqual(len(els), 1)

        with open(self.junit, 'r') as f:
            xmlschema = etree.XMLSchema(etree.parse(f))   # pylint: disable=I1101
        # pylint: disable=I1101
        self.assertTrue(xmlschema.validate(etree.parse(BytesIO(xml))),
                        "Failed to validate against %s, content:\n%s" %
                        (self.junit, xml))
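The skipUnless decorator above assumes a module-level SCHEMA_CAPABLE flag. Consistent with its skip message, the guard is presumably set next to the lxml import, along these lines:

    try:
        from lxml import etree
        SCHEMA_CAPABLE = True
    except ImportError:
        SCHEMA_CAPABLE = False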
Example #3
class xUnitSucceedTest(unittest.TestCase):

    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        args = argparse.Namespace()
        args.xunit_output = self.tmpfile[1]
        self.job = job.Job(args)
        self.test_result = Result(FakeJob(args))
        self.test_result.tests_total = 1
        self.test_result.start_tests()
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
        self.test1.status = 'PASS'
        self.test1.time_elapsed = 1.23
        unittests_path = os.path.dirname(os.path.abspath(__file__))
        self.junit_schema_path = os.path.join(unittests_path, 'junit-4.xsd')

    def tearDown(self):
        os.close(self.tmpfile[0])
        os.remove(self.tmpfile[1])
        shutil.rmtree(self.tmpdir)

    def testAddSuccess(self):
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        xunit_result = xunit.XUnitResult()
        xunit_result.render(self.test_result, self.job)
        with open(self.job.args.xunit_output) as fp:
            xml = fp.read()
        try:
            dom = minidom.parseString(xml)
        except Exception as details:
            raise ParseXMLError("Error parsing XML: '%s'.\nXML Contents:\n%s" % (details, xml))
        self.assertTrue(dom)
        els = dom.getElementsByTagName('testcase')
        self.assertEqual(len(els), 1)

        with open(self.junit_schema_path, 'r') as f:
            xmlschema = etree.XMLSchema(etree.parse(f))
        self.assertTrue(xmlschema.validate(etree.parse(StringIO(xml))),
                        "Failed to validate against %s, content:\n%s" %
                        (self.junit_schema_path, xml))
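Note the re-parsing difference between the two variants: Example #2 reads the file in binary mode and wraps the bytes in BytesIO, while Example #3 reads text and uses StringIO. Both assume the standard io imports:

    from io import BytesIO, StringIO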
Example #4
    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        args = argparse.Namespace(json_output=self.tmpfile[1])
        self.job = job.Job(args)
        self.test_result = Result(FakeJob(args))
        self.test_result.filename = self.tmpfile[1]
        self.test_result.start_tests()
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
        self.test1.status = 'PASS'
        self.test1.time_elapsed = 1.23
Example #5
    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        args = argparse.Namespace(base_logdir=self.tmpdir)
        args.xunit_output = self.tmpfile[1]
        self.job = job.Job(args)
        self.test_result = Result(FakeJob(args))
        self.test_result.tests_total = 1
        self.test_result.logfile = ("/.../avocado/job-results/"
                                    "job-2018-11-28T16.27-8fef221/job.log")
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
        self.test1._Test__status = 'PASS'
        self.test1.time_elapsed = 678.23689
Example #6
    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        args = argparse.Namespace(logdir=self.tmpdir)
        args.xunit_output = self.tmpfile[1]
        self.job = job.Job(args)
        self.test_result = Result(FakeJob(args))
        self.test_result.tests_total = 1
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
        self.test1._Test__status = 'PASS'
        self.test1.time_elapsed = 1.23
        self.junit = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                     os.path.pardir, ".data", 'junit-4.xsd'))
Example #7
    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        args = argparse.Namespace()
        args.xunit_output = self.tmpfile[1]
        self.job = job.Job(args)
        self.test_result = Result(FakeJob(args))
        self.test_result.start_tests()
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
        self.test1.status = 'PASS'
        self.test1.time_elapsed = 1.23
        unittests_path = os.path.dirname(os.path.abspath(__file__))
        self.junit_schema_path = os.path.join(unittests_path, 'junit-4.xsd')
Example #8
 def end_test(self, state):
     self.lazy_init_journal(state)
     Result.end_test(self, state)
     self._record_status(state, "ENDED")
Example #9
 def test_result_rate_half_succeeded(self):
     result = Result(FakeJob([]))
     result.check_test({'status': 'PASS'})
     result.check_test({'status': 'FAIL'})
     result.end_tests()
     self.assertEqual(result.rate, 50.0)
Example #10
 def test_result_rate_all_succeeded_with_cancelled(self):
     result = Result(FakeJob([]))
     result.check_test({'status': 'PASS'})
     result.check_test({'status': 'CANCEL'})
     result.end_tests()
     self.assertEqual(result.rate, 100.0)
Example #11
 def test_result_rate_all_succeeded_with_skips(self):
     result = Result(FakeJob([]))
     result.check_test({'status': 'PASS'})
     result.check_test({'status': 'SKIP'})
     result.end_tests()
     self.assertEqual(result.rate, 100.0)
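Taken together, Examples #9 through #11 pin down the rate semantics: a FAIL lowers the rate, while SKIP and CANCEL are excluded from the denominator. The implied formula, as a standalone sketch (an assumption about Result's internals):

    def success_rate(passed, total, skipped, cancelled):
        # PASS count over the tests that actually ran to completion.
        ran = total - skipped - cancelled
        return 100.0 * passed / ran if ran else 0.0

    success_rate(1, 2, 0, 0)  # 50.0, matching Example #9
    success_rate(1, 2, 0, 1)  # 100.0, matching Example #10
    success_rate(1, 2, 1, 0)  # 100.0, matching Example #11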
Example #12
class JSONResultTest(unittest.TestCase):

    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.TemporaryDirectory(prefix=prefix)
        config = {'json_output': self.tmpfile[1],
                  'base_logdir': self.tmpdir.name}
        self.job = job.Job(config)
        self.test_result = Result(UNIQUE_ID, LOGFILE)
        self.test_result.filename = self.tmpfile[1]
        self.test_result.tests_total = 1
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir.name)
        self.test1._Test__status = 'PASS'
        self.test1.time_elapsed = 1.23

    def tearDown(self):
        os.close(self.tmpfile[0])
        os.remove(self.tmpfile[1])
        self.tmpdir.cleanup()

    def test_add_success(self):
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        with open(self.job.config.get('json_output')) as fp:
            j = fp.read()
        obj = json.loads(j)
        self.assertTrue(obj)
        self.assertEqual(len(obj['tests']), 1)

    def test_add_several_statuses(self):
        def run_fake_status(status):
            self.test_result.start_test(self.test1)
            self.test_result.check_test(status)

        def check_item(name, value, exp):
            self.assertEqual(value, exp, "Result%s is %s and not %s\n%s"
                             % (name, value, exp, res))

        # Set the number of tests to all tests + 3
        self.test_result.tests_total = 13
        # Full PASS status
        self.test_result.start_test(self.test1)
        self.test_result.check_test(self.test1.get_state())
        # Only status - valid statuses
        run_fake_status({"status": "PASS"})
        run_fake_status({"status": "SKIP"})
        run_fake_status({"status": "FAIL"})
        run_fake_status({"status": "ERROR"})
        run_fake_status({"status": "WARN"})
        run_fake_status({"status": "INTERRUPTED"})
        # Only status - invalid statuses
        run_fake_status({"status": "INVALID"})
        run_fake_status({"status": None})
        run_fake_status({"status": ""})
        # Postprocess
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        with open(self.job.config.get('json_output')) as json_output:
            res = json.loads(json_output.read())
        check_item("[pass]", res["pass"], 2)
        check_item("[errors]", res["errors"], 4)
        check_item("[failures]", res["failures"], 1)
        check_item("[skip]", res["skip"], 4)
        check_item("[total]", res["total"], 13)

    def test_negative_status(self):
        def check_item(name, value, exp):
            self.assertEqual(value, exp, "Result%s is %s and not %s\n%s"
                             % (name, value, exp, res))

        self.test_result.tests_total = 0
        self.test_result.start_test(self.test1)
        self.test_result.check_test(self.test1.get_state())
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        with open(self.job.config.get('json_output')) as json_output:
            res = json.loads(json_output.read())
        check_item("[total]", res["total"], 1)
        check_item("[skip]", res["skip"], 0)
        check_item("[pass]", res["pass"], 1)
Example #13
class JSONResultTest(TestCaseTmpDir):
    def setUp(self):
        super(JSONResultTest, self).setUp()

        class SimpleTest(Test):
            def test(self):
                pass

        json_output_path = os.path.join(self.tmpdir.name, 'results.json')
        config = {
            'run.results_dir': self.tmpdir.name,
            'job.run.result.json.output': json_output_path
        }
        self.job = job.Job(config)
        self.job.setup()
        self.test_result = Result(UNIQUE_ID, LOGFILE)
        self.test_result.filename = json_output_path
        self.test_result.tests_total = 1
        self.test1 = SimpleTest(config=self.job.config,
                                base_logdir=self.tmpdir.name)
        self.test1._Test__status = 'PASS'
        self.test1.time_elapsed = 1.23

    def test_add_success(self):
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        with open(self.job.config.get('job.run.result.json.output')) as fp:
            j = fp.read()
        obj = json.loads(j)
        self.assertTrue(obj)
        self.assertEqual(len(obj['tests']), 1)

    def test_add_several_statuses(self):
        def run_fake_status(status):
            self.test_result.start_test(self.test1)
            self.test_result.check_test(status)

        def check_item(name, value, exp):
            self.assertEqual(
                value, exp,
                "Result%s is %s and not %s\n%s" % (name, value, exp, res))

        # Set the number of tests to all tests + 3
        self.test_result.tests_total = 13
        # Full PASS status
        self.test_result.start_test(self.test1)
        self.test_result.check_test(self.test1.get_state())
        # Only status - valid statuses
        run_fake_status({"status": "PASS"})
        run_fake_status({"status": "SKIP"})
        run_fake_status({"status": "FAIL"})
        run_fake_status({"status": "ERROR"})
        run_fake_status({"status": "WARN"})
        run_fake_status({"status": "INTERRUPTED"})
        # Only status - invalid statuses
        run_fake_status({"status": "INVALID"})
        run_fake_status({"status": None})
        run_fake_status({"status": ""})
        # Postprocess
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        output = self.job.config.get('job.run.result.json.output')
        with open(output) as json_output:
            res = json.loads(json_output.read())
        check_item("[pass]", res["pass"], 2)
        check_item("[errors]", res["errors"], 4)
        check_item("[failures]", res["failures"], 1)
        check_item("[skip]", res["skip"], 4)
        check_item("[total]", res["total"], 13)

    def test_negative_status(self):
        def check_item(name, value, exp):
            self.assertEqual(
                value, exp,
                "Result%s is %s and not %s\n%s" % (name, value, exp, res))

        self.test_result.tests_total = 0
        self.test_result.start_test(self.test1)
        self.test_result.check_test(self.test1.get_state())
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        output = self.job.config.get('job.run.result.json.output')
        with open(output) as json_output:
            res = json.loads(json_output.read())
        check_item("[total]", res["total"], 1)
        check_item("[skip]", res["skip"], 0)
        check_item("[pass]", res["pass"], 1)

    def tearDown(self):
        self.job.cleanup()
        super(JSONResultTest, self).tearDown()
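Compared with Example #12, Example #13 completes the migration from an argparse Namespace to a flat dict of namespaced config keys. Side by side, the two access styles look like this (paths illustrative):

    path = job.args.json_output                            # older Namespace API
    path = job.config.get('job.run.result.json.output')    # newer namespaced keys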
Example #14
0
 def test_result_rate_all_succeeded_with_cancelled(self):
     result = Result(FakeJob([]))
     result.check_test({'status': 'PASS'})
     result.check_test({'status': 'CANCEL'})
     result.end_tests()
     self.assertEqual(result.rate, 100.0)
Example #15
0
 def start_test(self, state):
     self.lazy_init_journal(state)
     Result.start_test(self, state)
     self._record_status(state, "STARTED")
Example #16
0
 def test_result_rate_none_succeeded(self):
     result = Result(UNIQUE_ID, LOGFILE)
     result.check_test({'status': 'FAIL'})
     result.end_tests()
     self.assertEqual(result.rate, 0.0)
Example #17
0
 def test_result_rate_all_succeeded_with_cancelled(self):
     result = Result(UNIQUE_ID, LOGFILE)
     result.check_test({'status': 'PASS'})
     result.check_test({'status': 'CANCEL'})
     result.end_tests()
     self.assertEqual(result.rate, 100.0)
Example #18
0
 def test_result_no_job_logfile(self):
     with self.assertRaises(TypeError):
         Result(UNIQUE_ID)
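Examples #16 through #18 construct Result(UNIQUE_ID, LOGFILE) directly, and #18 shows that the logfile argument is mandatory. Module-level fixtures along these lines are assumed (placeholder values, not necessarily the suite's actual ones):

    UNIQUE_ID = '0000000000000000000000000000000000000000'  # any job-id-like string
    LOGFILE = None  # acceptable when the job log is never written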
Example #19
0
class xUnitSucceedTest(unittest.TestCase):
    def setUp(self):
        class SimpleTest(Test):
            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        args = argparse.Namespace(base_logdir=self.tmpdir)
        args.xunit_output = self.tmpfile[1]
        self.job = job.Job(args)
        self.test_result = Result(FakeJob(args))
        self.test_result.tests_total = 1
        self.test_result.logfile = ("/.../avocado/job-results/"
                                    "job-2018-11-28T16.27-8fef221/job.log")
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
        self.test1._Test__status = 'PASS'
        self.test1.time_elapsed = 1.23

    def tearDown(self):
        errs = []
        cleanups = (lambda: os.close(self.tmpfile[0]),
                    lambda: os.remove(self.tmpfile[1]),
                    lambda: shutil.rmtree(self.tmpdir))
        for cleanup in cleanups:
            try:
                cleanup()
            except Exception as exc:
                errs.append(str(exc))
        self.assertFalse(
            errs, "Failures occurred during cleanup:\n%s" % "\n".join(errs))

    @unittest.skipUnless(
        SCHEMA_CAPABLE,
        'Unable to validate schema due to missing lxml.etree library')
    def test_add_success(self):
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        xunit_result = xunit.XUnitResult()
        xunit_result.render(self.test_result, self.job)
        with open(self.job.args.xunit_output, 'rb') as fp:
            xml = fp.read()
        try:
            dom = minidom.parseString(xml)
        except Exception as details:
            raise ParseXMLError("Error parsing XML: '%s'.\nXML Contents:\n%s" %
                                (details, xml))
        self.assertTrue(dom)
        els = dom.getElementsByTagName('testcase')
        self.assertEqual(len(els), 1)

        junit_xsd = os.path.abspath(
            os.path.join(os.path.dirname(__file__), os.path.pardir, ".data",
                         'jenkins-junit.xsd'))
        with open(junit_xsd, 'r') as f:
            xmlschema = etree.XMLSchema(etree.parse(f))  # pylint: disable=I1101
        # pylint: disable=I1101
        self.assertTrue(
            xmlschema.validate(etree.parse(BytesIO(xml))),
            "Failed to validate against %s, content:\n%s\nerror log:\n%s" %
            (junit_xsd, xml, xmlschema.error_log))

    def test_max_test_log_size(self):
        log = tempfile.NamedTemporaryFile(dir=self.tmpdir, delete=False)
        log_content = b"1234567890" * 100
        log_content += b"this should not be present" + b"0987654321" * 100
        log.write(log_content)
        log_path = log.name
        log.close()
        self.test1._Test__status = "ERROR"
        self.test1._Test__logfile = log_path
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        xunit_result = xunit.XUnitResult()
        xunit_result.render(self.test_result, self.job)
        with open(self.job.args.xunit_output, 'rb') as fp:
            unlimited = fp.read()
        self.job.args.xunit_max_test_log_chars = 10
        xunit_result.render(self.test_result, self.job)
        with open(self.job.args.xunit_output, 'rb') as fp:
            limited = fp.read()
        self.assertLess(
            len(limited),
            len(unlimited) - 500,
            "Length of xunit limitted to 10 chars was greater "
            "than (unlimited - 500). Unlimited output:\n%s\n\n"
            "Limited output:\n%s" % (unlimited, limited))
        self.assertIn(b"this should not be present", unlimited)
        self.assertNotIn(b"this should not be present", limited)
        self.assertIn(b"1234567890", unlimited)
        self.assertNotIn(b"1234567890", limited)
        self.assertIn(b"12345", limited)
        self.assertIn(b"0987654321", unlimited)
        self.assertNotIn(b"0987654321", limited)
        self.assertIn(b"54321", limited)
Example #20
0
class xUnitSucceedTest(unittest.TestCase):

    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        args = argparse.Namespace(base_logdir=self.tmpdir)
        args.xunit_output = self.tmpfile[1]
        self.job = job.Job(args)
        self.test_result = Result(FakeJob(args))
        self.test_result.tests_total = 1
        self.test_result.logfile = ("/.../avocado/job-results/"
                                    "job-2018-11-28T16.27-8fef221/job.log")
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
        self.test1._Test__status = 'PASS'
        self.test1.time_elapsed = 678.23689

    def tearDown(self):
        errs = []
        cleanups = (lambda: os.close(self.tmpfile[0]),
                    lambda: os.remove(self.tmpfile[1]),
                    lambda: shutil.rmtree(self.tmpdir))
        for cleanup in cleanups:
            try:
                cleanup()
            except Exception as exc:
                errs.append(str(exc))
        self.assertFalse(errs, "Failures occurred during cleanup:\n%s"
                         % "\n".join(errs))

    @unittest.skipUnless(SCHEMA_CAPABLE,
                         'Unable to validate schema due to missing lxml.etree library')
    def test_add_success(self):
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        xunit_result = xunit.XUnitResult()
        xunit_result.render(self.test_result, self.job)
        with open(self.job.args.xunit_output, 'rb') as fp:
            xml = fp.read()
        try:
            dom = minidom.parseString(xml)
        except Exception as details:
            raise ParseXMLError("Error parsing XML: '%s'.\nXML Contents:\n%s" % (details, xml))
        self.assertTrue(dom)

        els = dom.getElementsByTagName('testsuite')
        self.assertEqual(len(els), 1)
        self.assertEqual(els[0].attributes['time'].value, '678.237')

        els = dom.getElementsByTagName('testcase')
        self.assertEqual(len(els), 1)
        self.assertEqual(els[0].attributes['time'].value, '678.237')

        junit_xsd = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                 os.path.pardir, ".data",
                                                 'jenkins-junit.xsd'))
        with open(junit_xsd, 'r') as f:
            xmlschema = etree.XMLSchema(etree.parse(f))   # pylint: disable=I1101
        # pylint: disable=I1101
        self.assertTrue(xmlschema.validate(etree.parse(BytesIO(xml))),
                        "Failed to validate against %s, content:\n%s\nerror log:\n%s" %
                        (junit_xsd, xml, xmlschema.error_log))

    def test_max_test_log_size(self):
        def get_system_out(out):
            return out[out.find(b"<system-out>"):out.find(b"</system-out>")]
        log = tempfile.NamedTemporaryFile(dir=self.tmpdir, delete=False)
        log_content = b"1234567890" * 100
        log_content += b"this should not be present" + b"0987654321" * 100
        log.write(log_content)
        log_path = log.name
        log.close()
        self.test1._Test__status = "ERROR"
        self.test1._Test__logfile = log_path
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        xunit_result = xunit.XUnitResult()
        xunit_result.render(self.test_result, self.job)
        with open(self.job.args.xunit_output, 'rb') as fp:
            unlimited = fp.read()
        self.job.args.xunit_max_test_log_chars = 10
        xunit_result.render(self.test_result, self.job)
        with open(self.job.args.xunit_output, 'rb') as fp:
            limited = fp.read()
        self.job.args.xunit_max_test_log_chars = 100000
        xunit_result.render(self.test_result, self.job)
        with open(self.job.args.xunit_output, 'rb') as fp:
            limited_but_fits = fp.read()
        self.assertLess(len(limited), len(unlimited) - 500,
                        "Length of xunit limitted to 10 chars was greater "
                        "than (unlimited - 500). Unlimited output:\n%s\n\n"
                        "Limited output:\n%s" % (unlimited, limited))
        unlimited_output = get_system_out(unlimited)
        self.assertIn(log_content, unlimited_output)
        self.assertEqual(unlimited_output, get_system_out(limited_but_fits))
        self.assertIn(b"this should not be present", unlimited)
        self.assertNotIn(b"this should not be present", limited)
        self.assertIn(b"1234567890", unlimited)
        self.assertNotIn(b"1234567890", limited)
        self.assertIn(b"12345", limited)
        self.assertIn(b"0987654321", unlimited)
        self.assertNotIn(b"0987654321", limited)
        self.assertIn(b"54321", limited)
Example #21
 def test_result_no_job_id(self):
     with self.assertRaises(TypeError):
         Result()
Example #22
class JSONResultTest(unittest.TestCase):

    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        args = argparse.Namespace(json_output=self.tmpfile[1])
        self.job = job.Job(args)
        self.test_result = Result(FakeJob(args))
        self.test_result.filename = self.tmpfile[1]
        self.test_result.tests_total = 1
        self.test_result.start_tests()
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
        self.test1.status = 'PASS'
        self.test1.time_elapsed = 1.23

    def tearDown(self):
        os.close(self.tmpfile[0])
        os.remove(self.tmpfile[1])
        shutil.rmtree(self.tmpdir)

    def testAddSuccess(self):
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        with open(self.job.args.json_output) as fp:
            j = fp.read()
        obj = json.loads(j)
        self.assertTrue(obj)
        self.assertEqual(len(obj['tests']), 1)

    def testAddSeveralStatuses(self):
        def run_fake_status(status):
            self.test_result.start_test(self.test1)
            self.test_result.check_test(status)

        def check_item(name, value, exp):
            self.assertEqual(value, exp, "Result%s is %s and not %s\n%s"
                             % (name, value, exp, res))

        # Set the number of tests to all tests + 3
        self.test_result.tests_total = 13
        # Full PASS status
        self.test_result.start_test(self.test1)
        self.test_result.check_test(self.test1.get_state())
        # Only status - valid statuses
        run_fake_status({"status": "PASS"})
        run_fake_status({"status": "SKIP"})
        run_fake_status({"status": "FAIL"})
        run_fake_status({"status": "ERROR"})
        run_fake_status({"status": "WARN"})
        run_fake_status({"status": "INTERRUPTED"})
        # Only status - invalid statuses
        run_fake_status({"status": "INVALID"})
        run_fake_status({"status": None})
        run_fake_status({"status": ""})
        # Postprocess
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        with open(self.job.args.json_output) as json_output:
            res = json.loads(json_output.read())
        check_item("[pass]", res["pass"], 2)
        check_item("[errors]", res["errors"], 4)
        check_item("[failures]", res["failures"], 1)
        check_item("[skip]", res["skip"], 4)
        check_item("[total]", res["total"], 13)

    def testNegativeStatus(self):
        def check_item(name, value, exp):
            self.assertEqual(value, exp, "Result%s is %s and not %s\n%s"
                             % (name, value, exp, res))

        self.test_result.tests_total = 0
        self.test_result.start_test(self.test1)
        self.test_result.check_test(self.test1.get_state())
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        with open(self.job.args.json_output) as json_output:
            res = json.loads(json_output.read())
        check_item("[total]", res["total"], 1)
        check_item("[skip]", res["skip"], 0)
        check_item("[pass]", res["pass"], 1)
Example #23
 def test_result_rate_all_succeeded_with_skips(self):
     result = Result(FakeJob([]))
     result.check_test({'status': 'PASS'})
     result.check_test({'status': 'SKIP'})
     result.end_tests()
     self.assertEqual(result.rate, 100.0)
Example #24
class xUnitSucceedTest(unittest.TestCase):

    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.TemporaryDirectory(prefix=prefix)
        config = {'job.run.result.xunit.output': self.tmpfile[1],
                  'run.results_dir': self.tmpdir.name}
        self.job = job.Job(config)
        self.job.setup()
        self.test_result = Result(UNIQUE_ID, LOGFILE)
        self.test_result.tests_total = 1
        self.test_result.logfile = ("/.../avocado/job-results/"
                                    "job-2018-11-28T16.27-8fef221/job.log")
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir.name)
        self.test1._Test__status = 'PASS'
        self.test1._Test__logfile = ''
        self.test1.time_elapsed = 678.23689

    def tearDown(self):
        self.job.cleanup()
        errs = []
        cleanups = (lambda: os.close(self.tmpfile[0]),
                    lambda: os.remove(self.tmpfile[1]),
                    self.tmpdir.cleanup)
        for cleanup in cleanups:
            try:
                cleanup()
            except Exception as exc:
                errs.append(str(exc))
        self.assertFalse(errs, "Failures occurred during cleanup:\n%s"
                         % "\n".join(errs))

    @unittest.skipUnless(SCHEMA_CAPABLE,
                         'Unable to validate schema due to missing xmlschema library')
    def test_add_success(self):
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        xunit_result = xunit.XUnitResult()
        xunit_result.render(self.test_result, self.job)
        xunit_output = self.job.config.get('job.run.result.xunit.output')
        with open(xunit_output, 'rb') as fp:
            xml = fp.read()
        try:
            dom = minidom.parseString(xml)
        except Exception as details:
            raise ParseXMLError("Error parsing XML: '%s'.\nXML Contents:\n%s" % (details, xml))
        self.assertTrue(dom)

        els = dom.getElementsByTagName('testsuite')
        self.assertEqual(len(els), 1)
        self.assertEqual(els[0].attributes['time'].value, '678.237')

        els = dom.getElementsByTagName('testcase')
        self.assertEqual(len(els), 1)
        self.assertEqual(els[0].attributes['time'].value, '678.237')

        junit_xsd = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                 os.path.pardir, ".data",
                                                 'jenkins-junit.xsd'))
        xml_schema = xmlschema.XMLSchema(junit_xsd)
        self.assertTrue(xml_schema.is_valid(xunit_output))

    def test_max_test_log_size(self):
        def get_system_out(out):
            return out[out.find(b"<system-out>"):out.find(b"</system-out>")]
        log = tempfile.NamedTemporaryFile(dir=self.tmpdir.name, delete=False)
        log_content = b"1234567890" * 100
        log_content += b"this should not be present" + b"0987654321" * 100
        log.write(log_content)
        log_path = log.name
        log.close()
        self.test1._Test__status = "ERROR"
        self.test1._Test__logfile = log_path
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        xunit_result = xunit.XUnitResult()
        xunit_result.render(self.test_result, self.job)
        xunit_output = self.job.config.get('job.run.result.xunit.output')
        with open(xunit_output, 'rb') as fp:
            unlimited = fp.read()
        self.job.config['xunit.max_test_log_chars'] = 10
        xunit_result.render(self.test_result, self.job)
        with open(xunit_output, 'rb') as fp:
            limited = fp.read()
        self.job.config['xunit.max_test_log_chars'] = 100000
        xunit_result.render(self.test_result, self.job)
        with open(xunit_output, 'rb') as fp:
            limited_but_fits = fp.read()
        self.assertLess(len(limited), len(unlimited) - 500,
                        "Length of xunit limitted to 10 chars was greater "
                        "than (unlimited - 500). Unlimited output:\n%s\n\n"
                        "Limited output:\n%s" % (unlimited, limited))
        unlimited_output = get_system_out(unlimited)
        self.assertIn(log_content, unlimited_output)
        self.assertEqual(unlimited_output, get_system_out(limited_but_fits))
        self.assertIn(b"this should not be present", unlimited)
        self.assertNotIn(b"this should not be present", limited)
        self.assertIn(b"1234567890", unlimited)
        self.assertNotIn(b"1234567890", limited)
        self.assertIn(b"12345", limited)
        self.assertIn(b"0987654321", unlimited)
        self.assertNotIn(b"0987654321", limited)
        self.assertIn(b"54321", limited)
Example #25
 def test_result_rate_none_succeeded(self):
     result = Result(FakeJob([]))
     result.check_test({'status': 'FAIL'})
     result.end_tests()
     self.assertEqual(result.rate, 0.0)
Example #26
 def setUp(self):
     prefix = temp_dir_prefix(__name__, self, 'setUp')
     self.tmpdir = tempfile.TemporaryDirectory(prefix=prefix)
     args = argparse.Namespace(base_logdir=self.tmpdir.name)
     self.job = Job(args)
     self.result = Result(self.job)
Example #27
class JSONResultTest(unittest.TestCase):

    def setUp(self):

        class SimpleTest(Test):

            def test(self):
                pass

        self.tmpfile = tempfile.mkstemp()
        self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
        args = argparse.Namespace(json_output=self.tmpfile[1],
                                  base_logdir=self.tmpdir)
        self.job = job.Job(args)
        self.test_result = Result(FakeJob(args))
        self.test_result.filename = self.tmpfile[1]
        self.test_result.tests_total = 1
        self.test1 = SimpleTest(job=self.job, base_logdir=self.tmpdir)
        self.test1._Test__status = 'PASS'
        self.test1.time_elapsed = 1.23

    def tearDown(self):
        os.close(self.tmpfile[0])
        os.remove(self.tmpfile[1])
        shutil.rmtree(self.tmpdir)

    def test_add_success(self):
        self.test_result.start_test(self.test1)
        self.test_result.end_test(self.test1.get_state())
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        with open(self.job.args.json_output) as fp:
            j = fp.read()
        obj = json.loads(j)
        self.assertTrue(obj)
        self.assertEqual(len(obj['tests']), 1)

    def test_add_several_statuses(self):
        def run_fake_status(status):
            self.test_result.start_test(self.test1)
            self.test_result.check_test(status)

        def check_item(name, value, exp):
            self.assertEqual(value, exp, "Result%s is %s and not %s\n%s"
                             % (name, value, exp, res))

        # Set the number of tests to all tests + 3
        self.test_result.tests_total = 13
        # Full PASS status
        self.test_result.start_test(self.test1)
        self.test_result.check_test(self.test1.get_state())
        # Only status - valid statuses
        run_fake_status({"status": "PASS"})
        run_fake_status({"status": "SKIP"})
        run_fake_status({"status": "FAIL"})
        run_fake_status({"status": "ERROR"})
        run_fake_status({"status": "WARN"})
        run_fake_status({"status": "INTERRUPTED"})
        # Only status - invalid statuses
        run_fake_status({"status": "INVALID"})
        run_fake_status({"status": None})
        run_fake_status({"status": ""})
        # Postprocess
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        with open(self.job.args.json_output) as json_output:
            res = json.loads(json_output.read())
        check_item("[pass]", res["pass"], 2)
        check_item("[errors]", res["errors"], 4)
        check_item("[failures]", res["failures"], 1)
        check_item("[skip]", res["skip"], 4)
        check_item("[total]", res["total"], 13)

    def test_negative_status(self):
        def check_item(name, value, exp):
            self.assertEqual(value, exp, "Result%s is %s and not %s\n%s"
                             % (name, value, exp, res))

        self.test_result.tests_total = 0
        self.test_result.start_test(self.test1)
        self.test_result.check_test(self.test1.get_state())
        self.test_result.end_tests()
        json_result = jsonresult.JSONResult()
        json_result.render(self.test_result, self.job)
        with open(self.job.args.json_output) as json_output:
            res = json.loads(json_output.read())
        check_item("[total]", res["total"], 1)
        check_item("[skip]", res["skip"], 0)
        check_item("[pass]", res["pass"], 1)
Example #28
 def test_result_job_without_id(self):
     args = argparse.Namespace()
     Result(FakeJob(args))
     self.assertRaises(AttributeError, Result, FakeJobMissingUniqueId(args))
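Example #28 expects Result to accept a FakeJob but raise AttributeError for FakeJobMissingUniqueId, implying the constructor reads job.unique_id (and, per the other examples, job.logfile). Minimal stubs consistent with these call sites (assumptions, not the suite's actual helpers):

    class FakeJob:
        def __init__(self, args):
            self.args = args
            self.unique_id = '0000000000000000000000000000000000000000'
            self.logfile = None

    class FakeJobMissingUniqueId(FakeJob):
        def __init__(self, args):
            super().__init__(args)
            del self.unique_id  # Result's attribute lookup then fails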