Example #1
def test_report_json_binary_serialization(
    dummy_test_plan_report_with_binary_asserts,
):
    """JSON Serialized & deserialized reports should be equal."""
    test_plan_schema = TestReportSchema(strict=True)
    data = test_plan_schema.dumps(
        dummy_test_plan_report_with_binary_asserts
    ).data

    j = json.loads(data)
    bkey = EntriesField._BYTES_KEY

    # passing assertion
    hx_1_1 = j["entries"][1]["entries"][0]["entries"][1]["first"][bkey]
    hx_1_2 = j["entries"][1]["entries"][0]["entries"][1]["second"][bkey]
    assert ["0xF2"] == hx_1_1 == hx_1_2

    # failing assertion
    hx_2_1 = j["entries"][1]["entries"][0]["entries"][2]["first"][bkey]
    hx_2_2 = j["entries"][1]["entries"][0]["entries"][2]["second"][bkey]
    assert ["0x00", "0xB1", "0xC1"] == hx_2_1
    assert ["0x00", "0xB2", "0xC2"] == hx_2_2

    deserialized_report = test_plan_schema.loads(data).data
    check_report(
        actual=deserialized_report,
        expected=dummy_test_plan_report_with_binary_asserts,
    )
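For context, `TestReportSchema` behaves like a marshmallow 2.x schema here: `dumps()`/`loads()` return result objects whose `.data` attribute carries the payload. A minimal round-trip sketch, assuming a `report` object built elsewhere:

schema = TestReportSchema(strict=True)
serialized = schema.dumps(report).data    # JSON string
restored = schema.loads(serialized).data  # TestReport instance
check_report(actual=restored, expected=report)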
Example #2
def test_parametrization_tagging():
    """
        Parametrization report group should include tags
        generated by `tag_func` and native suite tags in `tag_index` attribute.
    """
    @testsuite(tags='foo')
    class DummySuite(object):
        @testcase(parameters=('red', 'blue', 'green'),
                  tags='alpha',
                  tag_func=lambda kwargs: {'color': kwargs['color']})
        def dummy_test(self, env, result, color):
            pass

    all_tags_index = {
        'simple': {'foo', 'alpha'},
        'color': {'red', 'blue', 'green'}
    }

    parametrization_group = TestGroupReport(
        name='dummy_test',
        category=Categories.PARAMETRIZATION,
        tags={'simple': {'alpha'}},
        entries=[
            TestCaseReport(
                name='dummy_test__color_red',
                tags={'color': {'red'}},
            ),
            TestCaseReport(
                name='dummy_test__color_blue',
                tags={'color': {'blue'}},
            ),
            TestCaseReport(
                name='dummy_test__color_green',
                tags={'color': {'green'}},
            ),
        ])

    multitest = MultiTest(name='MyMultitest', suites=[DummySuite()])

    plan = Testplan(name='plan', parse_cmdline=False)
    plan.add(multitest)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        plan.run()

    expected_report = TestReport(
        name='plan',
        entries=[
            TestGroupReport(name='MyMultitest',
                            category=Categories.MULTITEST,
                            entries=[
                                TestGroupReport(
                                    name='DummySuite',
                                    tags={'simple': {'foo'}},
                                    category=Categories.SUITE,
                                    entries=[parametrization_group])
                            ])
        ])

    check_report(expected_report, plan.report)
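The `tag_func` callable receives each generated testcase's parameter kwargs and returns extra tags for that testcase, which the framework merges with the suite- and testcase-level tags into the report's `tag_index`. A tiny illustration of the mapping used above:

tag_func = lambda kwargs: {'color': kwargs['color']}
# For the 'red' parametrization this yields {'color': 'red'}; the report
# normalizes such values into tag sets, e.g. {'color': {'red'}}.
assert tag_func({'color': 'red'}) == {'color': 'red'}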
Example #3
def check_parametrization(suite_kls, parametrization_group):

    multitest = MultiTest(name="MyMultitest", suites=[suite_kls()])

    plan = Testplan(name="plan", parse_cmdline=False)
    plan.add(multitest)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        plan.run()

    expected_report = TestReport(
        name="plan",
        entries=[
            TestGroupReport(
                name="MyMultitest",
                category=ReportCategories.MULTITEST,
                entries=[
                    TestGroupReport(
                        name="MySuite",
                        category=ReportCategories.TESTSUITE,
                        entries=[parametrization_group],
                    )
                ],
            )
        ],
    )

    check_report(expected_report, plan.report)
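A hypothetical call to this helper might look like the sketch below; the suite class and expected group are illustrative, though the class must be named `MySuite` to match the report hardcoded above:

check_parametrization(
    suite_kls=MySuite,  # hypothetical @testsuite class named MySuite
    parametrization_group=TestGroupReport(
        name="test_sample",
        category=ReportCategories.PARAMETRIZATION,
        entries=[TestCaseReport(name="test_sample__arg_1")],
    ),
)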
Example #4
def test_no_pre_post_steps(mockplan):
    """
    Runs a MultiTest without pre/post steps, expects no pre/post step report.
    """
    multitest = MultiTest(
        name="MyMultiTest",
        suites=[MySuite()],
    )
    mockplan.add(multitest)
    mockplan.run()

    expected_report = TestReport(
        name="plan",
        entries=[
            TestGroupReport(
                name="MyMultiTest",
                category=ReportCategories.MULTITEST,
                entries=[
                    TestGroupReport(
                        name="MySuite",
                        category=ReportCategories.TESTSUITE,
                        entries=[
                            TestCaseReport(name="test_one"),
                            TestCaseReport(
                                name="teardown",
                                entries=[{"type": "Attachment"}],
                            ),
                        ],
                    ),
                ],
            ),
        ],
    )

    check_report(expected_report, mockplan.report)
Example #5
def test_process_runner(mockplan, binary_path, expected_report, test_kwargs):

    process_test = DummyTest(name="MyTest", binary=binary_path, **test_kwargs)
    mockplan.add(process_test)
    assert mockplan.run().run is True

    check_report(expected=expected_report, actual=mockplan.report)
Example #6
def test_parametrization_tagging():
    """
        Parametrization report group should include tags
        generated by `tag_func` and native suite tags in `tag_index` attribute.
    """

    @testsuite(tags="foo")
    class DummySuite(object):
        @testcase(
            parameters=("red", "blue", "green"),
            tags="alpha",
            tag_func=lambda kwargs: {"color": kwargs["color"]},
        )
        def dummy_test(self, env, result, color):
            pass

    parametrization_group = TestGroupReport(
        name="dummy_test",
        category=ReportCategories.PARAMETRIZATION,
        tags={"simple": {"alpha"}},
        entries=[
            TestCaseReport(
                name="dummy_test__color_red", tags={"color": {"red"}}
            ),
            TestCaseReport(
                name="dummy_test__color_blue", tags={"color": {"blue"}}
            ),
            TestCaseReport(
                name="dummy_test__color_green", tags={"color": {"green"}}
            ),
        ],
    )

    multitest = MultiTest(name="MyMultitest", suites=[DummySuite()])

    plan = Testplan(name="plan", parse_cmdline=False)
    plan.add(multitest)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        plan.run()

    expected_report = TestReport(
        name="plan",
        entries=[
            TestGroupReport(
                name="MyMultitest",
                category=ReportCategories.MULTITEST,
                entries=[
                    TestGroupReport(
                        name="DummySuite",
                        tags={"simple": {"foo"}},
                        category=ReportCategories.TESTSUITE,
                        entries=[parametrization_group],
                    )
                ],
            )
        ],
    )

    check_report(expected_report, plan.report)
Example #7
def check_parametrization(mockplan, suite_kls, report_list, tag_dict=None):
    tag_dict = tag_dict or {}
    multitest = MultiTest(name="MyMultitest", suites=[suite_kls()])
    mockplan.add(multitest)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        mockplan.run()

    expected_report = TestReport(
        name="plan",
        entries=[
            TestGroupReport(
                name="MyMultitest",
                category=ReportCategories.MULTITEST,
                entries=[
                    TestGroupReport(
                        name="MySuite",
                        tags=tag_dict,
                        category=ReportCategories.TESTSUITE,
                        entries=report_list,
                    )
                ],
            )
        ],
    )

    check_report(expected_report, mockplan.report)
Example #8
def test_local_pool_integration(
    report_dir,
    multitest_maker,
    expected_report,
    pdf_title,
    expected_plan_result,
    dependant_module,
):
    if dependant_module:
        importorxfail(dependant_module)

    pdf_path = report_dir.join(
        "test_report_local_{}.pdf".format(pdf_title)).strpath
    plan = Testplan(
        name="plan",
        parse_cmdline=False,
        exporters=[PDFExporter.with_config(pdf_path=pdf_path)],
    )

    plan.add(multitest_maker())

    assert not os.path.exists(pdf_path)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        assert plan.run().run is True

    for log in plan.report.flattened_logs:
        if all(word in log["message"] for word in ["tkinter", "TclError"]):
            pytest.xfail(reason="Tkinter not installed properly")

    check_report(expected=expected_report, actual=plan.report)

    assert plan.result.success is expected_plan_result
    assert os.path.exists(pdf_path)
    assert os.stat(pdf_path).st_size > 0
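`importorxfail` is presumably the xfail analogue of `pytest.importorskip`: it marks the test as an expected failure when an optional dependency is missing. A stand-in sketch under that assumption:

def importorxfail(module_name):
    """Xfail the current test if `module_name` cannot be imported."""
    try:
        return __import__(module_name)
    except ImportError:
        pytest.xfail(reason="{} is not available".format(module_name))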
Example #9
def test_pre_post_steps(mockplan):

    multitest = MultiTest(
        name="MyMultitest",
        suites=[MySuite()],
        before_start=check_func_1,
        after_start=check_func_2,
        before_stop=check_func_3,
        after_stop=check_func_4,
    )
    mockplan.add(multitest)
    mockplan.run()

    expected_report = TestReport(
        name="plan",
        entries=[
            TestGroupReport(
                name="MyMultitest",
                category=ReportCategories.MULTITEST,
                entries=[
                    TestGroupReport(
                        name="Before/After Step Checks",
                        category=ReportCategories.TESTSUITE,
                        entries=[
                            TestCaseReport(
                                name="before_start - check_func_1",
                                entries=[{"type": "Equal", "passed": True}],
                            ),
                            TestCaseReport(name="after_start - check_func_2"),
                            TestCaseReport(name="before_stop - check_func_3"),
                            TestCaseReport(
                                name="after_stop - check_func_4",
                                entries=[
                                    {"type": "Equal", "passed": False},
                                    {"type": "Attachment"},
                                ],
                            ),
                        ],
                    ),
                    TestGroupReport(
                        name="MySuite",
                        category=ReportCategories.TESTSUITE,
                        entries=[
                            TestCaseReport(name="test_one"),
                            TestCaseReport(
                                name="teardown",
                                entries=[{"type": "Attachment"}],
                            ),
                        ],
                    ),
                ],
            )
        ],
    )

    check_report(expected_report, mockplan.report)
    assert len(mockplan.report.attachments) == 2
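The expected report implies what each step function does; a hedged reconstruction follows (the exact assertion arguments and the attached file are assumptions, not from this listing):

def check_func_1(env, result):
    result.equal(1, 1)  # produces {"type": "Equal", "passed": True}

def check_func_2(env, result):
    pass  # no assertions: an empty, passing testcase report

def check_func_3(env, result):
    pass

def check_func_4(env, result):
    result.equal(1, 2)       # produces {"type": "Equal", "passed": False}
    result.attach(tmp_file)  # produces {"type": "Attachment"}; tmp_file is hypothetical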
Example #10
def test_process_runner(mockplan, binary_path, expected_report, test_kwargs):

    process_test = DummyTest(name="MyTest", binary=binary_path, **test_kwargs)

    mockplan.add(process_test)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        assert mockplan.run().run is True

    check_report(expected=expected_report, actual=mockplan.report)
Example #11
def test_dry_run():
    """Test the "dry_run" method which generates an empty report skeleton."""
    mtest = multitest.MultiTest(name="MTest",
                                description="Basic multitest.",
                                suites=[Suite()],
                                **MTEST_DEFAULT_PARAMS)
    result = mtest.dry_run()
    report_skeleton = result.report

    # Comparing the reports to spot any inconsistencies.
    testing.check_report(expected=EXPECTED_REPORT_SKELETON,
                         actual=report_skeleton)
Example #12
def test_import(params):
    input_path = params.input_path
    importer = CPPUnitResultImporter(
        input_path,
        name=params.expected_report.name,
        description=params.expected_report.description,
    )
    result = importer.import_result()
    check_report(
        expected=params.expected_report,
        actual=result.as_test_report(),
    )
Example #13
def test_process_runner(binary_path, expected_report, test_kwargs):

    plan = Testplan(name="plan", parse_cmdline=False)

    process_test = DummyTest(name="MyTest", driver=binary_path, **test_kwargs)

    plan.add(process_test)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        assert plan.run().run is True

    check_report(expected=expected_report, actual=plan.report)
Example #14
def test_merge():
    mt_report = TestGroupReport(
        name="MyMultiTest",
        category="multitest",
        uid=0,
        tags={"color": {"green"}},
    )

    mt_report.merge(mt_report_alpha, strict=False)
    mt_report.merge(mt_report_beta_1, strict=False)
    mt_report.merge(mt_report_beta_2, strict=False)
    mt_report.merge(mt_report_gamma, strict=False)

    check_report(actual=mt_report, expected=expected_report)
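A minimal sketch of the merge semantics relied on here, assuming `merge(..., strict=False)` appends child entries that the target report does not yet contain (the reports below are illustrative):

base = TestGroupReport(name="MyMultiTest", category="multitest", uid=0)
other = TestGroupReport(
    name="MyMultiTest",
    category="multitest",
    uid=0,
    entries=[TestCaseReport(name="case_a")],
)
base.merge(other, strict=False)
assert [entry.name for entry in base.entries] == ["case_a"]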
Example #15
def test_merge():
    mt_report = TestGroupReport(
        name='MyMultiTest',
        category='multitest',
        uid=0,
        tags={'color': {'green'}},
    )

    mt_report.merge(mt_report_alpha, strict=False)
    mt_report.merge(mt_report_beta_1, strict=False)
    mt_report.merge(mt_report_beta_2, strict=False)
    mt_report.merge(mt_report_gamma, strict=False)

    check_report(actual=mt_report, expected=expected_report)
Example #16
def test_multitest_tagging(mockplan, multitest_tags, expected_report):

    multitest = MultiTest(
        name="MyMultitest",
        suites=[AlphaSuite(), BetaSuite(),
                GammaSuite()],
        tags=multitest_tags,
    )
    mockplan.add(multitest)
    mockplan.run()

    check_report(
        expected=TestReport(name="plan", entries=[expected_report]),
        actual=mockplan.report,
    )
Example #17
def test_report_json_binary_serialization(
    dummy_test_plan_report_with_binary_asserts,
):
    """JSON-serialized and deserialized reports should be equal."""
    test_plan_schema = TestReportSchema(strict=True)
    data = test_plan_schema.dumps(
        dummy_test_plan_report_with_binary_asserts).data

    j = json.loads(data)
    bkey = EntriesField._BYTES_KEY

    # passing assertion
    hx_1_1 = get_path(j, "entries.1.entries.0.entries.1.first")[bkey]
    hx_1_2 = get_path(j, "entries.1.entries.0.entries.1.second")[bkey]
    assert ["0xF2"] == hx_1_1 == hx_1_2

    # failing assertion
    hx_2_1 = get_path(j, "entries.1.entries.0.entries.2.first")[bkey]
    hx_2_2 = get_path(j, "entries.1.entries.0.entries.2.second")[bkey]
    assert ["0x00", "0xB1", "0xC1"] == hx_2_1
    assert ["0x00", "0xB2", "0xC2"] == hx_2_2

    # dict.match assertion: the schema serializes its comparison table as a
    # list of tuples; the index constants below pick out the key/first/second
    # columns of each row.

    KEY_INDEX = 1
    FIRST_INDEX = 3
    SECOND_INDEX = 4

    comps = get_path(j, "entries.1.entries.0.entries.3.comparison")
    assert comps[0][KEY_INDEX][bkey] == EntriesField._binary_to_hex_list(
        b"binarykey\xB1")
    assert comps[1][FIRST_INDEX][1][bkey] == EntriesField._binary_to_hex_list(
        b"binary value\xB1")
    assert comps[1][SECOND_INDEX][1][bkey] == EntriesField._binary_to_hex_list(
        b"binary value\xB1")
    assert comps[3][FIRST_INDEX][1][bkey] == EntriesField._binary_to_hex_list(
        b"binary\xB1")
    assert comps[3][SECOND_INDEX][1][bkey] == EntriesField._binary_to_hex_list(
        b"binary\xB1")
    assert comps[7][FIRST_INDEX][1][bkey] == EntriesField._binary_to_hex_list(
        b"binary\xB1")
    assert comps[7][SECOND_INDEX][1][bkey] == EntriesField._binary_to_hex_list(
        b"binary\xB1")

    deserialized_report = test_plan_schema.loads(data).data
    check_report(
        actual=deserialized_report,
        expected=dummy_test_plan_report_with_binary_asserts,
    )
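`get_path` walks a nested dict/list structure along a dotted path, with numeric segments indexing into lists. A minimal stand-in, assuming the real helper behaves the same way:

def get_path(obj, path):
    """Resolve a path like "entries.1.entries.0" against nested dicts/lists."""
    for segment in path.split("."):
        obj = obj[int(segment)] if isinstance(obj, list) else obj[segment]
    return obj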
Example #18
def test_pre_post_steps(mockplan):

    multitest = MultiTest(
        name="MyMultitest",
        suites=[MySuite()],
        before_start=check_func_1,
        after_start=check_func_2,
        before_stop=check_func_3,
        after_stop=check_func_4,
    )

    mockplan.add(multitest)
    mockplan.run()

    check_report(expected_report, mockplan.report)
    assert len(mockplan.report.attachments) == 2
Example #19
def test_pre_post_steps():

    multitest = MultiTest(name='MyMultitest',
                          suites=[MySuite()],
                          before_start=check_func_1,
                          after_start=check_func_2,
                          before_stop=check_func_3,
                          after_stop=check_func_4)

    plan = Testplan(name='plan', parse_cmdline=False)
    plan.add(multitest)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        plan.run()

    check_report(expected_report, plan.report)
Example #20
def test_multitest_tagging(multitest_tags, expected_report):

    multitest = MultiTest(name='MyMultitest',
                          suites=[AlphaSuite(),
                                  BetaSuite(),
                                  GammaSuite()],
                          tags=multitest_tags)

    plan = Testplan(name='plan', parse_cmdline=False)
    plan.add(multitest)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        plan.run()

    check_report(expected=TestReport(name='plan', entries=[expected_report]),
                 actual=plan.report)
Example #21
def test_cppunit(mockplan, binary_dir, expected_report, report_status):

    binary_path = os.path.join(binary_dir, "runTests")

    if not os.path.exists(binary_path):
        msg = BINARY_NOT_FOUND_MESSAGE.format(binary_dir=binary_dir,
                                              binary_path=binary_path)
        pytest.skip(msg)

    mockplan.add(Cppunit(name="My Cppunit", binary=binary_path))

    assert mockplan.run().run is True

    check_report(expected=expected_report, actual=mockplan.report)

    assert mockplan.report.status == report_status
Example #22
def test_gtest(mockplan, binary_dir, expected_report, report_status):

    binary_path = os.path.join(binary_dir, "runTests")

    if not os.path.exists(binary_path):
        msg = BINARY_NOT_FOUND_MESSAGE.format(binary_dir=binary_dir,
                                              binary_path=binary_path)
        pytest.skip(msg)

    mockplan.add(GTest(name="My GTest", binary=binary_path))

    with log_propagation_disabled(TESTPLAN_LOGGER):
        assert mockplan.run().run is True

    check_report(expected=expected_report, actual=mockplan.report)

    assert mockplan.report.status == report_status
Example #23
def test_pre_post_steps(mockplan):

    multitest = MultiTest(
        name="MyMultitest",
        suites=[MySuite()],
        before_start=check_func_1,
        after_start=check_func_2,
        before_stop=check_func_3,
        after_stop=check_func_4,
    )

    mockplan.add(multitest)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        mockplan.run()

    check_report(expected_report, mockplan.report)
Example #24
def test_process_pool_integration(
    report_dir, fixture_dirname,
    expected_report, pdf_title,
    expected_plan_result, dependant_module
):
    if dependant_module:
        importorxfail(dependant_module)

    pool = ProcessPool(name='MyPool', size=1)
    pdf_path = report_dir.join('test_report_process_{}.pdf'.format(
        pdf_title)).strpath

    plan = Testplan(
        name='plan',
        parse_cmdline=False,
        exporters=[
            PDFExporter(pdf_path=pdf_path)
        ]
    )
    plan.add_resource(pool)

    runners_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    fixture_path = os.path.join(runners_path, 'fixtures', fixture_dirname)

    task = Task(
        target='make_multitest',
        module='suites',
        path=fixture_path,
    )
    plan.schedule(task, resource='MyPool')

    assert not os.path.exists(pdf_path)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        assert plan.run().run is True

    for log in plan.report.flattened_logs:
        if all(word in log['message'] for word in ['tkinter', 'TclError']):
            pytest.xfail(reason='Tkinter not installed properly')

    check_report(expected=expected_report, actual=plan.report)

    assert plan.result.success is expected_plan_result
    assert os.path.exists(pdf_path)
    assert os.stat(pdf_path).st_size > 0
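The Task above imports a `suites` module from `fixture_path` and calls its `make_multitest` target in the worker process. That fixture module would look roughly like this sketch (the suite body is an assumption):

# suites.py, located under fixture_path
from testplan.testing.multitest import MultiTest, testsuite, testcase

@testsuite
class SampleSuite(object):
    @testcase
    def sample_case(self, env, result):
        result.true(True)

def make_multitest():
    return MultiTest(name="FixtureMultitest", suites=[SampleSuite()])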
Example #25
def test_empty_pre_post_steps(mockplan):
    """
    Runs a MultiTest with an empty after_start step, expects it present and passing.
    """
    multitest = MultiTest(
        name="MyMultiTest",
        suites=[MySuite()],
        after_start=check_func_2,
    )
    mockplan.add(multitest)
    mockplan.run()

    expected_report = TestReport(
        name="plan",
        entries=[
            TestGroupReport(
                name="MyMultiTest",
                category=ReportCategories.MULTITEST,
                entries=[
                    TestGroupReport(
                        name="Before/After Step Checks",
                        category=ReportCategories.TESTSUITE,
                        entries=[
                            TestCaseReport(name="after_start - check_func_2"),
                        ],
                    ),
                    TestGroupReport(
                        name="MySuite",
                        category=ReportCategories.TESTSUITE,
                        entries=[
                            TestCaseReport(name="test_one"),
                            TestCaseReport(
                                name="teardown",
                                entries=[{"type": "Attachment"}],
                            ),
                        ],
                    ),
                ],
            ),
        ],
    )

    assert mockplan.report["MyMultiTest"].status == Status.PASSED
    assert mockplan.report.status == Status.PASSED
    check_report(expected_report, mockplan.report)
Example #26
def test_multitest_tagging(mockplan, multitest_tags, expected_report):

    multitest = MultiTest(
        name="MyMultitest",
        suites=[AlphaSuite(), BetaSuite(),
                GammaSuite()],
        tags=multitest_tags,
    )

    mockplan.add(multitest)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        mockplan.run()

    check_report(
        expected=TestReport(name="plan", entries=[expected_report]),
        actual=mockplan.report,
    )
Example #27
def test_process_pool_integration(
    runpath,
    fixture_dirname,
    expected_report,
    pdf_title,
    expected_plan_result,
    dependant_module,
):
    if dependant_module:
        importorxfail(dependant_module)

    pool = ProcessPool(name="MyProcessPool", size=1)
    pdf_path = os.path.join(
        runpath, "test_report_local_{}.pdf".format(pdf_title)
    )

    plan = TestplanMock(
        name="plan",
        exporters=[PDFExporter(pdf_path=pdf_path)],
        runpath=runpath,
    )
    plan.add_resource(pool)

    runners_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    fixture_path = os.path.join(runners_path, "fixtures", fixture_dirname)

    task = Task(target="make_multitest", module="suites", path=fixture_path)
    plan.schedule(task, resource="MyProcessPool")

    assert not os.path.exists(pdf_path)

    with log_propagation_disabled(TESTPLAN_LOGGER):
        assert plan.run().run is True

    for log in plan.report.flattened_logs:
        if all(word in log["message"] for word in ["tkinter", "TclError"]):
            pytest.xfail(reason="Tkinter not installed properly")

    check_report(expected=expected_report, actual=plan.report)

    assert plan.result.success is expected_plan_result
    assert os.path.exists(pdf_path)
    assert os.stat(pdf_path).st_size > 0
Example #28
def test_hobbestest(mockplan, binary_dir, expected_report):

    binary_path = os.path.join(binary_dir, "hobbes-test")

    if not os.path.exists(binary_path):
        msg = BINARY_NOT_FOUND_MESSAGE.format(binary_dir=binary_dir,
                                              binary_path=binary_path)
        pytest.skip(msg)

    mockplan.add(
        HobbesTest(
            name="My HobbesTest",
            binary=binary_path,
            tests=["Hog", "Net", "Recursives"],
        ))

    assert mockplan.run().run is True

    check_report(expected=expected_report, actual=mockplan.report)
Example #29
def test_run_test(mockplan):
    mockplan.add(
        junit.JUnit(
            name="My Junit",
            description="Junit example test",
            binary=JUNIT_FAKE_BIN,
            junit_args=["test"],
            results_dir=REPORT_PATH,
            proc_cwd=CURRENT_PATH,
        ))

    assert mockplan.run().run is True

    report = mockplan.report
    assert report.status == testplan.report.Status.FAILED

    mt_report = report.entries[0]
    assert len(mt_report.entries) == 3

    check_report(expect_report, mt_report)
Example #30
def test_gtest(binary_dir, expected_report):

    binary_path = os.path.join(binary_dir, 'runTests')

    if not os.path.exists(binary_path):
        msg = BINARY_NOT_FOUND_MESSAGE.format(binary_dir=binary_dir,
                                              binary_path=binary_path)
        pytest.skip(msg)

    plan = Testplan(
        name='plan',
        parse_cmdline=False,
    )

    plan.add(GTest(name='MyGTest', driver=binary_path))

    with log_propagation_disabled(TESTPLAN_LOGGER):
        assert plan.run().run is True

    check_report(expected=expected_report, actual=plan.report)