Example #1
def main(source, target):

    with open(source) as source_file:
        data = json.loads(source_file.read())
        if data.get("version", 1) >= 2 or len(data.get("entries", [])) == 0:
            raise RuntimeError(
                "This utility can only process a single all-in-one JSON"
                " report, you can set `split_json_report` to False in"
                " `JSONExporter` while running Testplan script to get a"
                " single JSON report.")

        report_obj = TestReport.deserialize(data)
        print("Loaded report: {}".format(report_obj.name))

        # We can initialize an exporter object directly, without relying on
        # Testplan internals to trigger the export operation.
        exporter = PDFExporter(
            pdf_path=target,
            pdf_style=Style(
                passing=StyleEnum.ASSERTION_DETAIL,
                failing=StyleEnum.ASSERTION_DETAIL,
            ),
        )

        exporter.export(report_obj)
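
For reference, the single all-in-one JSON report consumed by main() above would come from a plan configured with a JSONExporter. A minimal sketch, assuming the exporter takes a json_path argument alongside the split_json_report flag mentioned in the error message (the plan and file names are illustrative):

from testplan import test_plan
from testplan.exporters.testing import JSONExporter

@test_plan(
    name="JSON Report Example",
    # Keep everything in a single file, as required by the utility above.
    exporters=[JSONExporter(json_path="report.json", split_json_report=False)],
)
def make_report(plan):
    ...  # add MultiTests here, then pass the generated report.json to main()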
Example #2
def main(plan):

    multi_test_1 = MultiTest(name='Primary', suites=[AlphaSuite()])
    multi_test_2 = MultiTest(
        name='Secondary',
        suites=[BetaSuite()],
        # Just print out assertion names / descriptions but not the details
        stdout_style=Style(passing='assertion', failing='assertion'))
    plan.add(multi_test_1)
    plan.add(multi_test_2)
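
For reference, the string-based style above has a direct StyleEnum equivalent (StyleEnum is imported from testplan.report.testing.styles):

equivalent_style = Style(passing=StyleEnum.ASSERTION, failing=StyleEnum.ASSERTION)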
Example #3
def main(source, target):

    with open(source) as source_file:
        data = json.loads(source_file.read())
        report_obj = TestReport.deserialize(data)

        print('Loaded report: {}'.format(report_obj.name))

        # We can initialize an exporter object directly, without relying on
        # Testplan internals to trigger the export operation.
        exporter = PDFExporter(pdf_path=target,
                               pdf_style=Style(
                                   passing=StyleEnum.ASSERTION_DETAIL,
                                   failing=StyleEnum.ASSERTION_DETAIL))

        exporter.export(report_obj)
Example #4

@testsuite
class BetaSuite(object):
    @testcase
    def testcase_one_passed(self, env, result):
        result.equal(1, 1, description="passing equality")

    @testcase
    def testcase_two_passed(self, env, result):
        result.equal("foo", "foo", description="another passing equality")


# The most verbose representation, prints out full
# assertion details for passing & failing testcases.
all_details_a = Style(passing="assertion-detail", failing="assertion-detail")
all_details_b = Style(passing=StyleEnum.ASSERTION_DETAIL,
                      failing=StyleEnum.ASSERTION_DETAIL)

# Terse representation, just prints out final result status, no details.
result_only_a = Style(passing="result", failing="result")
result_only_b = Style(passing=StyleEnum.RESULT, failing=StyleEnum.RESULT)

# It is generally good practice to show more detail for failing tests:

# Descriptions / names for passing assertions
# All details for failing assertions
style_1_a = Style(passing="assertion", failing="assertion-detail")
style_1_b = Style(passing=StyleEnum.ASSERTION,
                  failing=StyleEnum.ASSERTION_DETAIL)
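
Any of the Style objects declared above can then be passed to the plan, or to an individual MultiTest, via the stdout_style argument; for instance (the plan name is illustrative):

@test_plan(name="Styling Example", stdout_style=style_1_b)
def main(plan):
    plan.add(MultiTest(name="Primary", suites=[BetaSuite()]))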
Example #5
        parameters=((2, 3, 5), (5, 10, 15)),
        docstring_func=interpolate_docstring,
    )
    def addition_two(self, env, result, first, second, expected):
        """
          Testing addition with: {first} + {second}
          Expected value: {expected}
        """
        return result.equal(first + second, expected)


@test_plan(
    name="Parametrization Example",
    # Using detailed assertions so we can
    # see testcase context for generated testcases
    stdout_style=Style("assertion-detail", "assertion-detail"),
)
def main(plan):
    plan.add(
        MultiTest(
            name="Primary",
            suites=[SimpleTest(),
                    ErrorTest(),
                    ProductTest(),
                    DocStringTest()],
        ))


if __name__ == "__main__":
    sys.exit(not main())
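
The interpolate_docstring helper referenced via docstring_func is not shown in this excerpt; given the {first}, {second} and {expected} placeholders, it presumably amounts to a str.format call over the parametrization values (the exact signature is an assumption):

def interpolate_docstring(docstring, kwargs):
    """Render the parametrization values into the testcase docstring."""
    return docstring.format(**kwargs)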
Example #6
#!/usr/bin/env python
"""
This example demonstrates parallel test execution in a thread pool.
"""

import sys

from testplan import test_plan, Task
from testplan.parser import TestplanParser
from testplan.runners.pools import ThreadPool
from testplan.report.testing.styles import Style, StyleEnum

OUTPUT_STYLE = Style(StyleEnum.ASSERTION_DETAIL, StyleEnum.ASSERTION_DETAIL)


class CustomParser(TestplanParser):
    """Inheriting base parser."""
    def add_arguments(self, parser):
        """Defining custom arguments for this Testplan."""
        parser.add_argument(
            "--tasks-num",
            action="store",
            type=int,
            default=8,
            help="Number of tests to be scheduled.",
        )
        parser.add_argument(
            "--pool-size",
            action="store",
            type=int,
            default=4,
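
The excerpt above stops before the pool is created; a minimal sketch of how the parsed options might be used, assuming the parsed arguments are exposed as plan.args and that a make_multitest target function lives in a separate tasks module:

@test_plan(name="ThreadPoolExample", parser=CustomParser, stdout_style=OUTPUT_STYLE)
def main(plan):
    # Create a thread pool of the requested size and register it on the plan.
    pool = ThreadPool(name="MyThreadPool", size=plan.args.pool_size)
    plan.add_resource(pool)

    # Schedule one task per requested test; each task builds a MultiTest.
    for idx in range(plan.args.tasks_num):
        task = Task(target="make_multitest", module="tasks", args=(idx,))
        plan.schedule(task, resource="MyThreadPool")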
Example #7
"""

import os
import sys

from testplan.testing.cpp import GTest
from testplan.report.testing.styles import Style

from testplan import test_plan

BINARY_PATH = os.path.join(os.path.dirname(__file__), "test", "runTests")


@test_plan(
    name="GTest Example",
    stdout_style=Style(passing="testcase", failing="assertion-detail"),
)
def main(plan):

    if not os.path.exists(BINARY_PATH):
        raise RuntimeError("You need to compile test binary first.")

    else:
        plan.add(
            GTest(
                name="My GTest",
                driver=BINARY_PATH,
                # You can apply GTest specific filtering via `gtest_filter` arg
                # gtest_filter='SquareRootTest.*',
                # You can also shuffle test order via `gtest_shuffle` arg
                # gtest_shuffle=True
Example #8
            X[:, 1],
            "Feature 1",
            "Feature 2",
            c=y_pred,
        )
        result.matplot(plot)


# Hard-coding `pdf_path` and `pdf_style` so that the downloadable example gives
# meaningful and presentable output. NOTE: passing these arguments
# programmatically makes Testplan ignore any related command line arguments.
@test_plan(
    name="Basic Data Modelling Example",
    pdf_path=os.path.join(os.path.dirname(__file__), "report.pdf"),
    pdf_style=Style(passing="assertion-detail", failing="assertion-detail"),
)
def main(plan):
    """
    Testplan decorated main function to add and execute MultiTests.

    :return: Testplan result object.
    :rtype:  :py:class:`~testplan.base.TestplanResult`
    """
    model_examples = MultiTest(
        name="Model Examples", suites=[ModelExamplesSuite()]
    )
    plan.add(model_examples)


if __name__ == "__main__":
Example #9
# Run test cases that:
# Belong to a suite that inherits from BaseSuite
# AND (have a minimum priority of 5 OR have a priority between 1 and 3)
composed_filter_2 = subclass_filter & composed_filter_1

# Custom filters can also be composed with the built-in filters:
# Run test cases that:
# Belong to suites that inherit from BaseSuite
# AND have the name `test_2`
composed_filter_3 = subclass_filter & Pattern("*:*:test_2")

# Replace the `test_filter` argument with the
# filters declared above to see how they work.


@test_plan(
    name="Custom Test Filters",
    test_filter=priority_filter_1,
    # Using testcase level stdout so we can see filtered testcases
    stdout_style=Style("testcase", "testcase"),
)
def main(plan):

    multi_test = MultiTest(name="Sample", suites=[Alpha(), Beta(), Gamma()])

    plan.add(multi_test)


if __name__ == "__main__":
    sys.exit(not main())
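
Composed filters can themselves be combined further; for instance (the pattern string is illustrative and follows the multitest:suite:testcase format):

# Run testcases that satisfy composed_filter_1 AND belong to the `Sample` MultiTest.
composed_filter_4 = composed_filter_1 & Pattern("Sample:*:*")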
Example #10
# `@test_plan` accepts shortcut arguments `pdf_path` and `pdf_style`
# for PDF reports, meaning that you don't have to instantiate a PDFExporter
# explicitly for basic PDF report generation.

# A PDF report can also be generated via command line arguments like:
# ./test_plan.py --pdf <report-path> --pdf-style <report-style>

# <report-path> should be a valid file system path and <report-style> should be
# one of: `result-only`, `summary`, `extended-summary`, `detailed`.

# If you want to test out command line configuration for PDF generation, please
# remove the `pdf_path` and `pdf_style` arguments below, as programmatic
# declarations override command line arguments.
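# For example (script name and output path are illustrative):
#   ./test_plan.py --pdf my_report.pdf --pdf-style detailed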


@test_plan(
    name='Basic PDF Report Example',
    pdf_path=os.path.join(os.path.dirname(__file__), 'report.pdf'),
    pdf_style=Style(passing='testcase', failing='assertion-detail'),
)
def main(plan):

    multi_test_1 = MultiTest(name='Primary', suites=[AlphaSuite()])
    multi_test_2 = MultiTest(name='Secondary', suites=[BetaSuite()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == '__main__':
    sys.exit(not main())
Example #11
                    {"x": "B", "y": 6},
                    {"x": "C", "y": 15},
                    {"x": "D", "y": 12},
                ],
            },
            description="Bar Graph",
            series_options={
                "Bar 1": {"colour": "green"},
                "Bar 2": {"colour": "purple"},
            },
            graph_options={"legend": True},
        )


# PDF style must be 'assertion-detail' to view
# non-assertion-related details such as graphs or logs.
@test_plan(
    name="Assertions Example",
    stdout_style=Style(
        passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
    ),
    pdf_path=os.path.join(os.path.dirname(__file__), "report.pdf"),
    pdf_style=Style(passing="assertion-detail", failing="assertion-detail"),
)
def main(plan):
    plan.add(MultiTest(name="Graph Assertions Test", suites=[SampleSuite()]))


if __name__ == "__main__":
    sys.exit(not main())
Example #12
    def sort_testcases(self, testcases):
        return self.reverse_sort_by_name(testcases,
                                         operator.attrgetter('__name__'))


noop_sorter = NoopSorter()

custom_sorter_1 = ReverseNameLengthSorter(sort_type='testcases')

custom_sorter_2 = ReverseNameLengthSorter(sort_type=('suites', 'testcases'))


# Replace the `test_sorter` argument with the
# custom sorters declared above to see how they work.
@test_plan(
    name='Custom Sorter Example',
    test_sorter=noop_sorter,
    # Using testcase level stdout so we can see sorted testcases
    stdout_style=Style('testcase', 'testcase'))
def main(plan):

    multi_test_1 = MultiTest(name='Primary',
                             suites=[Alpha(), Beta(),
                                     Epsilon()])

    plan.add(multi_test_1)


if __name__ == '__main__':
    sys.exit(not main())
Example #13
        ),
        docstring_func=interpolate_docstring
    )
    def addition_two(self, env, result, first, second, expected):
        """
          Testing addition with: {first} + {second}
          Expected value: {expected}
        """
        return result.equal(first + second, expected)


@test_plan(
    name='Parametrization Example',
    # Using detailed assertions so we can
    # see testcase context for generated testcases
    stdout_style=Style('assertion-detail', 'assertion-detail')
)
def main(plan):
    plan.add(
        MultiTest(
            name='Primary',
            suites=[
                SimpleTest(),
                ErrorTest(),
                ProductTest(),
                DocStringTest()
            ]
        )
    )

Example #14
import os
import sys

from testplan.testing.cpp import GTest
from testplan.report.testing.styles import Style

from testplan import test_plan

BINARY_PATH = os.path.join(os.path.dirname(__file__), 'test', 'runTests')


@test_plan(
    name='GTest Example',
    stdout_style=Style(
        passing='case',
        failing='assertion-detail'
    )
)
def main(plan):

    if not os.path.exists(BINARY_PATH):
        raise RuntimeError('You need to compile the test binary first.')

    else:
        plan.add(
            GTest(
                name='My GTest',
                driver=BINARY_PATH,
                # You can apply GTest specific filtering via `gtest_filter` arg
                # gtest_filter='SquareRootTest.*',
                # You can also shuffle test order via `gtest_shuffle` arg
Example #15
@testsuite
class BetaSuite(object):

    @testcase
    def passing_testcase_one(self, env, result):
        result.equal(1, 1, description='passing equality')

    @testcase
    def passing_testcase_two(self, env, result):
        result.equal('foo', 'foo', description='another passing equality')


# The most verbose representation, prints out full
# assertion details for passing & failing testcases.
all_details_a = Style(passing='assertion-detail', failing='assertion-detail')
all_details_b = Style(
    passing=StyleEnum.ASSERTION_DETAIL,
    failing=StyleEnum.ASSERTION_DETAIL
)

# Terse representation, just prints out final result status, no details.
result_only_a = Style(passing='result', failing='result')
result_only_b = Style(passing=StyleEnum.RESULT, failing=StyleEnum.RESULT)

# It is generally good practice to show more detail for failing tests:

# Descriptions / names for passing assertions
# All details for failing assertions
style_1_a = Style(passing='assertion', failing='assertion-detail')
style_1_b = Style(
Example #16
            absent_keys=["bar", "beta"],
        )

        # `dict.log` can be used to log a dictionary in human-readable format.

        result.dict.log(dictionary={
            "foo": [1, 2, 3],
            "bar": {
                "color": "blue"
            },
            "baz": "hello world",
        })


@test_plan(
    name="Dict Assertions Example",
    stdout_style=Style(passing=StyleEnum.ASSERTION_DETAIL,
                       failing=StyleEnum.ASSERTION_DETAIL),
)
def main(plan):
    plan.add(MultiTest(
        name="Dict Assertions Test",
        suites=[
            DictSuite(),
        ],
    ))


if __name__ == "__main__":
    sys.exit(not main())
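
Alongside the dict.check and dict.log calls shown above, the dict namespace also provides key-by-key comparison via dict.match; a minimal sketch of a testcase that could be added to DictSuite (keyword argument names are assumed):

    @testcase
    def test_dict_match(self, env, result):
        # Compare two dictionaries key by key; mismatches are reported per key.
        result.dict.match(
            actual={"foo": 1, "bar": 2},
            expected={"foo": 1, "bar": 5},
            description="simple dict match",
        )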
Example #17
    ('suites', 'testcases'))
suite_testcase_shuffler_b = ShuffleSorter(
    shuffle_type=(SortType.SUITES, SortType.TEST_CASES))


# There is another built-in sorter that sorts the tests alphabetically:
testcase_alphanumeric_sorter_a = AlphanumericSorter('testcases')
suite_alphanumeric_sorter = AlphanumericSorter('suites')
suite_testcase_alphanumeric_sorter = AlphanumericSorter(('suites', 'testcases'))


# Replace the `test_sorter` argument with the
# sorters / shufflers declared above to see how they work.

@test_plan(
    name='Test Ordering / Shuffling basics (Programmatic)',
    test_sorter=noop_sorter,
    # Using testcase level stdout so we can see sorted testcases
    stdout_style=Style('case', 'case')
)
def main(plan):

    multi_test_1 = MultiTest(name='Primary', suites=[Alpha(), Beta()])
    multi_test_2 = MultiTest(name='Secondary', suites=[Gamma()])
    plan.add(multi_test_1)
    plan.add(multi_test_2)


if __name__ == '__main__':
    sys.exit(not main())
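
Shuffling can typically also be requested from the command line instead of programmatically, which is what the "(Programmatic)" suffix in the plan name hints at; the flags below are assumptions, so check the script's --help output for the exact options:

# ./test_plan.py --shuffle testcases --shuffle-seed 15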
Example #18
            y=self.y,
            label='Samples',
            c='black')
        y_test = pipeline.predict(self.X_test[:, np.newaxis])
        plot.plot(self.X_test, y_test, label='Model')
        plot.legend(loc='best')
        result.matplot(plot)


# Hard-coding `pdf_path` and `pdf_style` so that the downloadable example gives
# meaningful and presentable output. NOTE: passing these arguments
# programmatically makes Testplan ignore any related command line arguments.
@test_plan(name='Basic Data Modelling Example',
           pdf_path=os.path.join(os.path.dirname(__file__), 'report.pdf'),
           pdf_style=Style(passing='assertion-detail',
                           failing='assertion-detail'))
def main(plan):
    """
    Testplan decorated main function to add and execute MultiTests.

    :return: Testplan result object.
    :rtype:  :py:class:`~testplan.base.TestplanResult`
    """
    model_examples = MultiTest(name='Model Examples',
                               suites=[ModelExamplesSuite()])
    plan.add(model_examples)


if __name__ == '__main__':
    sys.exit(not main())