Example #1
    def test_duration_is_totalled_up_and_outputted(self, stdout):
        features = [Mock(), Mock(), Mock(), Mock()]
        features[0].duration = 1.9
        features[0].status = 'passed'
        features[0].__iter__ = Mock(return_value=iter([]))
        features[1].duration = 2.7
        features[1].status = 'passed'
        features[1].__iter__ = Mock(return_value=iter([]))
        features[2].duration = 3.5
        features[2].status = 'passed'
        features[2].__iter__ = Mock(return_value=iter([]))
        features[3].duration = 4.3
        features[3].status = 'passed'
        features[3].__iter__ = Mock(return_value=iter([]))

        config = Mock()
        reporter = SummaryReporter(config)

        [reporter.feature(f) for f in features]
        eq_(round(reporter.duration, 3), 12.400)

        reporter.end()
        output = stdout.write.call_args_list[-1][0][0]
        minutes = int(reporter.duration / 60)
        seconds = reporter.duration % 60

        assert '%dm' % (minutes,) in output
        assert '%02.1f' % (seconds,) in output
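The test methods shown in these examples are excerpted from a unit-test class for behave's SummaryReporter; the extra parameters (stdout, stderr, format_summary) are mock objects injected by patch decorators that the excerpts do not show. Below is a minimal sketch of the surrounding scaffolding, assuming behave's usual module layout; the class name, decorator placement, and import paths are illustrative assumptions, not part of the excerpts.

    import unittest
    from unittest.mock import Mock, patch   # the Python 2-era examples use: from mock import Mock, patch
    from nose.tools import eq_              # only the older examples use nose's eq_; a plain assert works too

    from behave.model import ScenarioOutline
    from behave.model_core import Status    # newer behave; the older examples use plain status strings
    from behave.reporter.summary import SummaryReporter, format_summary


    class TestSummaryReporter(unittest.TestCase):
        # Each @patch injects one mock argument; the decorator closest to the
        # method supplies the first argument after self, so this stacking
        # matches a signature of (self, format_summary, stdout).
        @patch("sys.stdout")
        @patch("behave.reporter.summary.format_summary")
        def test_feature_status_is_collected_and_reported(self, format_summary, stdout):
            ...  # body as shown in the examples

With scaffolding like this in place, any of the excerpted methods can be pasted into the class and run with a unittest-compatible test runner.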
Example #2
    def test_scenario_status_is_collected_and_reported(self, format_summary,
                                                       stdout):
        feature = Mock()
        scenarios = [Mock(), Mock(), Mock(), Mock(), Mock()]
        scenarios[0].status = Status.failed
        scenarios[0].__iter__ = Mock(return_value=iter([]))
        scenarios[1].status = Status.failed
        scenarios[1].__iter__ = Mock(return_value=iter([]))
        scenarios[2].status = Status.skipped
        scenarios[2].__iter__ = Mock(return_value=iter([]))
        scenarios[3].status = Status.passed
        scenarios[3].__iter__ = Mock(return_value=iter([]))
        scenarios[4].status = Status.untested
        scenarios[4].__iter__ = Mock(return_value=iter([]))
        feature.status = Status.failed
        feature.duration = 12.3
        feature.__iter__ = Mock(return_value=iter(scenarios))

        config = Mock()
        sys.stdout.encoding = "UTF-8"
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            Status.passed.name: 1,
            Status.failed.name: 2,
            Status.skipped.name: 1,
            Status.untested.name: 1,
        }

        eq_(format_summary.call_args_list[1][0], ('scenario', expected))
Example #3
    def test_feature_status_is_collected_and_reported(self, format_summary,
                                                      stdout):
        features = [Mock(), Mock(), Mock(), Mock(), Mock()]
        features[0].duration = 1.9
        features[0].status = Status.passed
        features[0].__iter__ = Mock(return_value=iter([]))
        features[1].duration = 2.7
        features[1].status = Status.failed
        features[1].__iter__ = Mock(return_value=iter([]))
        features[2].duration = 3.5
        features[2].status = Status.skipped
        features[2].__iter__ = Mock(return_value=iter([]))
        features[3].duration = 4.3
        features[3].status = Status.passed
        features[3].__iter__ = Mock(return_value=iter([]))
        features[4].duration = 5.1
        features[4].status = Status.untested
        features[4].__iter__ = Mock(return_value=iter([]))

        config = Mock()
        reporter = SummaryReporter(config)

        [reporter.feature(f) for f in features]
        reporter.end()

        expected = {
            Status.passed.name: 2,
            Status.failed.name: 1,
            Status.skipped.name: 1,
            Status.untested.name: 1,
        }

        eq_(format_summary.call_args_list[0][0], ('feature', expected))
Example #4
    def test_feature_status_is_collected_and_reported(self, format_summary,
                                                      stdout):
        # pylint: disable=W0621
        #   W0621   Redefining name ... from outer scope (format_summary)
        features = [Mock(), Mock(), Mock(), Mock(), Mock()]
        features[0].duration = 1.9
        features[0].status = 'passed'
        features[0].__iter__ = Mock(return_value=iter([]))
        features[1].duration = 2.7
        features[1].status = 'failed'
        features[1].__iter__ = Mock(return_value=iter([]))
        features[2].duration = 3.5
        features[2].status = 'skipped'
        features[2].__iter__ = Mock(return_value=iter([]))
        features[3].duration = 4.3
        features[3].status = 'passed'
        features[3].__iter__ = Mock(return_value=iter([]))
        features[4].duration = 5.1
        features[4].status = None
        features[4].__iter__ = Mock(return_value=iter([]))

        config = Mock()
        reporter = SummaryReporter(config)

        [reporter.feature(f) for f in features]
        reporter.end()

        expected = {
            'passed': 2,
            'failed': 1,
            'skipped': 2,
            'untested': 0,
        }

        eq_(format_summary.call_args_list[0][0], ('feature', expected))
Example #5
    def test_feature_status_is_collected_and_reported(self, format_summary,
                                                      stdout):
        features = [Mock(), Mock(), Mock(), Mock(), Mock()]
        features[0].duration = 1.9
        features[0].status = 'passed'
        features[0].__iter__ = Mock(return_value=iter([]))
        features[1].duration = 2.7
        features[1].status = 'failed'
        features[1].__iter__ = Mock(return_value=iter([]))
        features[2].duration = 3.5
        features[2].status = 'skipped'
        features[2].__iter__ = Mock(return_value=iter([]))
        features[3].duration = 4.3
        features[3].status = 'passed'
        features[3].__iter__ = Mock(return_value=iter([]))
        features[4].duration = 5.1
        features[4].status = None
        features[4].__iter__ = Mock(return_value=iter([]))

        config = Mock()
        reporter = SummaryReporter(config)

        [reporter.feature(f) for f in features]
        reporter.end()

        expected = {
            'passed': 2,
            'failed': 1,
            'skipped': 2,
            'untested': 0,
        }

        eq_(format_summary.call_args_list[0][0], ('feature', expected))
Example #6
    def test_scenario_status_is_collected_and_reported(self, format_summary,
                                                       stdout):
        feature = Mock()
        scenarios = [Mock(), Mock(), Mock(), Mock(), Mock()]
        scenarios[0].status = 'failed'
        scenarios[0].__iter__ = Mock(return_value=iter([]))
        scenarios[1].status = 'failed'
        scenarios[1].__iter__ = Mock(return_value=iter([]))
        scenarios[2].status = 'skipped'
        scenarios[2].__iter__ = Mock(return_value=iter([]))
        scenarios[3].status = 'passed'
        scenarios[3].__iter__ = Mock(return_value=iter([]))
        scenarios[4].status = None
        scenarios[4].__iter__ = Mock(return_value=iter([]))
        feature.status = 'failed'
        feature.duration = 12.3
        feature.__iter__ = Mock(return_value=iter(scenarios))

        config = Mock()
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            'passed': 1,
            'failed': 2,
            'skipped': 2,
            'untested': 0,
        }

        eq_(format_summary.call_args_list[1][0], ('scenario', expected))
Example #7
    def test_feature_status_is_collected_and_reported(self, format_summary,
                                                      stdout):
        features = [Mock(), Mock(), Mock(), Mock(), Mock()]
        features[0].duration = 1.9
        features[0].status = Status.passed
        features[0].__iter__ = Mock(return_value=iter([]))
        features[1].duration = 2.7
        features[1].status = Status.failed
        features[1].__iter__ = Mock(return_value=iter([]))
        features[2].duration = 3.5
        features[2].status = Status.skipped
        features[2].__iter__ = Mock(return_value=iter([]))
        features[3].duration = 4.3
        features[3].status = Status.passed
        features[3].__iter__ = Mock(return_value=iter([]))
        features[4].duration = 5.1
        features[4].status = Status.untested
        features[4].__iter__ = Mock(return_value=iter([]))

        config = Mock()
        sys.stdout.encoding = "UTF-8"
        reporter = SummaryReporter(config)

        [reporter.feature(f) for f in features]
        reporter.end()

        expected = {
            "all": 5,
            Status.passed.name: 2,
            Status.failed.name: 1,
            Status.skipped.name: 1,
            Status.untested.name: 1,
        }
        expected_parts = ("feature", expected)
        assert format_summary.call_args_list[0][0] == expected_parts
Example #8
    def test_scenario_outline_status_is_collected_and_reported(self, stdout,
                                                               format_summary):
        feature = Mock()
        scenarios = [ ScenarioOutline(u"<string>", 0, u"scenario_outline", u"name"),
                      Mock(), Mock(), Mock() ]
        subscenarios = [ Mock(), Mock(), Mock(), Mock() ]
        subscenarios[0].status = Status.passed
        subscenarios[0].__iter__ = Mock(return_value=iter([]))
        subscenarios[1].status = Status.failed
        subscenarios[1].__iter__ = Mock(return_value=iter([]))
        subscenarios[2].status = Status.failed
        subscenarios[2].__iter__ = Mock(return_value=iter([]))
        subscenarios[3].status = Status.skipped
        subscenarios[3].__iter__ = Mock(return_value=iter([]))
        scenarios[0]._scenarios = subscenarios
        scenarios[1].status = Status.failed
        scenarios[1].__iter__ = Mock(return_value=iter([]))
        scenarios[2].status = Status.skipped
        scenarios[2].__iter__ = Mock(return_value=iter([]))
        scenarios[3].status = Status.passed
        scenarios[3].__iter__ = Mock(return_value=iter([]))
        feature.status = Status.failed
        feature.duration = 12.4
        feature.__iter__ = Mock(return_value=iter(scenarios))

        config = Mock()
        sys.stdout.encoding = "UTF-8"
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            "all": 7,
            Status.passed.name: 2,
            Status.failed.name: 3,
            Status.skipped.name: 2,
            Status.untested.name: 0,
        }
        scenario_index = 1  # -- HINT: Index for scenarios if no Rules are used.
        expected_parts = ("scenario", expected)
        assert format_summary.call_args_list[scenario_index][0] == expected_parts
Example #9
    def test_scenario_outline_status_is_collected_and_reported(self, stdout,
                                                               format_summary):
        # FIX: issue40
        # ENSURE: ScenarioOutline's scenarios are walked and collected.
        feature = Mock()
        scenarios = [ ScenarioOutline(u"<string>", 0, u"scenario_outline", u"name"),
                      Mock(), Mock(), Mock() ]
        subscenarios = [ Mock(), Mock(), Mock(), Mock() ]
        subscenarios[0].status = 'passed'
        subscenarios[0].__iter__ = Mock(return_value=iter([]))
        subscenarios[1].status = 'failed'
        subscenarios[1].__iter__ = Mock(return_value=iter([]))
        subscenarios[2].status = 'failed'
        subscenarios[2].__iter__ = Mock(return_value=iter([]))
        subscenarios[3].status = 'skipped'
        subscenarios[3].__iter__ = Mock(return_value=iter([]))
        scenarios[0]._scenarios = subscenarios
        scenarios[1].status = 'failed'
        scenarios[1].__iter__ = Mock(return_value=iter([]))
        scenarios[2].status = 'skipped'
        scenarios[2].__iter__ = Mock(return_value=iter([]))
        scenarios[3].status = 'passed'
        scenarios[3].__iter__ = Mock(return_value=iter([]))
        feature.status = 'failed'
        feature.duration = 12.4
        feature.__iter__ = Mock(return_value=iter(scenarios))

        config = Mock()
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            'passed': 2,
            'failed': 3,
            'skipped': 2,
            'untested': 0,
            }

        eq_(format_summary.call_args_list[1][0], ('scenario', expected))
Example #10
    def test_scenario_status_is_collected_and_reported(self, format_summary,
                                                       stdout):
        feature = Mock()
        scenarios = [Mock(), Mock(), Mock(), Mock(), Mock()]
        scenarios[0].status = Status.failed
        scenarios[0].__iter__ = Mock(return_value=iter([]))
        scenarios[1].status = Status.failed
        scenarios[1].__iter__ = Mock(return_value=iter([]))
        scenarios[2].status = Status.skipped
        scenarios[2].__iter__ = Mock(return_value=iter([]))
        scenarios[3].status = Status.passed
        scenarios[3].__iter__ = Mock(return_value=iter([]))
        scenarios[4].status = Status.untested
        scenarios[4].__iter__ = Mock(return_value=iter([]))
        feature.status = Status.failed
        feature.duration = 12.3
        feature.__iter__ = Mock(return_value=iter(scenarios))

        config = Mock()
        sys.stdout.encoding = "UTF-8"
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            Status.passed.name: 1,
            Status.failed.name: 2,
            Status.skipped.name: 1,
            Status.untested.name: 1,
        }

        eq_(format_summary.call_args_list[1][0], ('scenario', expected))
Example #11
    def test_step_status_is_collected_and_reported(self, format_summary,
                                                   stdout):
        feature = Mock()
        scenario = Mock()
        steps = [Mock(), Mock(), Mock(), Mock(), Mock()]
        steps[0].status = Status.failed
        steps[0].__iter__ = Mock(return_value=iter([]))
        steps[1].status = Status.passed
        steps[1].__iter__ = Mock(return_value=iter([]))
        steps[2].status = Status.passed
        steps[2].__iter__ = Mock(return_value=iter([]))
        steps[3].status = Status.skipped
        steps[3].__iter__ = Mock(return_value=iter([]))
        steps[4].status = Status.undefined
        steps[4].__iter__ = Mock(return_value=iter([]))
        feature.status = Status.failed
        feature.duration = 12.3
        feature.__iter__ = Mock(return_value=iter([scenario]))
        scenario.status = Status.failed
        scenario.__iter__ = Mock(return_value=iter(steps))

        config = Mock()
        sys.stdout.encoding = "UTF-8"
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            "all": 5,
            Status.passed.name: 2,
            Status.failed.name: 1,
            Status.skipped.name: 1,
            Status.untested.name: 0,
            Status.undefined.name: 1,
        }

        step_index = 2  # HINT: Index for steps if no Rules are used.
        expected_parts = ("step", expected)
        assert format_summary.call_args_list[step_index][0] == expected_parts
Example #12
    def test_scenario_outline_status_is_collected_and_reported(self, stdout,
                                                               format_summary):
        feature = Mock()
        scenarios = [ ScenarioOutline(u"<string>", 0, u"scenario_outline", u"name"),
                      Mock(), Mock(), Mock() ]
        subscenarios = [ Mock(), Mock(), Mock(), Mock() ]
        subscenarios[0].status = Status.passed
        subscenarios[0].__iter__ = Mock(return_value=iter([]))
        subscenarios[1].status = Status.failed
        subscenarios[1].__iter__ = Mock(return_value=iter([]))
        subscenarios[2].status = Status.failed
        subscenarios[2].__iter__ = Mock(return_value=iter([]))
        subscenarios[3].status = Status.skipped
        subscenarios[3].__iter__ = Mock(return_value=iter([]))
        scenarios[0]._scenarios = subscenarios
        scenarios[1].status = Status.failed
        scenarios[1].__iter__ = Mock(return_value=iter([]))
        scenarios[2].status = Status.skipped
        scenarios[2].__iter__ = Mock(return_value=iter([]))
        scenarios[3].status = Status.passed
        scenarios[3].__iter__ = Mock(return_value=iter([]))
        feature.status = Status.failed
        feature.duration = 12.4
        feature.__iter__ = Mock(return_value=iter(scenarios))

        config = Mock()
        sys.stdout.encoding = "UTF-8"
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            Status.passed.name: 2,
            Status.failed.name: 3,
            Status.skipped.name: 2,
            Status.untested.name: 0,
            }

        eq_(format_summary.call_args_list[1][0], ('scenario', expected))
Example #13
    def __init__(self):
        self.formatters = []
        self.reporters = []
        self.name_re = None
        load_configuration(self.defaults)
        parser.set_defaults(**self.defaults)

        args = parser.parse_args()
        for key, value in args.__dict__.items():
            if key.startswith('_'):
                continue
            setattr(self, key, value)

        if args.outfile and args.outfile != '-':
            self.output = open(args.outfile, 'w')
        else:
            self.output = sys.stdout

        if self.wip:
            # Only run scenarios tagged with "wip". Additionally: use the
            # "plain" formatter, do not capture stdout or logging output and
            # stop at the first failure.
            self.format = ['plain']
            self.tags = ['wip']
            self.stop = True
            self.log_capture = False
            self.stdout_capture = False

        self.tags = TagExpression(self.tags or [])

        if self.quiet:
            self.show_source = False
            self.show_snippets = False

        if self.exclude_re:
            self.exclude_re = re.compile(self.exclude_re)

        if self.include_re:
            self.include_re = re.compile(self.include_re)
        if self.name:
            # -- SELECT: Scenario-by-name, build regular expression.
            self.name_re = self.build_name_re(self.name)

        if self.junit:
            # Buffer the output (it will be put into Junit report)
            self.stdout_capture = True
            self.stderr_capture = True
            self.log_capture = True
            self.reporters.append(JUnitReporter(self))
        if self.summary:
            self.reporters.append(SummaryReporter(self))
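The Configuration.__init__ excerpt above (and the variants further down) is where SummaryReporter gets wired in: when the summary option is enabled, a SummaryReporter is appended to config.reporters, next to JUnitReporter when JUnit output is requested. At run time behave's runner drives every registered reporter through the same two hooks the unit tests above call directly. The sketch below illustrates that flow under those assumptions; run_one_feature() is a hypothetical placeholder for the actual feature execution, not a behave API.

    # Sketch of how the registered reporters are driven during a run; the unit
    # tests above exercise the same feature()/end() hooks on SummaryReporter
    # directly.  run_one_feature() is a hypothetical placeholder.
    def report_run(config, features):
        for feature in features:
            run_one_feature(feature)              # execute the feature's scenarios and steps
            for reporter in config.reporters:     # includes SummaryReporter when config.summary is set
                reporter.feature(feature)         # collect per-feature status and accumulate duration

        for reporter in config.reporters:
            reporter.end()                        # SummaryReporter writes the summary counts and total duration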
Example #14
    def test_step_status_is_collected_and_reported(self, format_summary,
                                                   stderr):
        # pylint: disable=W0621
        #   W0621   Redefining name ... from outer scope (format_summary)
        feature = Mock()
        scenario = Mock()
        steps = [Mock(), Mock(), Mock(), Mock(), Mock()]
        steps[0].status = 'failed'
        steps[0].__iter__ = Mock(return_value=iter([]))
        steps[1].status = 'undefined'
        steps[1].__iter__ = Mock(return_value=iter([]))
        steps[2].status = 'passed'
        steps[2].__iter__ = Mock(return_value=iter([]))
        steps[3].status = 'passed'
        steps[3].__iter__ = Mock(return_value=iter([]))
        steps[4].status = None
        steps[4].__iter__ = Mock(return_value=iter([]))
        feature.status = 'failed'
        feature.duration = 12.3
        feature.__iter__ = Mock(return_value=iter([scenario]))
        scenario.status = 'failed'
        scenario.__iter__ = Mock(return_value=iter(steps))

        config = Mock()
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            'passed': 2,
            'failed': 1,
            'skipped': 1,
            'untested': 0,
            'undefined': 1,
        }

        eq_(format_summary.call_args_list[2][0], ('step', expected))
Example #15
    def test_scenario_outline_status_is_collected_and_reported(
            self, stdout, format_summary):
        feature = Mock()
        scenarios = [
            ScenarioOutline(u"<string>", 0, u"scenario_outline", u"name"),
            Mock(),
            Mock(),
            Mock()
        ]
        subscenarios = [Mock(), Mock(), Mock(), Mock()]
        subscenarios[0].status = Status.passed
        subscenarios[0].__iter__ = Mock(return_value=iter([]))
        subscenarios[1].status = Status.failed
        subscenarios[1].__iter__ = Mock(return_value=iter([]))
        subscenarios[2].status = Status.failed
        subscenarios[2].__iter__ = Mock(return_value=iter([]))
        subscenarios[3].status = Status.skipped
        subscenarios[3].__iter__ = Mock(return_value=iter([]))
        scenarios[0]._scenarios = subscenarios
        scenarios[1].status = Status.failed
        scenarios[1].__iter__ = Mock(return_value=iter([]))
        scenarios[2].status = Status.skipped
        scenarios[2].__iter__ = Mock(return_value=iter([]))
        scenarios[3].status = Status.passed
        scenarios[3].__iter__ = Mock(return_value=iter([]))
        feature.status = Status.failed
        feature.duration = 12.4
        feature.__iter__ = Mock(return_value=iter(scenarios))

        config = Mock()
        sys.stdout.encoding = "UTF-8"
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            "all": 7,
            Status.passed.name: 2,
            Status.failed.name: 3,
            Status.skipped.name: 2,
            Status.untested.name: 0,
        }
        scenario_index = 1  # -- HINT: Index for scenarios if no Rules are used.
        expected_parts = ("scenario", expected)
        assert format_summary.call_args_list[scenario_index][0] == expected_parts
Example #16
    def test_scenario_outline_status_is_collected_and_reported(
            self, stdout, format_summary):
        feature = Mock()
        scenarios = [
            ScenarioOutline(u"<string>", 0, u"scenario_outline", u"name"),
            Mock(),
            Mock(),
            Mock()
        ]
        subscenarios = [Mock(), Mock(), Mock(), Mock()]
        subscenarios[0].status = Status.passed
        subscenarios[0].__iter__ = Mock(return_value=iter([]))
        subscenarios[1].status = Status.failed
        subscenarios[1].__iter__ = Mock(return_value=iter([]))
        subscenarios[2].status = Status.failed
        subscenarios[2].__iter__ = Mock(return_value=iter([]))
        subscenarios[3].status = Status.skipped
        subscenarios[3].__iter__ = Mock(return_value=iter([]))
        scenarios[0]._scenarios = subscenarios
        scenarios[1].status = Status.failed
        scenarios[1].__iter__ = Mock(return_value=iter([]))
        scenarios[2].status = Status.skipped
        scenarios[2].__iter__ = Mock(return_value=iter([]))
        scenarios[3].status = Status.passed
        scenarios[3].__iter__ = Mock(return_value=iter([]))
        feature.status = Status.failed
        feature.duration = 12.4
        feature.__iter__ = Mock(return_value=iter(scenarios))

        config = Mock()
        sys.stdout.encoding = "UTF-8"
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            Status.passed.name: 2,
            Status.failed.name: 3,
            Status.skipped.name: 2,
            Status.untested.name: 0,
        }

        eq_(format_summary.call_args_list[1][0], ('scenario', expected))
Example #17
    def test_scenario_outline_status_is_collected_and_reported(
            self, stderr, format_summary):
        feature = Mock()
        scenarios = [
            ScenarioOutline(u"<string>", 0, u"scenario_outline", u"name"),
            Mock(),
            Mock(),
            Mock()
        ]
        subscenarios = [Mock(), Mock(), Mock(), Mock()]
        subscenarios[0].status = 'passed'
        subscenarios[0].__iter__ = Mock(return_value=iter([]))
        subscenarios[1].status = 'failed'
        subscenarios[1].__iter__ = Mock(return_value=iter([]))
        subscenarios[2].status = 'failed'
        subscenarios[2].__iter__ = Mock(return_value=iter([]))
        subscenarios[3].status = 'skipped'
        subscenarios[3].__iter__ = Mock(return_value=iter([]))
        scenarios[0]._scenarios = subscenarios
        scenarios[1].status = 'failed'
        scenarios[1].__iter__ = Mock(return_value=iter([]))
        scenarios[2].status = 'skipped'
        scenarios[2].__iter__ = Mock(return_value=iter([]))
        scenarios[3].status = 'passed'
        scenarios[3].__iter__ = Mock(return_value=iter([]))
        feature.status = 'failed'
        feature.duration = 12.4
        feature.__iter__ = Mock(return_value=iter(scenarios))

        config = Mock()
        reporter = SummaryReporter(config)

        reporter.feature(feature)
        reporter.end()

        expected = {
            'passed': 2,
            'failed': 3,
            'skipped': 2,
            'untested': 0,
        }

        eq_(format_summary.call_args_list[1][0], ('scenario', expected))
Example #18
    def __init__(self,
                 command_args=None,
                 load_config=True,
                 verbose=None,
                 **kwargs):
        """
        Constructs a behave configuration object.
          * loads the configuration defaults (if needed).
          * processes the command-line args
          * stores the configuration results

        :param command_args: Provide command args (as sys.argv).
            If command_args is None, sys.argv[1:] is used.
        :type command_args: list<str>, str
        :param load_config: Indicate if configfile should be loaded (=true)
        :param verbose: Indicate if diagnostic output is enabled
        :param kwargs:  Used to hand-over/overwrite default values.
        """
        # pylint: disable=too-many-branches, too-many-statements
        if command_args is None:
            command_args = sys.argv[1:]
        elif isinstance(command_args, six.string_types):
            encoding = select_best_encoding() or "utf-8"
            if six.PY2 and isinstance(command_args, six.text_type):
                command_args = command_args.encode(encoding)
            elif six.PY3 and isinstance(command_args, six.binary_type):
                command_args = command_args.decode(encoding)
            command_args = shlex.split(command_args)
        elif isinstance(command_args, (list, tuple)):
            command_args = to_texts(command_args)

        if verbose is None:
            # -- AUTO-DISCOVER: Verbose mode from command-line args.
            verbose = ("-v" in command_args) or ("--verbose" in command_args)

        self.version = None
        self.tags_help = None
        self.lang_list = None
        self.lang_help = None
        self.default_tags = None
        self.junit = None
        self.logging_format = None
        self.logging_datefmt = None
        self.name = None
        self.scope = None
        self.steps_catalog = None
        self.userdata = None
        self.wip = None

        defaults = self.defaults.copy()
        for name, value in six.iteritems(kwargs):
            defaults[name] = value
        self.defaults = defaults
        self.formatters = []
        self.reporters = []
        self.name_re = None
        self.outputs = []
        self.include_re = None
        self.exclude_re = None
        self.scenario_outline_annotation_schema = None  # pylint: disable=invalid-name
        self.steps_dir = "steps"
        self.environment_file = "environment.py"
        self.userdata_defines = None
        self.more_formatters = None
        if load_config:
            load_configuration(self.defaults, verbose=verbose)
        parser = setup_parser()
        parser.set_defaults(**self.defaults)
        args = parser.parse_args(command_args)
        for key, value in six.iteritems(args.__dict__):
            if key.startswith("_") and key not in self.cmdline_only_options:
                continue
            setattr(self, key, value)

        self.paths = [os.path.normpath(path) for path in self.paths]
        self.setup_outputs(args.outfiles)

        if self.steps_catalog:
            # -- SHOW STEP-CATALOG: As step summary.
            self.default_format = "steps.catalog"
            self.format = ["steps.catalog"]
            self.dry_run = True
            self.summary = False
            self.show_skipped = False
            self.quiet = True

        if self.wip:
            # Only run scenarios tagged with "wip".
            # Additionally:
            #  * use the "plain" formatter (per default)
            #  * do not capture stdout or logging output and
            #  * stop at the first failure.
            self.default_format = "plain"
            self.tags = ["wip"] + self.default_tags.split()
            self.color = False
            self.stop = True
            self.log_capture = False
            self.stdout_capture = False

        self.tags = TagExpression(self.tags or self.default_tags.split())

        if self.quiet:
            self.show_source = False
            self.show_snippets = False

        if self.exclude_re:
            self.exclude_re = re.compile(self.exclude_re)

        if self.include_re:
            self.include_re = re.compile(self.include_re)
        if self.name:
            # -- SELECT: Scenario-by-name, build regular expression.
            self.name_re = self.build_name_re(self.name)

        if self.stage is None:  # pylint: disable=access-member-before-definition
            # -- USE ENVIRONMENT-VARIABLE, if stage is undefined.
            self.stage = os.environ.get("BEHAVE_STAGE", None)
        self.setup_stage(self.stage)
        self.setup_model()
        self.setup_userdata()

        # -- FINALLY: Setup Reporters and Formatters
        # NOTE: Reporters and Formatters can now use userdata information.
        if self.junit:
            # Buffer the output (it will be put into Junit report)
            self.stdout_capture = True
            self.stderr_capture = True
            self.log_capture = True
            self.reporters.append(JUnitReporter(self))
        if self.summary:
            self.reporters.append(SummaryReporter(self))

        self.setup_formats()
        unknown_formats = self.collect_unknown_formats()
        if unknown_formats:
            parser.error("format=%s is unknown" % ", ".join(unknown_formats))
Example #19
    def __init__(self, command_args=None, verbose=None):
        """
        Constructs a behave configuration object.
          * loads the configuration defaults.
          * processes the command-line args
          * stores the configuration results

        :param command_args: Provide command args (as sys.argv).
            If command_args is None, sys.argv[1:] is used.
        :type command_args: list<str>, str
        :param verbose: Indicate if diagnostic output is enabled
        """
        if command_args is None:
            command_args = sys.argv[1:]
        elif isinstance(command_args, basestring):
            command_args = shlex.split(command_args)
        if verbose is None:
            # -- AUTO-DISCOVER: Verbose mode from command-line args.
            verbose = ('-v' in command_args) or ('--verbose' in command_args)

        self.formatters = []
        self.reporters = []
        self.name_re = None
        self.outputs = []
        self.include_re = None
        self.exclude_re = None
        load_configuration(self.defaults, verbose=verbose)
        parser.set_defaults(**self.defaults)
        args = parser.parse_args(command_args)
        for key, value in args.__dict__.items():
            if key.startswith('_'):
                continue
            setattr(self, key, value)

        self.paths = [os.path.normpath(path) for path in self.paths]
        if not args.outfiles:
            self.outputs.append(StreamOpener(stream=sys.stdout))
        else:
            for outfile in args.outfiles:
                if outfile and outfile != '-':
                    self.outputs.append(StreamOpener(outfile))
                else:
                    self.outputs.append(StreamOpener(stream=sys.stdout))

        if self.wip:
            # Only run scenarios tagged with "wip". Additionally: use the
            # "plain" formatter, do not capture stdout or logging output and
            # stop at the first failure.
            self.format = ['plain']
            self.tags = ['wip']
            self.stop = True
            self.log_capture = False
            self.stdout_capture = False

        self.tags = TagExpression(self.tags or [])

        if self.quiet:
            self.show_source = False
            self.show_snippets = False

        if self.exclude_re:
            self.exclude_re = re.compile(self.exclude_re)

        if self.include_re:
            self.include_re = re.compile(self.include_re)
        if self.name:
            # -- SELECT: Scenario-by-name, build regular expression.
            self.name_re = self.build_name_re(self.name)

        if self.junit:
            # Buffer the output (it will be put into Junit report)
            self.stdout_capture = True
            self.stderr_capture = True
            self.log_capture = True
            self.reporters.append(JUnitReporter(self))
        if self.summary:
            self.reporters.append(SummaryReporter(self))

        unknown_formats = self.collect_unknown_formats()
        if unknown_formats:
            parser.error("format=%s is unknown" % ", ".join(unknown_formats))
Example #20
    def __init__(self):
        self.formatters = []
        self.reporters = []
        self.name_re = None
        self.outputs = []
        load_configuration(self.defaults)
        parser.set_defaults(**self.defaults)

        args = parser.parse_args()
        for key, value in args.__dict__.items():
            if key.startswith('_'):
                continue
            setattr(self, key, value)

        self.paths = [os.path.normpath(path) for path in self.paths]
        if not args.outfiles:
            self.outputs.append(StreamOpener(stream=sys.stdout))
        else:
            for outfile in args.outfiles:
                if outfile and outfile != '-':
                    self.outputs.append(StreamOpener(outfile))
                else:
                    self.outputs.append(StreamOpener(stream=sys.stdout))

        if self.wip:
            # Only run scenarios tagged with "wip". Additionally: use the
            # "plain" formatter, do not capture stdout or logging output and
            # stop at the first failure.
            self.format = ['plain']
            self.tags = ['wip']
            self.stop = True
            self.log_capture = False
            self.stdout_capture = False

        self.tags = TagExpression(self.tags or [])

        if self.quiet:
            self.show_source = False
            self.show_snippets = False

        if self.exclude_re:
            self.exclude_re = re.compile(self.exclude_re)

        if self.include_re:
            self.include_re = re.compile(self.include_re)
        if self.name:
            # -- SELECT: Scenario-by-name, build regular expression.
            self.name_re = self.build_name_re(self.name)

        if self.junit:
            # Buffer the output (it will be put into Junit report)
            self.stdout_capture = True
            self.stderr_capture = True
            self.log_capture = True
            self.reporters.append(JUnitReporter(self))
        if self.summary:
            self.reporters.append(SummaryReporter(self))

        unknown_formats = self.collect_unknown_formats()
        if unknown_formats:
            parser.error("format=%s is unknown" % ", ".join(unknown_formats))
Example #21
    def __init__(self, command_args=None, load_config=True, verbose=None,
                 **kwargs):
        """
        Constructs a behave configuration object.
          * loads the configuration defaults (if needed).
          * processes the command-line args
          * stores the configuration results

        :param command_args: Provide command args (as sys.argv).
            If command_args is None, sys.argv[1:] is used.
        :type command_args: list<str>, str
        :param load_config: Indicate if configfile should be loaded (=true)
        :param verbose: Indicate if diagnostic output is enabled
        :param kwargs:  Used to hand-over/overwrite default values.
        """
        if command_args is None:
            command_args = sys.argv[1:]
        elif isinstance(command_args, string_types):
            if isinstance(command_args, unicode):
                command_args = command_args.encode("utf-8")
            command_args = shlex.split(command_args)
        if verbose is None:
            # -- AUTO-DISCOVER: Verbose mode from command-line args.
            verbose = ('-v' in command_args) or ('--verbose' in command_args)

        defaults = self.defaults.copy()
        for name, value in kwargs.items():
            defaults[name] = value
        self.defaults = defaults
        self.formatters = []
        self.reporters = []
        self.name_re = None
        self.outputs = []
        self.include_re = None
        self.exclude_re = None
        self.scenario_outline_annotation_schema = None
        self.steps_dir = "steps"
        self.environment_file = "environment.py"
        self.userdata_defines = None
        if load_config:
            load_configuration(self.defaults, verbose=verbose)
        parser = setup_parser()
        parser.set_defaults(**self.defaults)
        args = parser.parse_args(command_args)
        for key, value in args.__dict__.items():
            if key.startswith('_') and key not in self.cmdline_only_options:
                continue
            setattr(self, key, value)

        self.paths = [os.path.normpath(path) for path in self.paths]
        if not args.outfiles:
            self.outputs.append(StreamOpener(stream=sys.stdout))
        else:
            for outfile in args.outfiles:
                if outfile and outfile != '-':
                    self.outputs.append(StreamOpener(outfile))
                else:
                    self.outputs.append(StreamOpener(stream=sys.stdout))

        if self.wip:
            # Only run scenarios tagged with "wip".
            # Additionally:
            #  * use the "plain" formatter (per default)
            #  * do not capture stdout or logging output and
            #  * stop at the first failure.
            self.default_format = "plain"
            self.tags = ["wip"]
            self.color = False
            self.stop = True
            self.log_capture = False
            self.stdout_capture = False

        self.tags = TagExpression(self.tags or [])

        if self.quiet:
            self.show_source = False
            self.show_snippets = False

        if self.exclude_re:
            self.exclude_re = re.compile(self.exclude_re)

        if self.include_re:
            self.include_re = re.compile(self.include_re)
        if self.name:
            # -- SELECT: Scenario-by-name, build regular expression.
            self.name_re = self.build_name_re(self.name)

        if self.junit:
            # Buffer the output (it will be put into Junit report)
            self.stdout_capture = True
            self.stderr_capture = True
            self.log_capture = True
            self.reporters.append(JUnitReporter(self))
        if self.summary:
            self.reporters.append(SummaryReporter(self))

        unknown_formats = self.collect_unknown_formats()
        if unknown_formats:
            parser.error("format=%s is unknown" % ", ".join(unknown_formats))

        if self.stage is None:
            # -- USE ENVIRONMENT-VARIABLE, if stage is undefined.
            self.stage = os.environ.get("BEHAVE_STAGE", None)
        self.setup_stage(self.stage)
        self.setup_model()
        self.setup_userdata()
Example #22
    def __init__(self):
        self.formatters = []
        self.reporters = []

        defaults = dict(
            color=sys.platform != 'win32',
            stdout_capture=True,
            stderr_capture=True,
            show_snippets=True,
            show_skipped=True,
            log_capture=True,
            dry_run=False,
            show_source=True,
            logging_format='%(levelname)s:%(name)s:%(message)s',
            summary=True,
            junit=False,
        )
        load_configuration(defaults)
        parser.set_defaults(**defaults)

        args = parser.parse_args()
        for key, value in args.__dict__.items():
            if key.startswith('_'):
                continue
            setattr(self, key, value)

        if args.outfile and args.outfile != '-':
            self.output = open(args.outfile, 'w')
        else:
            self.output = sys.stdout

        if self.wip:
            # Only run scenarios tagged with "wip". Additionally: use the
            # "plain" formatter, do not capture stdout or logging output and
            # stop at the first failure.
            self.format = ['plain']
            self.tags = ['wip']
            self.stop = True
            self.log_capture = False
            self.stdout_capture = False

        self.tags = TagExpression(self.tags or [])

        if self.quiet:
            self.show_source = False
            self.show_snippets = False

        if self.exclude_re:
            self.exclude_re = re.compile(self.exclude_re)

        if self.include_re:
            self.include_re = re.compile(self.include_re)

        if self.junit:
            # Buffer the output (it will be put into Junit report)
            self.stdout_capture = True
            self.stderr_capture = True
            self.log_capture = True
            self.reporters.append(JUnitReporter(self))
        if self.summary:
            self.reporters.append(SummaryReporter(self))