Example No. 1
def main():
    config = Configuration()

    if config.version:
        print "behave " + __version__
        sys.exit(0)

    if config.tags_help:
        print(TAG_HELP)
        sys.exit(0)

    if config.lang_list:
        iso_codes = sorted(languages.keys())
        print("Languages available:")
        for iso_code in iso_codes:
            native = languages[iso_code]['native'][0]
            name = languages[iso_code]['name'][0]
            print(u'%s: %s / %s' % (iso_code, native, name))
        sys.exit(0)

    if config.lang_help:
        if config.lang_help not in languages:
            sys.exit('%s is not a recognised language: try --lang-list' %
                     config.lang_help)
        trans = languages[config.lang_help]
        print u"Translations for %s / %s" % (trans['name'][0],
              trans['native'][0])
        for kw in trans:
            if kw in 'name native'.split():
                continue
            print(u'%16s: %s' % (kw.title().replace('_', ' '),
                  u', '.join(w for w in trans[kw] if w != '*')))
        sys.exit(0)

    if not config.format:
        default_format = config.defaults["default_format"]
        config.format = [ default_format ]
    elif config.format and "format" in config.defaults:
        # -- CASE: Formatters are specified in the behave configuration file.
        #    Check if formatters are provided on the command-line, too.
        if len(config.format) == len(config.defaults["format"]):
            # -- NO FORMATTER on command-line: Add default formatter.
            default_format = config.defaults["default_format"]
            config.format.append(default_format)
    if 'help' in config.format:
        print "Available formatters:"
        formatters.list_formatters(sys.stdout)
        sys.exit(0)

    if len(config.outputs) > len(config.format):
        print('CONFIG-ERROR: More outfiles (%d) than formatters (%d).' %
              (len(config.outputs), len(config.format)))
        sys.exit(1)

    runner = Runner(config)
    try:
        failed = runner.run()
    except ParserError as e:
        sys.exit("ParseError: %s" % e)
Example No. 2
def handle(path):
    file_conf = ConfigObj(os.path.join(path, 'features', 'config.ini'))
    behave_options = file_conf['behave']['options']

    conf = Configuration(behave_options)
    conf.paths = [os.path.join(path, 'features')]
    runner = Runner(conf)
    runner.run()
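A usage sketch for this handler; the directory name is a placeholder, and the option values shown in the comment are illustrative rather than taken from the original project (ConfigObj reads a plain INI file):

# features/config.ini is expected to look like:
#
#   [behave]
#   options = --tags=@smoke --no-capture
#
handle("my_project")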
Example No. 3
    def test_setup_userdata(self):
        config = Configuration("", load_config=False)
        config.userdata = dict(person1="Alice", person2="Bob")
        config.userdata_defines = [("person2", "Charly")]
        config.setup_userdata()

        expected_data = dict(person1="Alice", person2="Charly")
        eq_(config.userdata, expected_data)
Example No. 4
    def test_update_userdata__without_cmdline_defines(self):
        config = Configuration("", load_config=False)
        config.userdata = UserData(person1="AAA", person3="Charly")
        config.update_userdata(dict(person1="Alice", person2="Bob"))

        expected_data = dict(person1="Alice", person2="Bob", person3="Charly")
        eq_(config.userdata, expected_data)
        self.assertFalse(config.userdata_defines)
Example No. 5
    def test_update_userdata__with_cmdline_defines(self):
        # -- NOTE: cmdline defines are reapplied.
        config = Configuration("-D person2=Bea", load_config=False)
        config.userdata = UserData(person1="AAA", person3="Charly")
        config.update_userdata(dict(person1="Alice", person2="Bob"))

        expected_data = dict(person1="Alice", person2="Bea", person3="Charly")
        eq_(config.userdata, expected_data)
        eq_(config.userdata_defines, [("person2", "Bea")])
Example No. 6
def main():
    config = Configuration()

    if config.version:
        print "behave " + __version__
        sys.exit(0)

    if config.tags_help:
        print(TAG_HELP)
        sys.exit(0)

    if config.lang_list:
        iso_codes = sorted(languages.keys())
        print("Languages available:")
        for iso_code in iso_codes:
            native = languages[iso_code]['native'][0]
            name = languages[iso_code]['name'][0]
            print(u'%s: %s / %s' % (iso_code, native, name))
        sys.exit(0)

    if config.lang_help:
        if config.lang_help not in languages:
            sys.exit('%s is not a recognised language: try --lang-list' %
                     config.lang_help)
        trans = languages[config.lang_help]
        print u"Translations for %s / %s" % (trans['name'][0],
              trans['native'][0])
        for kw in trans:
            if kw in 'name native'.split():
                continue
            print(u'%16s: %s' % (kw.title().replace('_', ' '),
                  u', '.join(w for w in trans[kw] if w != '*')))
        sys.exit(0)

    if not config.format:
        format0 = config.defaults["format0"]
        config.format = [ format0 ]
    elif 'help' in config.format:
        print "Available formatters:"
        formatters.list_formatters(sys.stdout)
        sys.exit(0)
    # -- SANITY: Use at most one formatter, more cause various problems.
    # PROBLEM DESCRIPTION:
    #   1. APPEND MODE: configfile.format + --format
    #   2. Daisy chaining of formatters does not work
    #     => behave.formatter.formatters.get_formatter()
    #     => Stream methods, stream.write(), stream.flush are missing
    #        in Formatter interface
    if DISABLE_MULTI_FORMATTERS:
        config.format = config.format[-1:]

    runner = Runner(config)
    try:
        failed = runner.run()
    except ParserError as e:
        sys.exit(str(e))
Example No. 7
def main():
    # pylint: disable=R0912,R0915
    #   R0912   Too many branches (17/12)
    #   R0915   Too many statements (57/50)
    config = Configuration()

    if config.version:
        print "behave " + __version__
        sys.exit(0)

    if config.tags_help:
        print(TAG_HELP)
        sys.exit(0)

    if config.lang_list:
        iso_codes = sorted(languages.keys())
        print("Languages available:")
        for iso_code in iso_codes:
            native = languages[iso_code]["native"][0]
            name = languages[iso_code]["name"][0]
            print u"%s: %s / %s" % (iso_code, native, name)
        sys.exit(0)

    if config.lang_help:
        if config.lang_help not in languages:
            sys.exit("%s is not a recognised language: try --lang-list" % config.lang_help)
        trans = languages[config.lang_help]
        print u"Translations for %s / %s" % (trans["name"][0], trans["native"][0])
        for kw in trans:
            if kw in "name native".split():
                continue
            print u"%16s: %s" % (kw.title().replace("_", " "), u", ".join(w for w in trans[kw] if w != "*"))
        sys.exit(0)

    if not config.format:
        format0 = config.defaults["format0"]
        config.format = [format0]
    elif "help" in config.format:
        print "Available formatters:"
        formatters.list_formatters(sys.stdout)
        sys.exit(0)
    # -- SANITY: Use at most one formatter, more cause various problems.
    # PROBLEM DESCRIPTION:
    #   1. APPEND MODE: configfile.format + --format
    #   2. Daisy chaining of formatters does not work
    #     => behave.formatter.formatters.get_formatter()
    #     => Stream methods, stream.write(), stream.flush are missing
    #        in Formatter interface
    config.format = config.format[-1:]

    stream = config.output
    runner = Runner(config)
    try:
        failed = runner.run()
    except ParserError as e:
        sys.exit(str(e))
Example No. 8
def main():
    config = Configuration()

    if config.version:
        print "behave " + __version__
        sys.exit(0)

    if config.tags_help:
        print(TAG_HELP)
        sys.exit(0)

    if config.lang_list:
        iso_codes = sorted(languages.keys())
        print("Languages available:")
        for iso_code in iso_codes:
            native = languages[iso_code]["native"][0]
            name = languages[iso_code]["name"][0]
            print u"%s: %s / %s" % (iso_code, native, name)
        sys.exit(0)

    if config.lang_help:
        if config.lang_help not in languages:
            sys.exit("%s is not a recognised language: try --lang-list" % config.lang_help)
        trans = languages[config.lang_help]
        print u"Translations for %s / %s" % (trans["name"][0], trans["native"][0])
        for kw in trans:
            if kw in "name native".split():
                continue
            print u"%16s: %s" % (kw.title().replace("_", " "), u", ".join(w for w in trans[kw] if w != "*"))
        sys.exit(0)

    if not config.format:
        default_format = config.defaults["default_format"]
        config.format = [default_format]
    elif config.format and "format" in config.defaults:
        # -- CASE: Formatters are specified in the behave configuration file.
        #    Check if formatters are provided on the command-line, too.
        if len(config.format) == len(config.defaults["format"]):
            # -- NO FORMATTER on command-line: Add default formatter.
            default_format = config.defaults["default_format"]
            config.format.append(default_format)
    if "help" in config.format:
        print "Available formatters:"
        formatters.list_formatters(sys.stdout)
        sys.exit(0)

    if len(config.outputs) > len(config.format):
        print "CONFIG-ERROR: More outfiles (%d) than formatters (%d)." % (len(config.outputs), len(config.format))
        sys.exit(1)

    runner = Runner(config)
    try:
        failed = runner.run()
    except ParserError as e:
        sys.exit("ParseError: %s" % e)
Example No. 9
def handle(path, args=None):
    file_conf = ConfigObj(os.path.join(path, "features", "config.ini"))
    try:
        behave_options = file_conf["behave"]["options"]
    except KeyError:
        raise ValueError("Behave config not found." " Are you running with the right path?")
    if args:
        behave_options += " " + " ".join(args)

    conf = Configuration(behave_options)
    conf.paths = [os.path.join(path, "features")]
    runner = Runner(conf)
    runner.run()
Example No. 10
def handle(path, args=None):
    file_conf = ConfigObj(os.path.join(path, 'features', 'config.ini'))
    try:
        behave_options = file_conf['behave']['options']
    except KeyError:
        raise ValueError("Behave config not found."
                         " Are you running with the right path?")
    if args:
        behave_options += ' ' + ' '.join(args)

    conf = Configuration(behave_options)
    conf.paths = [os.path.join(path, 'features')]
    runner = Runner(conf)
    runner.run()
Example No. 11
def handle(path, args=None):
    file_conf = ConfigObj(os.path.join(path, 'features', 'config.ini'))
    try:
        behave_options = file_conf['behave']['options']
    except KeyError:
        raise ValueError("Behave config not found."
            " Are you running with the right path?")
    if args:
        behave_options += ' ' + ' '.join(args)

    conf = Configuration(behave_options)
    conf.paths = [os.path.join(path, 'features')]
    runner = Runner(conf)
    return runner.run()
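Since Runner.run() returns a truthy value when anything failed, this variant lets the caller turn the result into a process exit code. A usage sketch; the project path and tag are placeholders:

import sys

failed = handle("my_project", args=["--tags=@smoke"])
sys.exit(1 if failed else 0)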
Example No. 12
def main():
    config = Configuration()

    if config.version:
        print "behave " + __version__
        sys.exit(0)

    if config.tags_help:
        print(TAG_HELP)
        sys.exit(0)

    if config.lang_list:
        iso_codes = sorted(languages.keys())
        print("Languages available:")
        for iso_code in iso_codes:
            native = languages[iso_code]['native'][0]
            name = languages[iso_code]['name'][0]
            print(u'%s: %s / %s' % (iso_code, native, name))
        sys.exit(0)

    if config.lang_help:
        if config.lang_help not in languages:
            sys.exit('%s is not a recognised language: try --lang-list' %
                config.lang_help)
        trans = languages[config.lang_help]
        print u"Translations for %s / %s" % (trans['name'][0],
            trans['native'][0])
        for kw in trans:
            if kw in 'name native'.split():
                continue
            print(u'%16s: %s' % (kw.title().replace('_', ' '),
                u', '.join(w for w in trans[kw] if w != '*')))
        sys.exit(0)

    if not config.format:
        config.format = ['pretty']
    elif 'help' in config.format:
        print "Available formatters:"
        formatters.list_formatters(sys.stdout)
        sys.exit(0)

    stream = config.output

    runner = Runner(config)
    try:
        failed = runner.run()
    except ParserError as e:
        sys.exit(str(e))
Example No. 13
    def test_run_exclude_named_scenarios_with_regexp(self):
        # -- NOTE: Works here only because it is run against Mocks.
        scenarios = [Mock(), Mock(), Mock()]
        scenarios[0].name = "Alice in Florida"
        scenarios[1].name = "Alice and Bob"
        scenarios[2].name = "Bob in Paris"
        scenarios[0].tags = []
        scenarios[1].tags = []
        scenarios[2].tags = []
        # -- FAKE-CHECK:
        scenarios[0].should_run_with_name_select.return_value = False
        scenarios[1].should_run_with_name_select.return_value = False
        scenarios[2].should_run_with_name_select.return_value = True

        for scenario in scenarios:
            scenario.run.return_value = False

        self.config.tags.check.return_value = True  # pylint: disable=no-member
        self.config.name = ["(?!Alice)"]    # Exclude all scenarios with "Alice"
        self.config.name_re = Configuration.build_name_re(self.config.name)

        feature = Feature('foo.feature', 1, u'Feature', u'foo',
                          scenarios=scenarios)

        feature.run(self.runner)

        assert not scenarios[0].run.called
        scenarios[0].should_run_with_name_select.assert_called_with(self.config)
        scenarios[1].should_run_with_name_select.assert_called_with(self.config)
        scenarios[2].should_run_with_name_select.assert_called_with(self.config)
        scenarios[0].run.assert_not_called()
        scenarios[1].run.assert_not_called()
        scenarios[2].run.assert_called_with(self.runner)
Example No. 14
    def test_should_run_with_name_select(self):
        scenario_name = u"first scenario"
        scenario = Scenario("foo.feature", 17, u"Scenario", scenario_name)
        self.config.name = ['first .*', 'second .*']
        self.config.name_re = Configuration.build_name_re(self.config.name)

        assert scenario.should_run_with_name_select(self.config)
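Configuration.build_name_re() compiles all --name patterns into a single regular expression, so a scenario is selected when any pattern matches its name. A standalone sketch of that selection logic; the joining-by-alternation behaviour is stated here as an assumption about the implementation:

import re

def build_name_re(patterns):
    # Assumed: patterns are combined into one alternation.
    return re.compile(u"|".join(patterns))

name_re = build_name_re([u"first .*", u"second .*"])
assert name_re.search(u"first scenario")
assert not name_re.search(u"third scenario")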
Example No. 15
def feature_new(request):
    print(request.POST)
    form = json.loads(request.body.decode('utf-8'))
    feature = Feature(description=form['description'], finality=form['finality'], who=form['who'], purpose=form['purpose'], project_id=form['project'])
    feature.save()

    scenario = Scenario(given=form['given'], when=form['when'], then=form['then'], title=form['title'], feature=feature)
    scenario.save()
    gen_feature_file(feature.id)
    conf = Configuration('media/features/{}.feature'.format(feature.id))
    conf.format = [ conf.default_format ]
    runner = Runner(conf)
    runner.run()
    filename = make_test_funcs(runner.undefined_steps, feature.id)
    add_to_repo(filename, feature.description)
    return HttpResponse()
Example No. 16
    def test_run_runs_named_scenarios(self):
        scenarios = [Mock(Scenario), Mock(Scenario)]
        scenarios[0].name = 'first scenario'
        scenarios[1].name = 'second scenario'
        scenarios[0].tags = []
        scenarios[1].tags = []
        # -- FAKE-CHECK:
        scenarios[0].should_run_with_name_select.return_value = True
        scenarios[1].should_run_with_name_select.return_value = False

        for scenario in scenarios:
            scenario.run.return_value = False

        self.config.tags.check.return_value = True  # pylint: disable=no-member
        self.config.name = ['first', 'third']
        self.config.name_re = Configuration.build_name_re(self.config.name)

        feature = Feature('foo.feature', 1, u'Feature', u'foo',
                          scenarios=scenarios)

        feature.run(self.runner)

        scenarios[0].run.assert_called_with(self.runner)
        assert not scenarios[1].run.called
        scenarios[0].should_run_with_name_select.assert_called_with(self.config)
        scenarios[1].should_run_with_name_select.assert_called_with(self.config)
Example No. 17
    def test_run_runs_named_scenarios(self):
        scenarios = [Mock(Scenario), Mock(Scenario)]
        scenarios[0].name = "first scenario"
        scenarios[1].name = "second scenario"
        scenarios[0].tags = []
        scenarios[1].tags = []
        # -- FAKE-CHECK:
        scenarios[0].should_run_with_name_select.return_value = True
        scenarios[1].should_run_with_name_select.return_value = False

        for scenario in scenarios:
            scenario.run.return_value = False

        self.config.tag_expression.check.return_value = True  # pylint: disable=no-member
        self.config.name = ["first", "third"]
        self.config.name_re = Configuration.build_name_re(self.config.name)

        feature = Feature("foo.feature", 1, u"Feature", u"foo",
                          scenarios=scenarios)

        feature.run(self.runner)

        scenarios[0].run.assert_called_with(self.runner)
        assert not scenarios[1].run.called
        scenarios[0].should_run_with_name_select.assert_called_with(self.config)
        scenarios[1].should_run_with_name_select.assert_called_with(self.config)
Example No. 18
    def test_run_runs_named_scenarios(self):
        scenarios = [Mock(Scenario), Mock(Scenario)]
        scenarios[0].name = "first scenario"
        scenarios[1].name = "second scenario"
        scenarios[0].tags = []
        scenarios[1].tags = []
        # -- FAKE-CHECK:
        scenarios[0].should_run_with_name_select.return_value = True
        scenarios[1].should_run_with_name_select.return_value = False

        for scenario in scenarios:
            scenario.run.return_value = False

        self.config.tag_expression.check.return_value = True  # pylint: disable=no-member
        self.config.name = ["first", "third"]
        self.config.name_re = Configuration.build_name_re(self.config.name)

        feature = Feature("foo.feature",
                          1,
                          u"Feature",
                          u"foo",
                          scenarios=scenarios)

        feature.run(self.runner)

        scenarios[0].run.assert_called_with(self.runner)
        assert not scenarios[1].run.called
        scenarios[0].should_run_with_name_select.assert_called_with(
            self.config)
        scenarios[1].should_run_with_name_select.assert_called_with(
            self.config)
Example No. 19
    def test_settings_without_stage(self):
        # -- OR: Setup with default, unnamed stage.
        self.ensure_stage_environment_is_not_set()
        assert "BEHAVE_STAGE" not in os.environ
        config = Configuration()
        eq_("steps", config.steps_dir)
        eq_("environment.py", config.environment_file)
Example No. 20
    def test_run_runs_named_scenarios_with_regexp(self):
        scenarios = [Mock(), Mock()]
        scenarios[0].name = 'first scenario'
        scenarios[1].name = 'second scenario'
        scenarios[0].tags = []
        scenarios[1].tags = []
        # -- FAKE-CHECK:
        scenarios[0].should_run_with_name_select.return_value = False
        scenarios[1].should_run_with_name_select.return_value = True

        for scenario in scenarios:
            scenario.run.return_value = False

        self.config.tags.check.return_value = True
        self.config.name = ['third .*', 'second .*']
        self.config.name_re = Configuration.build_name_re(self.config.name)

        feature = model.Feature('foo.feature',
                                1,
                                u'Feature',
                                u'foo',
                                scenarios=scenarios)

        feature.run(self.runner)

        assert not scenarios[0].run.called
        scenarios[1].run.assert_called_with(self.runner)
        scenarios[0].should_run_with_name_select.assert_called_with(
            self.config)
        scenarios[1].should_run_with_name_select.assert_called_with(
            self.config)
Example No. 21
def main(args=None):
    """Main function to run behave (as program).

    :param args:    Command-line args (or string) to use.
    :return: 0, if successful. Non-zero, in case of errors/failures.
    """
    config = Configuration(args)

    rclass = Runner
    if getattr(config, 'proc_count', None):
        try:
            from behave.runner_mp import MultiProcRunner_Feature, MultiProcRunner_Scenario
            pelem = getattr(config, 'parallel_element', False)
            if not pelem:
                print ("INFO: Without giving --parallel-element, defaulting to 'scenario'...")
                pelem = 'scenario'

            if pelem == 'scenario':
                rclass = MultiProcRunner_Scenario
            elif pelem == 'feature':
                rclass = MultiProcRunner_Feature
            else:
                print ("ERROR: When using --processes, --parallel-element"
                    " option must be set to 'feature' or 'scenario'. You gave "
                    "'%s', which isn't valid." % pelem)
                return 1

        except ImportError as e:
            print ("DEBUG: import error: %s" % e)
            print ("ERROR: Cannot import multiprocessing module."
            " If you're on python2.5, go get the backport")
            return 1
    return run_behave(config, runner_class=rclass)
Example No. 22
    def test_run_runs_named_scenarios(self):
        scenarios = [Mock(Scenario), Mock(Scenario)]
        scenarios[0].name = 'first scenario'
        scenarios[1].name = 'second scenario'
        scenarios[0].tags = []
        scenarios[1].tags = []
        # -- FAKE-CHECK:
        scenarios[0].should_run_with_name_select.return_value = True
        scenarios[1].should_run_with_name_select.return_value = False

        for scenario in scenarios:
            scenario.run.return_value = False

        self.config.tags.check.return_value = True  # pylint: disable=no-member
        self.config.name = ['first', 'third']
        self.config.name_re = Configuration.build_name_re(self.config.name)

        feature = Feature('foo.feature',
                          1,
                          u'Feature',
                          u'foo',
                          scenarios=scenarios)

        feature.run(self.runner)

        scenarios[0].run.assert_called_with(self.runner)
        assert not scenarios[1].run.called
        scenarios[0].should_run_with_name_select.assert_called_with(
            self.config)
        scenarios[1].should_run_with_name_select.assert_called_with(
            self.config)
Example No. 23
def run_tests(room, vendor, tags, override):
    app = create_app()
    with app.app_context():
        test_run = TestRun()
        db.session.add(test_run)
        db.session.commit()

        def on_snapshot(snapshot, plan):
            test_run.save_snapshot(snapshot, plan)
            socketio.emit('snapshot', test_run.event, room=room)

            db.session.commit()

        try:
            output = io.StringIO()
            output_stream = StreamOpener(stream=output)
            config = Configuration(
                outputs=[output_stream],
                format=['json.chunked'],
                on_snapshot=on_snapshot,
                vendor=vendor,
                override=override,
                command_args=[],
                tags=[','.join(tags)],
            )
            runner = Runner(config)

            runner.run()
        except Exception as err:  # pylint: disable=broad-except
            socketio.emit('global_error', str(err), room=room)
        finally:
            socketio.emit('tests_complete', room=room)
Example No. 24
    def test_run_runs_named_scenarios_with_regexp(self):
        scenarios = [Mock(), Mock()]
        scenarios[0].name = 'first scenario'
        scenarios[1].name = 'second scenario'
        scenarios[0].tags = []
        scenarios[1].tags = []
        # -- FAKE-CHECK:
        scenarios[0].should_run_with_name_select.return_value = False
        scenarios[1].should_run_with_name_select.return_value = True

        for scenario in scenarios:
            scenario.run.return_value = False

        self.config.tags.check.return_value = True
        self.config.name = ['third .*', 'second .*']
        self.config.name_re = Configuration.build_name_re(self.config.name)

        feature = model.Feature('foo.feature', 1, u'Feature', u'foo',
                                scenarios=scenarios)

        feature.run(self.runner)

        assert not scenarios[0].run.called
        scenarios[1].run.assert_called_with(self.runner)
        scenarios[0].should_run_with_name_select.assert_called_with(self.config)
        scenarios[1].should_run_with_name_select.assert_called_with(self.config)
Example No. 25
def main(args=None):
    """Main function to run behave (as program).

    :param args:    Command-line args (or string) to use.
    :return: 0, if successful. Non-zero, in case of errors/failures.
    """
    config = Configuration(args)
    return run_behave(config)
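The thinnest possible wrapper around run_behave(). A usage sketch wiring it up as a console script; since main() returns the exit code, it can be passed straight to sys.exit():

import sys

if __name__ == "__main__":
    sys.exit(main())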
Example No. 26
    def __init__(self, result_dir):
        self.listener = AllureListener(Configuration())

        if not hasattr(_storage, 'file_logger'):
            _storage.file_logger = AllureFileLogger(result_dir)
            allure_commons.plugin_manager.register(_storage.file_logger)

        allure_commons.plugin_manager.register(self.listener)
Example No. 27
    def test_cmdline_defines__with_quoted_value(self):
        cmdlines = [
            '-D person="Alice and Bob"',
            "-D person='Alice and Bob'",
        ]
        for cmdline in cmdlines:
            config = Configuration(cmdline, load_config=False)
            eq_(config.userdata, dict(person="Alice and Bob"))
Example No. 28
    def test_cmdline_defines_override_configfile(self):
        userdata_init = {"foo": "XXX", "bar": "ZZZ", "baz": 42}
        config = Configuration("-D foo=foo_value --define bar=123",
                               load_config=False,
                               userdata=userdata_init)
        eq_("foo_value", config.userdata["foo"])
        eq_("123", config.userdata["bar"])
        eq_(42, config.userdata["baz"])
Example No. 29
def home(request):
    """Renders the home page."""
    assert isinstance(request, HttpRequest)

    if request.method != 'POST':
        form = FeatureForm()
        return render(request,
                      'app/index.html',
                      context_instance=RequestContext(
                          request, {
                              'title': 'Home Page',
                              'year': datetime.now().year,
                              'form': form,
                          }))
    else:
        form = FeatureForm(request.POST)
        if form.is_valid():
            feature = Feature(description=form.cleaned_data['description'],
                              finality=form.cleaned_data['finality'],
                              who=form.cleaned_data['who'],
                              purpose=form.cleaned_data['purpose'])
            feature.save()

            scenario = Scenario(given=form.cleaned_data['given'],
                                when=form.cleaned_data['when'],
                                then=form.cleaned_data['then'],
                                title=form.cleaned_data['title'],
                                feature=feature)
            scenario.save()
            gen_feature_file(feature.id)
            conf = Configuration(
                os.path.join(settings.PROJECT_ROOT, 'media', 'features',
                             '{}.feature'.format(feature.id)))
            conf.format = [conf.default_format]
            runner = Runner(conf)
            runner.run()
            filename = make_test_funcs(runner.undefined_steps, feature.id)
            add_to_repo(filename, feature.description)
        return render(request,
                      'app/index.html',
                      context_instance=RequestContext(
                          request, {
                              'title': 'Home Page',
                              'year': datetime.now().year,
                              'form': form,
                          }))
Example No. 30
    def test_cmdline_defines__with_quoted_name_value_pair(self):
        cmdlines = [
            '-D "person=Alice and Bob"',
            "-D 'person=Alice and Bob'",
        ]
        for cmdline in cmdlines:
            config = Configuration(cmdline, load_config=False)
            assert config.userdata == dict(person="Alice and Bob")
Example No. 31
    def test_cmdline_defines(self):
        config = Configuration([
            "-D", "foo=foo_value",
            "--define=bar=bar_value",
            "--define", "baz=BAZ_VALUE",
        ])
        eq_("foo_value", config.userdata["foo"])
        eq_("bar_value", config.userdata["bar"])
        eq_("BAZ_VALUE", config.userdata["baz"])
Example No. 32
def main(args=None, project=None):
    """Main function to run behave (as program).

    :param args:    Command-line args (or string) to use.
    :return: 0, if successful. Non-zero, in case of errors/failures.
    """
    config = Configuration(args)
    # project = config.domain
    print("启动功能域:" + ",".join(project))  #by guohongjie 增加domain参数,传入功能域
    return run_behave(config, project=project)
Example No. 33
    def get_rules_from(features_list):
        """Execute gherkins and return rules."""
        base_context = {'final_rules': []}
        runner = Runner(Configuration())

        with empty_argv():
            for data in features_list:
                with local_context(base_context, runner) as context:
                    parse_feature(data.strip(), None, None).run(runner)
                    yield from context.final_rules
Example No. 34
def get_available_steps(step_dirs=None):
    """
    gets all the steps that the core is aware of. useful for doing real-time
    'syntax' checking for uis

    you dont need to call goh_behave prior

    :param step_dirs: optional. list of dirs to additionally load steps from

    :return: a list of found steps' text
    :rtype: list[string]
    """
    # setup config for behave's runner
    # use dry run so only steps are evaluated, nothing is run
    config = Configuration([u'--dry-run'])
    # suppress output about the running test, because there is no test
    config.format = [u'null']

    # append steps from the bdd core
    if not step_dirs:
        step_dirs = []

    # no features, we just want the implemented steps
    feature_dirs = []
    runner = HackedRunner(config, feature_dirs, step_dirs)

    # this is the key part. this will cause behave to run through all the step
    # dirs and execute them, which will cause steps to be registered.
    runner.load_step_definitions()

    # reach into behave's step registry and serialize the known steps into a list
    found_steps = []
    for steps in step_registry.registry.steps.values():
        for step in steps:
            if step.step_type is None or u'step' == step.step_type:
                for real_key in RealSteps:
                    found_steps.append(u'{} {}'.format(real_key, step.string))
            else:
                found_steps.append(u'{} {}'.format(step.step_type, step.string))
            found_steps.append(u'{} {}'.format(u'And', step.string))

    return found_steps
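A usage sketch for the helper above; HackedRunner and the step directory layout come from the example's own project, and the directory name here is a placeholder:

for step_text in get_available_steps(step_dirs=["my_project/steps"]):
    print(step_text)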
Example No. 35
def run_model_with_cmdline(model, cmdline):
    reset_model(model.features)
    command_args = cmdline
    config = Configuration(command_args,
                           load_config=False,
                           default_format="null",
                           stdout_capture=False,
                           stderr_capture=False,
                           log_capture=False)
    model_runner = ModelRunner(config, model.features)
    return model_runner.run()
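A usage sketch for this test helper, assuming model is a parsed behave model whose features attribute holds Feature objects (as in the surrounding test suite):

failed = run_model_with_cmdline(model, ["--tags=@wip", "--no-timings"])
assert not failed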
Example No. 36
def main():
    """Entry point."""
    args = parser.parse_args()
    config = Configuration(args.command_args)
    runner = Runner(config)

    with runner.path_manager:
        runner.setup_paths()
        runner.load_step_definitions()  # This populates the step_registry.

    exit_code = run(steps=step_registry.registry.steps, output_to=args.path)
    sys.exit(exit_code)
Example No. 37
def home(request):
    """Renders the home page."""
    assert isinstance(request, HttpRequest)

    if request.method != 'POST':
        form = FeatureForm()
        return render(
            request,
            'app/index.html',
            context_instance = RequestContext(request,
            {
                'title':'Home Page',
                'year':datetime.now().year,
                'form': form,
            })
        )
    else:
        form = FeatureForm(request.POST)
        if form.is_valid():
            feature = Feature(description=form.cleaned_data['description'], finality=form.cleaned_data['finality'], who=form.cleaned_data['who'], purpose=form.cleaned_data['purpose'])
            feature.save()

            scenario = Scenario(given=form.cleaned_data['given'], when=form.cleaned_data['when'], then=form.cleaned_data['then'], title=form.cleaned_data['title'], feature=feature)
            scenario.save()
            gen_feature_file(feature.id)
            conf = Configuration(os.path.join(settings.PROJECT_ROOT, 'media', 'features', '{}.feature'.format(feature.id)))
            conf.format = [ conf.default_format ]
            runner = Runner(conf)
            runner.run()
            filename = make_test_funcs(runner.undefined_steps, feature.id)
            add_to_repo(filename, feature.description)
        return render(request,
            'app/index.html',
            context_instance = RequestContext(request,
            {
                'title':'Home Page',
                'year':datetime.now().year,
                'form': form,
            })
        )
Example No. 38
    def run(context, **kwargs):
        cmd_args = '-v -f allure_behave.formatter:AllureFormatter -f pretty'
        cmd = '{options} {cmd}'.format(cmd=cmd_args,
                                       options=kwargs.get('args', ''))
        config = Configuration(command_args=cmd)

        result_tmp_dir = mkdtemp(dir=os.environ.get('TEST_TMP', None))
        stream_opener = StreamOpener(filename=result_tmp_dir)

        model_runner = ModelRunner(config, [context.feature_definition])
        model_runner.formatters = make_formatters(config, [stream_opener])
        model_runner.run()

        context.allure_report = AllureReport(result_tmp_dir)
Example No. 39
    def setupBehave(self):
        # Create a sys.argv suitable for Behave to parse
        old_argv = sys.argv
        (sys.argv, our_opts) = parse_argv(old_argv, self.option_info)
        self.behave_config = Configuration()
        sys.argv = old_argv
        self.behave_config.browser = our_opts["browser"]

        self.behave_config.server_url = self.live_server_url  # property of LiveServerTestCase
        self.behave_config.paths = self.get_features_dir()
        self.behave_config.format = ['pretty']
        # disable these in case you want to add set_trace in the tests you're developing
        self.behave_config.stdout_capture = False
        self.behave_config.stderr_capture = False
Example No. 40
def feature_new(request):
    print(request.POST)
    form = json.loads(request.body.decode('utf-8'))
    feature = Feature(description=form['description'],
                      finality=form['finality'],
                      who=form['who'],
                      purpose=form['purpose'],
                      project_id=form['project'])
    feature.save()

    scenario = Scenario(given=form['given'],
                        when=form['when'],
                        then=form['then'],
                        title=form['title'],
                        feature=feature)
    scenario.save()
    gen_feature_file(feature.id)
    conf = Configuration('media/features/{}.feature'.format(feature.id))
    conf.format = [conf.default_format]
    runner = Runner(conf)
    runner.run()
    filename = make_test_funcs(runner.undefined_steps, feature.id)
    add_to_repo(filename, feature.description)
    return HttpResponse()
Example No. 41
def run_behave_with_allure(context, **kwargs):
    with test_context():
        cmd_args = '-f allure_behave.formatter:AllureFormatter'
        cmd = '{options} {cmd}'.format(cmd=cmd_args,
                                       options=kwargs.get('args', ''))
        config = Configuration(command_args=cmd)
        result_tmp_dir = mkdtemp(dir=os.environ.get('TEST_TMP', None))
        stream_opener = StreamOpener(filename=result_tmp_dir)
        model_runner = ModelRunner(config, context.feature_definition)
        model_runner.formatters = make_formatters(config, [stream_opener])
        model_runner.hooks = getattr(context, 'globals', dict())
        model_runner.run()
        context.allure_report = AllureReport(result_tmp_dir)

    os.environ.pop("ALLURE_TESTPLAN_PATH", None)
Example No. 42
    def test_run_runs_named_scenarios(self):
        scenarios = [Mock(), Mock()]
        scenarios[0].name = 'first scenario'
        scenarios[1].name = 'second scenario'
        scenarios[0].tags = []
        scenarios[1].tags = []

        for scenario in scenarios:
            scenario.run.return_value = False

        self.config.tags.check.return_value = True
        self.config.name = ['first', 'third']
        self.config.name_re = Configuration.build_name_re(self.config.name)

        feature = model.Feature('foo.feature', 1, u'Feature', u'foo',
                                scenarios=scenarios)

        feature.run(self.runner)

        scenarios[0].run.assert_called_with(self.runner)
        assert not scenarios[1].run.called
Example No. 43
def main(args=None):
    config = Configuration(args)
    if config.version:
        print("behave " + __version__)
        return 0

    if config.tags_help:
        print(TAG_HELP)
        return 0

    if config.lang_list:
        from behave.i18n import languages
        iso_codes = sorted(languages.keys())
        print("Languages available:")
        for iso_code in iso_codes:
            native = languages[iso_code]['native'][0]
            name = languages[iso_code]['name'][0]
            print(u'%s: %s / %s' % (iso_code, native, name))
        return 0

    if config.lang_help:
        from behave.i18n import languages
        if config.lang_help not in languages:
            print('%s is not a recognised language: try --lang-list' %
                  config.lang_help)
            return 1
        trans = languages[config.lang_help]
        print(u"Translations for %s / %s" % (trans['name'][0],
                                             trans['native'][0]))
        for kw in trans:
            if kw in 'name native'.split():
                continue
            print(u'%16s: %s' % (kw.title().replace('_', ' '),
                  u', '.join(w for w in trans[kw] if w != '*')))
        return 0

    if not config.format:
        config.format = [ config.default_format ]
    elif config.format and "format" in config.defaults:
        # -- CASE: Formatters are specified in the behave configuration file.
        #    Check if formatters are provided on the command-line, too.
        if len(config.format) == len(config.defaults["format"]):
            # -- NO FORMATTER on command-line: Add default formatter.
            config.format.append(config.default_format)
    if 'help' in config.format:
        print_formatters("Available formatters:")
        return 0

    if len(config.outputs) > len(config.format):
        print('CONFIG-ERROR: More outfiles (%d) than formatters (%d).' %
              (len(config.outputs), len(config.format)))
        return 1

    failed = True
    runner = Runner(config)
    try:
        failed = runner.run()
    except ParserError as e:
        print(u"ParseError: %s" % e)
    except ConfigError as e:
        print(u"ConfigError: %s" % e)
    except FileNotFoundError as e:
        print(u"FileNotFoundError: %s" % e)
    except InvalidFileLocationError as e:
        print(u"InvalidFileLocationError: %s" % e)
    except InvalidFilenameError as e:
        print(u"InvalidFilenameError: %s" % e)
    except Exception as e:
        # -- DIAGNOSTICS:
        text = _text(e)
        print(u"Exception %s: %s" % (e.__class__.__name__, text))
        raise

    if config.show_snippets and runner.undefined_steps:
        print_undefined_step_snippets(runner.undefined_steps,
                                      colored=config.color)

    return_code = 0
    if failed:
        return_code = 1
    return return_code
Example No. 44
def main():
    # pylint: disable=R0912,R0915
    #   R0912   Too many branches (17/12)
    #   R0915   Too many statements (57/50)
    config = Configuration()

    if config.version:
        print "behave " + __version__
        sys.exit(0)

    if config.tags_help:
        print(TAG_HELP)
        sys.exit(0)

    if config.lang_list:
        iso_codes = sorted(languages.keys())
        print("Languages available:")
        for iso_code in iso_codes:
            native = languages[iso_code]['native'][0]
            name = languages[iso_code]['name'][0]
            print(u'%s: %s / %s' % (iso_code, native, name))
        sys.exit(0)

    if config.lang_help:
        if config.lang_help not in languages:
            sys.exit('%s is not a recognised language: try --lang-list' %
                     config.lang_help)
        trans = languages[config.lang_help]
        print u"Translations for %s / %s" % (trans['name'][0],
              trans['native'][0])
        for kw in trans:
            if kw in 'name native'.split():
                continue
            print(u'%16s: %s' % (kw.title().replace('_', ' '),
                  u', '.join(w for w in trans[kw] if w != '*')))
        sys.exit(0)

    if not config.format:
        default_format = config.defaults["default_format"]
        config.format = [ default_format ]
    elif config.format and "format" in config.defaults:
        # -- CASE: Formatters are specified in the behave configuration file.
        #    Check if formatters are provided on the command-line, too.
        if len(config.format) == len(config.defaults["format"]):
            # -- NO FORMATTER on command-line: Add default formatter.
            default_format = config.defaults["default_format"]
            config.format.append(default_format)
    if 'help' in config.format:
        print("Available formatters:")
        formatters.list_formatters(sys.stdout)
        sys.exit(0)

    if len(config.outputs) > len(config.format):
        print('CONFIG-ERROR: More outfiles (%d) than formatters (%d).' %
              (len(config.outputs), len(config.format)))
        sys.exit(1)

    runner = Runner(config)
    try:
        failed = runner.run()
    except ParserError as e:
        sys.exit(str(e))
Example No. 45
def start():
    u'''
    Entry point for the application, activated when it is launched
    from the command line.
    '''
    setup_logging()
    config = {}

    try:
        arg_delim_index = sys.argv.index('--')
        behave_args = sys.argv[arg_delim_index+1:]
        sys.argv = sys.argv[:arg_delim_index]
    except ValueError:
        behave_args = []

    opt_parser = OptionParser()
    opt_parser.add_option('', '--var-file', dest='var_file',
                          help='Load template variables from .py file.',
                          metavar='<FILE>')
    opt_parser.add_option('', '--cfg-file', dest='cfg_file',
                          help='Load configuration from YAML file.',
                          metavar='<FILE>')
    (options, _) = opt_parser.parse_args()

    if options.cfg_file:
        cfg_fn = options.cfg_file
        try:
            with open(cfg_fn, 'r') as fp:
                config = yaml.safe_load(fp.read()) or {}
        except Exception as ex:
            logger.error('Can\'t load {0}: {1}'.format(cfg_fn, ex))
        else:
            logger.info('Loaded configuration from {0}.'.format(cfg_fn))

    if options.var_file:
        template_vars = load_vars_from_pyfile(options.var_file)
        # Template mode is only worth enabling when there are variables,
        # which is the whole point of the feature
        if template_vars:
            template_vars.pop('__builtins__', {})
            sys.meta_path = [TemplateImportHooker(template_vars)]

    # Modify sys.argv to work around the behaviour of behave<=1.2.3, which
    # always tries to read options from the command line.
    # TODO: once a stable 1.2.4 is released, switch to passing command_args
    sys.argv = [sys.argv[0], ] + behave_args
    behave_cfg = Configuration()
    if not behave_cfg.format:
        behave_cfg.format = ['pretty', ]

    from behave.runner import Runner
    runner = Runner(behave_cfg)

    if 'enabled_plugins' in config:
        runner.hooks = StackedHookDictWrapper()
        # Find all available plugins...
        plugins = find_plugins()
        plugin_configs = config.get('plugins', {})

        for p_id in config['enabled_plugins']:
            # TODO: remove once custom plugins can be attached
            assert p_id in plugins, 'Unknown plugin: {}!'.format(p_id)
            plugin = plugins[p_id]

            # Attach one more set of environment functions. From behave's
            # point of view this is a single function, which in turn calls
            # the handlers of _each_ plugin in order.
            logger.info('Loading plugin "{}"...'.format(p_id))
            custom_hooks = plugin.prepare_environment(plugin_configs.get(p_id, {}))
            logger.debug('Plugin "{}" sets hooks: {}'.format(p_id, ', '.join(custom_hooks.keys())))
            for hook, handler in custom_hooks.items():
                runner.hooks[hook] = handler

    runner.run()
Example No. 46
def goh_behave(feature_dirs=None, step_dirs=None, test_artifact_dir=None,
               listeners=None, dry_run=False, webdriver_url=None,
               webdriver_processor=None, tags=None, show_skipped=True, config_file=None,
               test_config=None):
    """
    runs behave

    :param feature_dirs: list of paths to feature files to run, or a single path. Defaults to singleton list of "features"
    :type feature_dirs: list
    :param step_dirs: list of paths to load steps from. the step paths will be
        searched recursively
    :type step_dirs: list
    :param test_artifact_dir: path to where to store test artifacts. if None, no test
        artifacts will be written. note that setting this to None will prevent
        screenshots from automatically being taken after each step
    :param listeners: list of Listener objects to call for diff pts of the test
    :type listeners: list
    :param dry_run: if True, behave will just check if all steps are defined
    :param webdriver_url: optional. webdriver node/grid url to hit to execute
        tests
    :param webdriver_processor: provides the ability to process things like
        capabilities before they're actually used
    :param config_file: a configuration file, formatted as JSON data, that contains
        all of the other parameters listed here (except for listeners or webdriver_processor)
    :param test_config: a configuration that is a dictionary of the parameters. If a config_file is
        also passed, the values in the config_file will be written to the test_config as well. Any existing
        keys will have the values overwritten by the value in the config_file

    :return: True if the tests passed, else False
    :rtype: bool

    :raise ParserError: when a feature file couldn't be parsed
    :raise FileNotFoundError: when a feature file couldn't be found
    :raise InvalidFileLocationError: when a feature path was bad
    :raise InvalidFilenameError: when a feature file name was bad
    :raise UndefinedStepsError: if some steps were undefined
    """

    if config_file:
        try:
            with open(config_file) as config:
                try:
                    json_config = json.load(config)
                except ValueError as e:
                    raise ValueError(u'Could not parse {} config file as JSON. See sample.config for an example config file. {}'.format(config_file, e))
                if test_config:
                    test_config.update(json_config)
                else:
                    test_config = json_config
        except EnvironmentError as e:
            raise IOError(u'Could not open the {} config file. See sample.config for an example config file. {}'.format(config_file, e))

    if test_config:
        log.debug(u'Using test_config:')
        for key in test_config:
            log.debug(u'    {}: {}'.format(key, test_config[key]))
        if u'feature_dirs' in test_config and not feature_dirs:
            feature_dirs = test_config[u'feature_dirs']
        if u'step_dirs' in test_config and not step_dirs:
            step_dirs = test_config[u'step_dirs']
        if u'test_artifact_dir' in test_config and not test_artifact_dir:
            test_artifact_dir = test_config[u'test_artifact_dir']
        if u'dry_run' in test_config:
            dry_run = test_config[u'dry_run']
        if u'webdriver_url' in test_config and not webdriver_url:
            webdriver_url = test_config[u'webdriver_url']
        if u'tags' in test_config and not tags:
            tags = test_config[u'tags']
        if u'show_skipped' in test_config:
            show_skipped = test_config[u'show_skipped']

    if not feature_dirs:
        feature_dirs = ["features"]
    if isinstance(feature_dirs, str):
        feature_dirs = [feature_dirs]

    args = [u'']

    # First do a dry run to catch any undefined steps. If the user explicitly
    # asked for a dry run, skip this pass: it will happen anyway, and the
    # user's own dry run may carry listeners etc., whereas this automatic
    # dry run is meant to happen silently.
    if not dry_run:
        config = Configuration([u'--dry-run'])
        config.format = []
        _run_behave(feature_dirs, config, step_dirs=step_dirs)

    # output test artifacts
    if test_artifact_dir:
        args.append(u'--junit')
        args.append(u'--junit-directory')
        args.append(test_artifact_dir)

    if dry_run:
        args.append(u'--dry-run')

    # setup config for behave's runner
    config = Configuration(args)
    config.format = [
        # outputs pretty output in to stdout while tests are running
        u'pretty'
    ]

    # Set the tags if there are any
    if tags:
        log.debug(u'Running Scenarios with Tag(s): {}'.format(tags))
        if isinstance(tags, list):
            log.debug(u'Running Scenarios with Tag List')
            config.tags = TagExpression(tags)
        else:
            config.tags = TagExpression(tags.split())

    config.show_skipped = show_skipped
    if not show_skipped:
        log.debug(u'Not showing skipped scenarios.')

    # only add html reporter if the test artifacts are enabled
    if test_artifact_dir:
        config.reporters.append(HtmlReporter(config, test_artifact_dir))

    try:
        return _run_behave(
            feature_dirs,
            config,
            step_dirs=step_dirs,
            webdriver_url=webdriver_url,
            webdriver_processor=webdriver_processor,
            listeners=listeners,
            test_config=test_config
        )
    except ConfigError as e:
        # since we control the configuration file, we shouldn't expose this to
        # the user, since there's nothing they can do
        log.error(e)
        return False
Example No. 47
def main():
    config = Configuration()

    if config.version:
        print("behave " + __version__)
        sys.exit(0)

    if config.tags_help:
        print(TAG_HELP)
        sys.exit(0)

    if config.lang_list:
        iso_codes = list(languages.keys())
        iso_codes.sort()
        print("Languages available:")
        for iso_code in iso_codes:
            native = languages[iso_code]['native'][0]
            name = languages[iso_code]['name'][0]
            print('%s: %s / %s' % (iso_code, native, name))
        sys.exit(0)

    if config.lang_help:
        if config.lang_help not in languages:
            sys.exit('%s is not a recognised language: try --lang-list' %
                     config.lang_help)
        trans = languages[config.lang_help]
        print("Translations for %s / %s" % (trans['name'][0],
              trans['native'][0]))
        for kw in trans:
            if kw in 'name native'.split():
                continue
            print('%16s: %s' % (kw.title().replace('_', ' '),
                  ', '.join(w for w in trans[kw] if w != '*')))
        sys.exit(0)

    if not config.format:
        default_format = config.defaults["default_format"]
        config.format = [default_format]
    elif config.format and "format" in config.defaults:
        # -- CASE: Formatters are specified in the behave configuration file.
        #    Check if formatters are provided on the command line, too.
        if len(config.format) == len(config.defaults["format"]):
            # -- NO FORMATTER on command-line: Add default formatter.
            default_format = config.defaults["default_format"]
            config.format.append(default_format)
    if 'help' in config.format:
        print("Available formatters:")
        formatters.list_formatters(sys.stdout)
        sys.exit(0)

    if len(config.outputs) > len(config.format):
        print('CONFIG-ERROR: More outfiles (%d) than formatters (%d).' %
              (len(config.outputs), len(config.format)))
        sys.exit(1)

    runner = Runner(config)
    try:
        failed = runner.run()
    except ParserError as e:
        sys.exit("ParseError: %s" % e)
    except ConfigError as e:
        sys.exit("ConfigError: %s" % e)
    except FileNotFoundError as e:
        sys.exit("FileNotFoundError: %s" % e)
    except InvalidFileLocationError as e:
        sys.exit("InvalidFileLocationError: %s" % e)
    except InvalidFilenameError as e:
        sys.exit("InvalidFilenameError: %s" % e)


    if config.show_snippets and runner.undefined:
        msg = "\nYou can implement step definitions for undefined steps with "
        msg += "these snippets:\n\n"
        printed = set()
        for step in runner.undefined:
            if step in printed:
                continue
            printed.add(step)
            msg += make_undefined_step_snippet(step)

        # -- OOPS: Unclear if stream supports ANSI coloring.
        sys.stderr.write(escapes['undefined'] + msg + escapes['reset'])
        sys.stderr.flush()

    if failed:
        sys.exit(1)
    # -- OTHERWISE: Successful run.
    sys.exit(0)
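
The default-format fallback used in all of these main() variants can be exercised on its own. A minimal sketch, assuming behave's stock defaults where default_format is "pretty":

from behave.configuration import Configuration

# No -f/--format on the command line and no config file loaded,
# so config.format stays empty and the fallback applies.
config = Configuration([], load_config=False)
if not config.format:
    config.format = [config.defaults["default_format"]]
assert config.format == ["pretty"]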
Ejemplo n.º 54
0
def main(args=None):
    config = Configuration(args)
    if config.version:
        print "behave " + __version__
        return 0

    if config.tags_help:
        print TAG_HELP
        return 0

    if config.lang_list:
        from behave.i18n import languages
        iso_codes = languages.keys()
        iso_codes.sort()
        print "Languages available:"
        for iso_code in iso_codes:
            native = languages[iso_code]['native'][0]
            name = languages[iso_code]['name'][0]
            print u'%s: %s / %s' % (iso_code, native, name)
        return 0

    if config.lang_help:
        from behave.i18n import languages
        if config.lang_help not in languages:
            print '%s is not a recognised language: try --lang-list' % \
                    config.lang_help
            return 1
        trans = languages[config.lang_help]
        print u"Translations for %s / %s" % (trans['name'][0],
              trans['native'][0])
        for kw in trans:
            if kw in 'name native'.split():
                continue
            print u'%16s: %s' % (kw.title().replace('_', ' '),
                  u', '.join(w for w in trans[kw] if w != '*'))
        return 0

    if not config.format:
        default_format = config.defaults["default_format"]
        config.format = [default_format]
    elif config.format and "format" in config.defaults:
        # -- CASE: Formatters are specified in the behave configuration file.
        #    Check if formatters are provided on the command line, too.
        if len(config.format) == len(config.defaults["format"]):
            # -- NO FORMATTER on command-line: Add default formatter.
            default_format = config.defaults["default_format"]
            config.format.append(default_format)
    if 'help' in config.format:
        from behave.formatter import formatters
        print "Available formatters:"
        formatters.list_formatters(sys.stdout)
        return 0

    if len(config.outputs) > len(config.format):
        print 'CONFIG-ERROR: More outfiles (%d) than formatters (%d).' % \
              (len(config.outputs), len(config.format))
        return 1

    failed = True
    runner = Runner(config)
    try:
        failed = runner.run()
    except ParserError, e:
        print "ParseError: %s" % e

    if failed:
        return 1
    return 0
Ejemplo n.º 55
0
def main(args=None):
    config = Configuration(args)
    if config.version:
        print("behave " + __version__)
        return 0

    if config.tags_help:
        print(TAG_HELP)
        return 0

    if config.lang_list:
        from behave.i18n import languages

        stdout = sys.stdout
        if six.PY2:
            # -- PYTHON2: Overcome implicit encode problems (encoding=ASCII).
            stdout = codecs.getwriter("UTF-8")(sys.stdout)
        iso_codes = languages.keys()
        print("Languages available:")
        for iso_code in sorted(iso_codes):
            native = languages[iso_code]["native"][0]
            name = languages[iso_code]["name"][0]
            print(u"%s: %s / %s" % (iso_code, native, name), file=stdout)
        return 0

    if config.lang_help:
        from behave.i18n import languages

        if config.lang_help not in languages:
            print("%s is not a recognised language: try --lang-list" % config.lang_help)
            return 1
        trans = languages[config.lang_help]
        print(u"Translations for %s / %s" % (trans["name"][0], trans["native"][0]))
        for kw in trans:
            if kw in "name native".split():
                continue
            print(u"%16s: %s" % (kw.title().replace("_", " "), u", ".join(w for w in trans[kw] if w != "*")))
        return 0

    if not config.format:
        config.format = [config.default_format]
    elif config.format and "format" in config.defaults:
        # -- CASE: Formatters are specified in the behave configuration file.
        #    Check if formatters are provided on the command line, too.
        if len(config.format) == len(config.defaults["format"]):
            # -- NO FORMATTER on command-line: Add default formatter.
            config.format.append(config.default_format)
    if "help" in config.format:
        print_formatters("Available formatters:")
        return 0

    if len(config.outputs) > len(config.format):
        print("CONFIG-ERROR: More outfiles (%d) than formatters (%d)." % (len(config.outputs), len(config.format)))
        return 1

    failed = True
    runner = Runner(config)
    try:
        failed = runner.run()
    except ParserError as e:
        print(u"ParseError: %s" % e)
    except ConfigError as e:
        print(u"ConfigError: %s" % e)
    except FileNotFoundError as e:
        print(u"FileNotFoundError: %s" % e)
    except InvalidFileLocationError as e:
        print(u"InvalidFileLocationError: %s" % e)
    except InvalidFilenameError as e:
        print(u"InvalidFilenameError: %s" % e)
    except Exception as e:
        # -- DIAGNOSTICS:
        text = _text(e)
        print(u"Exception %s: %s" % (e.__class__.__name__, text))
        raise

    if config.show_snippets and runner.undefined_steps:
        print_undefined_step_snippets(runner.undefined_steps, colored=config.color)

    return_code = 0
    if failed:
        return_code = 1
    return return_code
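
Because this main() accepts an optional args list, it can be driven programmatically as well as from the command line. A short sketch using real behave options (the feature path is a placeholder):

# Equivalent to: behave --no-color -f plain features/login.feature
rc = main(["--no-color", "-f", "plain", "features/login.feature"])
assert rc in (0, 1)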
Ejemplo n.º 56
0
if __name__ == "__main__":
    from behave.formatter import _registry
    from behave.configuration import Configuration
    from behave.runner import Runner
    from teamcity.jb_behave_formatter import TeamcityFormatter

    _registry.register_as("TeamcityFormatter", TeamcityFormatter)
    configuration = Configuration()
    configuration.format = ["TeamcityFormatter"]
    configuration.stdout_capture = False
    configuration.stderr_capture = False
    Runner(configuration).run()
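
Runner.run() returns a truthy value when the run failed (see the main() examples above), but this snippet discards it. Under CI the result is usually worth propagating; a minimal sketch:

import sys

failed = Runner(configuration).run()
sys.exit(1 if failed else 0)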