def test_parse_feature_after_parser_error():
    parser = Parser()
    with assert_raises(ParserError):
        parser.parse(TokenScanner('# a comment\n'
                                  'Feature: Foo\n'
                                  '  Scenario: Bar\n'
                                  '    Given x\n'
                                  '      ```\n'
                                  '      unclosed docstring\n'))
    feature = parser.parse(TokenScanner('Feature: Foo\n'
                                        '  Scenario: Bar\n'
                                        '    Given x\n'
                                        '      """\n'
                                        '      closed docstring\n'
                                        '      """\n'))
    expected = [{
        'name': u'Bar',
        'keyword': u'Scenario',
        'tags': [],
        'steps': [{
            'text': u'x',
            'type': 'Step',
            'location': {'column': 5, 'line': 3},
            'keyword': u'Given ',
            'argument': {
                'content': u'closed docstring',
                'type': 'DocString',
                'location': {'column': 7, 'line': 4}}}],
        'location': {'column': 3, 'line': 2},
        'type': 'Scenario'}]
    assert_equals(expected, feature['scenarioDefinitions'])
def test_parse_multiple_features():
    parser = Parser()
    f1 = parser.parse(TokenScanner("Feature: 1"))
    f2 = parser.parse(TokenScanner("Feature: 2"))
    assert_equals("1", f1['name'])
    assert_equals("2", f2['name'])
def process_gherkin(gherkin_filename, basedir, output):
    print(f"Processing gherkin: {gherkin_filename}")
    if verbose:
        print(f"Basedir: {basedir}")
        print(f"Output: {output}")
    with open(gherkin_filename, 'r') as f:
        content = f.read()
    parser = Parser()
    feature = parser.parse(content)
    global settings_lines, test_cases_lines, keywords_lines, seen_steps
    settings_lines = []
    test_cases_lines = []
    keywords_lines = []
    seen_steps = {}
    process_feature(feature)
    feature_base = os.path.dirname(gherkin_filename)
    if feature_base.startswith(basedir):
        feature_sub = feature_base[len(basedir) + 1:]
    else:
        feature_sub = feature_base
    generate_robot_script(os.path.join(output, feature_sub), feature['name'])
def parse(cls, string=None, filename=None, language=None):
    """
    Parse either a string or a file.
    """

    parser = Parser()

    # pylint:disable=redefined-variable-type
    # https://bitbucket.org/logilab/pylint/issues/710
    if language:
        if language == 'pt-br':
            language = 'pt'
        token_matcher = LanguageTokenMatcher(language)
    else:
        token_matcher = TokenMatcher()
    # pylint:enable=redefined-variable-type

    if string:
        token_scanner = TokenScanner(string=string)
    else:
        token_scanner = TokenScanner(filename=filename)

    try:
        return cls(
            parser.parse(token_scanner, token_matcher=token_matcher),
            filename=filename,
        )
    except ParserError as ex:
        raise AloeSyntaxError(filename, str(ex))
def dump_gherkin(gherkin_filename):
    with open(gherkin_filename, 'r') as f:
        content = f.read()
    parser = Parser()
    feature = parser.parse(content)
    print(yaml.dump(feature))
def test_parse_feature_after_parser_error():
    parser = Parser()
    with assert_raises(ParserError):
        parser.parse(TokenScanner('# a comment\n'
                                  'Feature: Foo\n'
                                  '  Scenario: Bar\n'
                                  '    Given x\n'
                                  '      ```\n'
                                  '      unclosed docstring\n'))
    feature = parser.parse(TokenScanner('Feature: Foo\n'
                                        '  Scenario: Bar\n'
                                        '    Given x\n'
                                        '      """\n'
                                        '      closed docstring\n'
                                        '      """\n'))
    expected = [{
        'name': u'Bar',
        'keyword': u'Scenario',
        'tags': [],
        'steps': [{
            'text': u'x',
            'type': 'Step',
            'location': {'column': 5, 'line': 3},
            'keyword': u'Given ',
            'argument': {
                'content': u'closed docstring',
                'type': 'DocString',
                'contentType': u'',
                'location': {'column': 7, 'line': 4}}}],
        'location': {'column': 3, 'line': 2},
        'type': 'Scenario'}]
    assert_equals(expected, feature['scenarioDefinitions'])
def gherkin_pieces_grouped_by_featurename(features_dir):
    """
    For a list of BDD feature files, discover the parts that are tagged
    with FM feature names (features and scenarios) and group them by the
    FM feature names.
    """
    gherkin_parser = Parser()
    pieces_grouped_by_tag = {}
    for feature_filename in listdir(features_dir):
        with open(path.join(features_dir, feature_filename), "r") as feature_file:
            feature_parsed = gherkin_parser.parse(feature_file.read())
        for tag in feature_parsed['tags']:
            tag_name = tag['name'][1:]  # remove @
            if tag_name not in pieces_grouped_by_tag:
                pieces_grouped_by_tag[tag_name] = []
            pieces_grouped_by_tag[tag_name].append(feature_parsed['name'])
        for scenario in feature_parsed['scenarioDefinitions']:
            for tag in scenario['tags']:
                tag_name = tag['name'][1:]  # remove @
                if tag_name not in pieces_grouped_by_tag:
                    pieces_grouped_by_tag[tag_name] = []
                pieces_grouped_by_tag[tag_name].append(scenario['name'])
    return pieces_grouped_by_tag
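# Usage sketch for the helper above. The directory name and the tag and
# feature names in the comment are hypothetical; the result shape follows
# directly from the function body: feature names are grouped under
# feature-level tags and scenario names under scenario-level tags, with
# the leading '@' stripped from each tag.
pieces = gherkin_pieces_grouped_by_featurename("features")
# e.g. {'Authentication': ['Login'],
#       'PasswordReset': ['Reset a forgotten password']}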
def test_parser():
    parser = Parser()
    feature = parser.parse(TokenScanner("Feature: Foo"))
    expected = {
        'comments': [],
        'keyword': u'Feature',
        'language': 'en',
        'location': {'column': 1, 'line': 1},
        'name': u'Foo',
        'scenarioDefinitions': [],
        'tags': [],
        'type': 'Feature',
    }
    assert_equals(expected, feature)
def test_change_the_default_language():
    parser = Parser()
    matcher = TokenMatcher('no')
    feature = parser.parse(TokenScanner("Egenskap: i18n support"), matcher)
    expected = {
        'comments': [],
        'keyword': u'Egenskap',
        'language': 'no',
        'location': {'column': 1, 'line': 1},
        'name': u'i18n support',
        'scenarioDefinitions': [],
        'tags': [],
        'type': 'Feature',
    }
    assert_equals(expected, feature)
class Reader:
    def __init__(self):
        self.parser = Parser()

    def strip_comments(self, step):
        comment = step.find(GHERKIN_COMMENT)
        if comment != -1:
            return step[:comment].rstrip()
        return step

    def filter_definitions(self, defs):
        f = lambda prev, curr: prev + list(
            map(self.strip_comments, pluck(curr['steps'], 'text')))
        return reduce(f, defs, [])

    def read_steps(self, filename):
        try:
            feature = self.parser.parse(TokenScanner(filename))
            defs = []
            if GHERKIN_SCENARIOS in feature:
                defs += feature[GHERKIN_SCENARIOS]
            if GHERKIN_BACKGROUND in feature:
                defs.append(feature[GHERKIN_BACKGROUND])
            steps = self.filter_definitions(defs)
            cache.set(filename, steps)
        except CompositeParserException as e:
            steps = []
            print(e)
        return steps

    def get_steps(self, filename):
        if cache.has(filename):
            return cache.get(filename)
        return self.read_steps(filename)
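# Usage sketch for the Reader above; the feature path is hypothetical.
# The first call parses the file and populates the module-level cache the
# class relies on; the second call is answered from that cache.
reader = Reader()
steps = reader.get_steps('features/login.feature')  # parsed and cached
steps = reader.get_steps('features/login.feature')  # cache hit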
def process_gherkin(gherkin_filename, basedir, output):
    with open(gherkin_filename, 'r') as f:
        content = f.read()
    parser = Parser()
    feature = parser.parse(content)
    global settings_lines, test_cases_lines, keywords_lines, seen_steps
    settings_lines = []
    test_cases_lines = []
    keywords_lines = []
    seen_steps = set()
    process_feature(feature)
    feature_base = os.path.dirname(gherkin_filename)
    feature_sub = feature_base[len(basedir) + 1:]
    generate_robot_script(os.path.join(output, feature_sub), feature['name'])
def parse(cls, string=None, filename=None, language=None):
    """
    Parse either a string or a file.
    """

    parser = Parser()

    if language:
        if language == 'pt-br':
            language = 'pt'
        token_matcher = LanguageTokenMatcher(language)
    else:
        token_matcher = TokenMatcher()

    if string:
        token_scanner = TokenScanner(string=string)
    else:
        token_scanner = TokenScanner(filename=filename)

    return cls(
        parser.parse(token_scanner, token_matcher=token_matcher),
        filename=filename,
    )
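# Usage sketch, assuming the method above is a @classmethod on a
# Feature-style wrapper class (as in Aloe); the class name and both
# inputs are illustrative.
feature = Feature.parse(string='Funcionalidade: Login', language='pt-br')
feature = Feature.parse(filename='features/login.feature')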
def test_compiles_a_scenario_outline_with_i18n_characters():
    feature_text = textwrap.dedent("""\
        Feature: f
          Scenario Outline: with 'é' in title
            Given <with-é>
            Examples:
            | with-é  |
            | passing |
        """)
    output = Parser().parse(feature_text)
    pickle = compiler.compile(output, 'features/hello.feature')
    expected_pickle = textwrap.dedent("""\
        [
            {
                "name": "Scenario: with 'é' in title",
                "steps": [
                    {
                        "text": "passing",
                        "arguments": [],
                        "locations": [
                            {"line": 6, "column": 5, "path": "features/hello.feature"},
                            {"line": 3, "column": 11, "path": "features/hello.feature"}
                        ]
                    }
                ],
                "tags": [],
                "locations": [
                    {"line": 6, "column": 5, "path": "features/hello.feature"},
                    {"line": 2, "column": 3, "path": "features/hello.feature"}
                ]
            }
        ]
        """)
    assert_equals(pickle, json.loads(expected_pickle))
def test_compiles_a_scenario():
    feature_text = textwrap.dedent("""\
        Feature: f
          Scenario: s
            Given passing
        """)
    output = Parser().parse(feature_text)
    pickle = compiler.compile(output, 'features/hello.feature')
    expected_pickle = textwrap.dedent("""\
        [
            {
                "name": "Scenario: s",
                "steps": [
                    {
                        "text": "passing",
                        "arguments": [],
                        "locations": [
                            {"line": 3, "column": 11, "path": "features/hello.feature"}
                        ]
                    }
                ],
                "tags": [],
                "locations": [
                    {"line": 2, "column": 3, "path": "features/hello.feature"}
                ]
            }
        ]
        """)
    assert_equals(pickle, json.loads(expected_pickle))
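# Sketch of the contract the two compiler tests above exercise, reusing
# their setup: compile() flattens the parsed AST into pickles, one per
# concrete scenario, so a Scenario Outline contributes one pickle per
# Examples row. An outline with N rows would yield N pickles.
ast = Parser().parse(feature_text)
pickles = compiler.compile(ast, 'features/hello.feature')
assert len(pickles) == 1  # one plain scenario (or one Examples row)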
import argparse
import hashlib
import json
import pprint
from os import listdir, path

from gherkin.parser import Parser  # assuming the gherkin-official package

arg_parser = argparse.ArgumentParser(
    description='Draw dot file from FeatureIDE model.xml')
arg_parser.add_argument('model_xml_file',
                        help="Location of the model XML file")
arg_parser.add_argument('features_dir',
                        help="Location of the feature files")
arg_parser.add_argument('reports_dir',
                        help="Location of the test results files for products")
arg_parser.add_argument('output_dir',
                        help="Location where the output file should be rendered")
arg_parser.add_argument('--output_filename', default="feature_model",
                        help="Name to use for the output file")
arg_parser.add_argument('--productconfig', default="all", help="")
args = arg_parser.parse_args()

gherkin_parser = Parser()
tags = {}
for feature_filename in listdir(args.features_dir):
    with open(path.join(args.features_dir, feature_filename), "r") as feature_file:
        feature_parsed = gherkin_parser.parse(feature_file.read())
    for tag in feature_parsed['tags']:
        tag_name = tag['name'][1:]  # remove @
        if tag_name not in tags:
            tags[tag_name] = []
        tags[tag_name].append("F: " + feature_parsed['name'])
    for scenario in feature_parsed['scenarioDefinitions']:
        for tag in scenario['tags']:
            tag_name = tag['name'][1:]  # remove @
            # The original snippet breaks off here; the body below mirrors
            # the feature-level grouping above, with an assumed "S: " prefix.
            if tag_name not in tags:
                tags[tag_name] = []
            tags[tag_name].append("S: " + scenario['name'])
def __init__(self):
    self.parser = Parser()