Example #1
def test_parse_feature_after_parser_error():
    parser = Parser()
    with assert_raises(ParserError):
        parser.parse(TokenScanner('# a comment\n' +
                                  'Feature: Foo\n' +
                                  '  Scenario: Bar\n' +
                                  '    Given x\n' +
                                  '      ```\n' +
                                  '      unclosed docstring\n'))
    feature = parser.parse(TokenScanner('Feature: Foo\n' +
                                        '  Scenario: Bar\n' +
                                        '    Given x\n'
                                        '      """\n'
                                        '      closed docstring\n'
                                        '      """\n'))
    expected = [{
        'name': u'Bar',
        'keyword': u'Scenario',
        'tags': [],
        'steps': [{
            'text': u'x',
            'type': 'Step',
            'location': {'column': 5, 'line': 3},
            'keyword': u'Given ',
            'argument': {
                'content': u'closed docstring',
                'type': 'DocString',
                'location': {'column': 7, 'line': 4}}}],
        'location': {'column': 3, 'line': 2},
        'type': 'Scenario'}]

    assert_equals(expected, feature['children'])
Example #2
 def test_should_count_tags(self):
     tag_counts = {}
     dummy = SexpRecorder()
     formatter = TagCountFormatter(dummy, tag_counts)
     parser = Parser(formatter)
     
     here = os.path.dirname(__file__)
     fixtures = os.path.join(here, '..', '..', '..', 'spec', 'gherkin')
     path = os.path.join(fixtures, 'fixtures', 'complex_with_tags.feature')
     gherkin = open(path).read()
     
     parser.parse(gherkin, 'f.feature', 0)
     
     tools.eq_(tag_counts, {
         u"@hamster": ["f.feature:58"],
         u"@tag1":    ["f.feature:18","f.feature:23","f.feature:39",
                       "f.feature:52","f.feature:58"],
         u"@tag2":    ["f.feature:18","f.feature:23","f.feature:39",
                       "f.feature:52","f.feature:58"],
         u"@tag3":    ["f.feature:18", "f.feature:23"],
         u"@tag4":    ["f.feature:18"],
         u"@neat":    ["f.feature:52"],
         u"@more":    ["f.feature:52", "f.feature:58"]
     })
     
Example #3
    def parse(cls, string=None, filename=None, language=None):
        """
        Parse either a string or a file.
        """

        parser = Parser()
        # pylint:disable=redefined-variable-type
        # https://bitbucket.org/logilab/pylint/issues/710
        if language:
            if language == 'pt-br':
                language = 'pt'
            token_matcher = LanguageTokenMatcher(language)
        else:
            token_matcher = TokenMatcher()
        # pylint:enable=redefined-variable-type

        if string:
            token_scanner = TokenScanner(string=string)
        else:
            token_scanner = TokenScanner(filename=filename)

        try:
            return cls(
                parser.parse(token_scanner, token_matcher=token_matcher),
                filename=filename,
            )
        except ParserError as ex:
            raise AloeSyntaxError(filename, str(ex))
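A minimal usage sketch for the classmethod above, assuming it lives on an aloe-style Feature class; the class name, feature text, and path below are illustrative placeholders, not a confirmed API.

# Hypothetical call sites for the parse() classmethod shown above.
feature_from_string = Feature.parse(
    string="Feature: Login\n  Scenario: Happy path\n    Given a user",
)
feature_from_file = Feature.parse(
    filename="features/login.feature",
    language="pt-br",  # normalised to "pt" before building the token matcher
)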
Example #4
def test_parse_multiple_features():
    parser = Parser()
    f1 = parser.parse(TokenScanner("Feature: 1"))
    f2 = parser.parse(TokenScanner("Feature: 2"))

    assert_equals("1", f1['name'])
    assert_equals("2", f2['name'])
Example #5
def info(input, verbose, pyformat, **kwargs):
    """
    Provides info about the input. Requires valid input.
    """
    if not input:
        input = "-"
    with click.open_file(input, mode="rb") as f:
        parser = Parser()
        feature_text = f.read()
        feature = parser.parse(feature_text)

        metrics = {}
        steps = [a[-1] for d, k, v, a in walk_items(feature) if k == "type" and v == "Step"]
        scenarios = [a[-1] for d, k, v, a in walk_items(feature) if k == "type" and v == "Scenario"]
        # tables = [a[-1] for d, k, v, a in walk_items(feature) if k == 'type' and v == 'DataTable']
        ctr_type = Counter((v for d, k, v in walk_items(feature, ancestors=False) if k == "type"))
        ctr_kw = Counter((v for d, k, v in walk_items(feature, ancestors=False) if k == "keyword"))
        metrics.update({"count": {"Keywords": ctr_kw, "Types": ctr_type}})
        metrics.update({"content": {"Scenarios": [d["name"] for d in scenarios], "Steps": [d["text"] for d in steps]}})
        data = metrics

        if verbose:
            data["_object"] = {"type": type(feature), "members": sorted(varsdict(feature).keys())}
        if pyformat:
            s = pformat(data)
        else:
            s = json.dumps(data, indent=2, sort_keys=True)
        click.echo(s)
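If the info function above is registered as a click command (its decorators are not shown in this snippet), it could be exercised with click's CliRunner; a hedged sketch with a placeholder feature path follows.

# Assumes the missing decorators expose `input` as an argument and
# --verbose/--pyformat as flags.
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(info, ["features/login.feature", "--pyformat"])
print(result.output)  # keyword/type counts plus scenario and step names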
Example #6
 def verify_filter(self, filters, *line_ranges):
     io = StringIO.StringIO()
     pretty_formatter = PrettyFormatter(io, True, False)
     filter_formatter = FilterFormatter(pretty_formatter, filters)
     parser = Parser(filter_formatter)
     
     path = os.path.dirname(__file__)
     path = os.path.join(path, '..', '..', '..', 'spec', 'gherkin')
     path = os.path.join(path, 'fixtures', self.file)
     source = open(path).read() + "# __EOF__"
     parser.parse(source, path, 0)
     
     source_lines = source.split('\n')
     expected = []
     for line_range in line_ranges:
         expected.extend(source_lines[line_range[0] - 1:line_range[1]])
     expected = '\n'.join(expected)
     expected = expected.replace('# __EOF__', '')
     tools.eq_(io.getvalue(), expected)
Example #7
def test_parser():
    parser = Parser()
    feature = parser.parse(TokenScanner("Feature: Foo"))
    expected = {
        'comments': [],
        'keyword': u'Feature',
        'language': 'en',
        'location': {'column': 1, 'line': 1},
        'name': u'Foo',
        'children': [],
        'tags': [],
        'type': 'Feature'}

    assert_equals(expected, feature)
Example #8
def test_change_the_default_language():
    parser = Parser()
    matcher = TokenMatcher('no')
    feature = parser.parse(TokenScanner("Egenskap: i18n support - åæø"), matcher)
    expected = {
        'comments': [],
        'keyword': u'Egenskap',
        'language': 'no',
        'location': {'column': 1, 'line': 1},
        'name': u'i18n support - åæø',
        'children': [],
        'tags': [],
        'type': 'Feature'}

    assert_equals(expected, feature)
Example #9
    def __init__(self, file_path, commit=None, test_dir=None,
                 strip_extension=False):
        """
        :param file_path: Path to a feature file within the Git repository.
        :type file_path: str
        :param commit: Commit SHA at which to view the Feature file
        :type commit: str
        :param test_dir: Part of the file path to strip from the beginning
            of the file path when returning the name of the test definition.
        :type test_dir: str
        :param strip_extension: Boolean flag to control stripping file
            extensions from feature test definitions.
        :type strip_extension: bool
        """
        self.file_path = file_path
        self.commit = commit
        self.test_dir = test_dir
        self.strip_extension = strip_extension

        with checkout(self.file_path, self.commit) as file_handle:
            parser = Parser()
            self._original_definition = parser.parse(file_handle.read())

        # process scenarios
        self.definition = deepcopy(self._original_definition)
        # gherkin.parser is changing the key name for the scenarios from
        # 'scenarioDefinitions' to 'children'.  Handle both for now.
        if 'children' in self.definition:
            child_key = 'children'
        elif 'scenarioDefinitions' in self.definition:
            child_key = 'scenarioDefinitions'
        else:
            child_key = None

        self.scenarios = {}
        if child_key:
            children = self.definition[child_key]
            for child in children:
                if child['type'].lower() == 'scenario':
                    scenario = Scenario(child)
                    self.scenarios[scenario.name] = scenario

            # Erase all scenarios from the definition in order
            # to compare the common elements in this Feature file
            # to the common elements in another feature file
            self.definition[child_key] = [
                x for x in children if x['type'].lower() != 'scenario']
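A hedged instantiation sketch for the constructor documented above; the class name Feature, the repository path, and the commit below are assumptions drawn from the snippet rather than a confirmed interface.

# Hypothetical usage; all arguments are placeholders.
feature = Feature(
    "tests/acceptance/login.feature",  # path within the Git repository
    commit="HEAD~1",                   # view the file at an earlier commit
    test_dir="tests/acceptance/",      # prefix stripped from the test name
    strip_extension=True,              # also drop the .feature suffix
)
print(sorted(feature.scenarios))       # Scenario objects keyed by name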
Example #10
    def collect(self):
        parser = Parser()
        with self.fspath.open() as handle:
            feature = parser.parse(handle.read())

            # Group the feature's children by type
            children = defaultdict(list)
            for child in feature["feature"].get("children", []):
                children[child["type"]].append(child)

            backgrounds = children.get("Background", [])

            self.obj = dict()

            for scenario_index, scenario_outline in enumerate(
                children["ScenarioOutline"]
            ):
                for example in self._get_example_sets(scenario_outline["examples"]):
                    example_values = "-".join([v for d in example for v in d.values()])

                    function = ScenarioOutline(
                        name=scenario_outline["name"] + ": " + example_values,
                        parent=self,
                        spec=scenario_outline,
                        scenario_index=scenario_index,
                        example=example,
                        backgrounds=backgrounds,
                    )
                    for mark in MARKS:
                        function = getattr(pytest.mark, mark)(function)
                    yield function

            for scenario_index, scenario_outline in enumerate(
                children["Scenario"], -1000000
            ):
                function = ScenarioOutline(
                    name=scenario_outline["name"],
                    parent=self,
                    spec=scenario_outline,
                    scenario_index=scenario_index,
                    backgrounds=backgrounds,
                )
                for mark in MARKS:
                    function = getattr(pytest.mark, mark)(function)
                yield function
Example #11
def test_change_the_default_language():
    parser = Parser()
    matcher = TokenMatcher('no')
    feature_file = parser.parse(TokenScanner("Egenskap: i18n support - åæø"), matcher)
    expected = {
        'comments': [],
        'feature': {
            'keyword': u'Egenskap',
            'language': 'no',
            'location': {'column': 1, 'line': 1},
            'name': u'i18n support - åæø',
            'description': '',
            'children': [],
            'tags': []
        },
    }

    assert_equals(expected, feature_file)
Example #12
def test_parser():
    parser = Parser()
    feature = parser.parse(TokenScanner("Feature: Foo"))
    expected = {
        'comments': [],
        'keyword': u'Feature',
        'language': 'en',
        'location': {
            'column': 1,
            'line': 1
        },
        'name': u'Foo',
        'scenarioDefinitions': [],
        'tags': [],
        'type': 'Feature'
    }

    assert_equals(expected, feature)
Example #13
    def parse(cls, string=None, filename=None, language=None):
        """
        Parse either a string or a file.
        """

        parser = Parser()
        if language:
            if language == "pt-br":
                language = "pt"
            token_matcher = LanguageTokenMatcher(language)
        else:
            token_matcher = TokenMatcher()

        try:
            return cls(parser.parse(string or filename,
                                    token_matcher=token_matcher),
                       filename=filename)
        except ParserError as ex:
            raise AloeSyntaxError(filename, str(ex))
Example #14
def test_parser():
    parser = Parser()
    feature_file = parser.parse(TokenScanner("Feature: Foo"))
    expected = {
        'comments': [],
        'feature': {
            'keyword': u'Feature',
            'language': 'en',
            'location': {
                'column': 1,
                'line': 1
            },
            'name': u'Foo',
            'children': [],
            'tags': []
        },
    }

    assert_equals(expected, feature_file)
Example #15
    def parse(cls, string=None, filename=None, language=None):
        """
        Parse either a string or a file.
        """

        parser = Parser()
        if language:
            if language == 'pt-br':
                language = 'pt'
            token_matcher = LanguageTokenMatcher(language)
        else:
            token_matcher = TokenMatcher()

        try:
            return cls(
                parser.parse(string or filename, token_matcher=token_matcher),
                filename=filename,
            )
        except ParserError as ex:
            raise AloeSyntaxError(filename, str(ex))
Example #16
def test_change_the_default_language():
    parser = Parser()
    matcher = TokenMatcher('no')
    feature = parser.parse(TokenScanner("Egenskap: i18n support - åæø"),
                           matcher)
    expected = {
        'comments': [],
        'keyword': u'Egenskap',
        'language': 'no',
        'location': {
            'column': 1,
            'line': 1
        },
        'name': u'i18n support - åæø',
        'scenarioDefinitions': [],
        'tags': [],
        'type': 'Feature'
    }

    assert_equals(expected, feature)
Example #17
class BDDTester:
    def __init__(self, step_path):
        self._load_step_definitions(step_path)
        self.gherkinparser = GherkinParser()

    def _load_step_definitions(self, filepath):
        reload(utils)
        if six.PY3:
            SourceFileLoader('', filepath).load_module()
        else:
            load_source('', filepath)
        self.store = dict(utils.store)

    def load_feature(self, feature_filepath):
        with open(feature_filepath) as f:
            feature_txt = f.read()
        return self._gherkinify_feature(feature_txt)

    def _gherkinify_feature(self, feature_txt):
        feature_dict = self.gherkinparser.parse(feature_txt)['feature']
        return Feature(feature_dict, self)
Example #18
def parse(input, pyformat, sort_keys, **kwargs):
    """
    Converts the input into a structured object hierarchy. Requires valid input.
    """
    if not input:
        input = "-"
    with click.open_file(input, mode="rb") as f:
        parser = Parser()
        feature_text = f.read()
        feature = parser.parse(feature_text)
        # click.echo(feature)
        # pickles = compile(feature, "path/to/the.feature")
        # click.echo(pickles)

        data = {}
        data.update(feature)
        if pyformat:
            s = pformat(data)
        else:
            s = json.dumps(data, indent=2, sort_keys=sort_keys)
        click.echo(s)
Example #19
def read_feature(feature_path):
    """
    Read a specific feature
    :param feature_path: path of the file that contains the feature
    :return: Feature object
    TODO: Refactor to use this method into for loop in read_all_bdds() method
    """
    feature = Feature()
    with open(feature_path) as fp:
        fp.seek(0)
        parser = Parser()
        print(feature_path)
        feature_file = parser.parse(TokenScanner(fp.read()))

        feature.feature_name = feature_file['feature']['name']
        feature.language = feature_file['feature']['language']
        feature.path_name = feature_path
        feature.tags = feature_file['feature']['tags']
        feature.line = feature_file['feature']['location']['line']
        feature.scenarios = get_scenarios(feature_file['feature']['children'])

    return feature
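A short usage sketch for read_feature as defined above; the path is a placeholder.

feature = read_feature("features/desktop/login.feature")
print(feature.feature_name, feature.language, len(feature.scenarios))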
Example #20
class GherkinEvents:
    def __init__(self, options):
        self.options = options
        self.parser = Parser()

    def enum(self, source_event):
        uri = source_event['uri']
        source = source_event['data']

        events = []

        try:
            gherkin_document = self.parser.parse(source)

            if (self.options.print_source):
                events.append(source_event)

            if (self.options.print_ast):
                events.append({
                    'type': 'gherkin-document',
                    'uri': uri,
                    'document': gherkin_document
                })

            if (self.options.print_pickles):
                pickles = compile(gherkin_document)
                for pickle in pickles:
                    events.append({
                        'type': 'pickle',
                        'uri': uri,
                        'pickle': pickle
                    })
        except CompositeParserException as e:
            add_errors(events, e.errors, uri)
        except ParserError as e:
            add_errors(events, [e], uri)

        return events
Example #22
def read_all_bdds(url):
    features = []
    for root, dirs, files in os.walk(url + '/features/desktop/'):
        for file in files:
            if file.endswith(".feature"):
                feature = Feature()
                file_path = os.path.join(root, file)
                with open(file_path) as fp:
                    fp.seek(0)
                    parser = Parser()
                    print(file_path)
                    feature_file = parser.parse(TokenScanner(fp.read()))

                    feature.feature_name = feature_file['feature']['name']
                    feature.language = feature_file['feature']['language']
                    feature.path_name = file_path
                    feature.tags = feature_file['feature']['tags']
                    feature.line = feature_file['feature']['location']['line']
                    feature.scenarios = get_scenarios(
                        feature_file['feature']['children'])

                    features.append(feature)
    return features
Example #23
from gherkin.parser import Parser
from os import listdir

DATA_DIR = "cuc/"
OUT_DIR = "res/"
OUTPUT = OUT_DIR + "output.txt"

try:
    p = Parser()
    with open(OUTPUT, mode='w') as op:
        for f in listdir(DATA_DIR):
            res = p.parse(DATA_DIR + f)
            op.write("filename: {}\n\n".format(DATA_DIR + f))
            for ft in res['feature']['children']:
                for t in ft['tags']:
                    if t['name'].startswith("@business") or \
                        t['name'].startswith("@ID"):
                        op.write("{}\t".format(t['name']))
                op.write("\t{}\n".format(ft['name']))

except Exception as e:
    print(e)
Example #24
def parseFeatureFile(path, files):
    parser = Parser()
    feature = parser.parse(TokenScanner(path + "/" + files))
    return feature
Example #25
def parsing():
    """This function handles parsing command line arguments


    """

    descr = 'Ghenerate, the Gherkin Python Step Generator from Quantarhei'
    parser = argparse.ArgumentParser(description=descr + ' ...')

    parser.add_argument("file",
                        metavar='file',
                        type=str,
                        help='feature file to be processed',
                        nargs='?')

    #
    # Generator options
    #
    parser.add_argument("-v",
                        "--version",
                        action="store_true",
                        help="shows Quantarhei package version")
    parser.add_argument("-i",
                        "--info",
                        action='store_true',
                        help="shows detailed information about Quantarhei" +
                        " installation")
    parser.add_argument("-d",
                        "--destination",
                        type=str,
                        help="specifies destination directory for the" +
                        " generated step file")
    parser.add_argument("-n",
                        "--no-pass",
                        action="store_true",
                        help="empty tests should not pass (default is" +
                        " passing empty tests)")
    parser.add_argument("-f",
                        "--start-from",
                        type=int,
                        help="step functions will be numberred starting" +
                        " from this value")

    #
    # Parsing all arguments
    #
    args = parser.parse_args()

    #
    # show longer info
    #
    if args.info:
        qr.printlog("\n" +
                    "ghenerate: Quantarhei Gherkin Python Step Generator\n",
                    verbose=True,
                    loglevel=0)

        if not args.version:
            qr.printlog("Package version: ",
                        qr.Manager().version,
                        "\n",
                        verbose=True,
                        loglevel=0)
        return 0

    #
    # show just Quantarhei version number
    #
    if args.version:
        qr.printlog("Quantarhei package version: ",
                    qr.Manager().version,
                    "\n",
                    verbose=True,
                    loglevel=0)
        return 0

    if args.destination:
        ddir = args.destination
    else:
        ddir = "ghen"

    if args.file:

        print("")
        print(descr + " ...")

        filename = args.file

    else:
        print("No file specified: quiting")
        parser.print_help()
        return 0

    steps_pass = True
    if args.no_pass:
        steps_pass = False

    k_from = 0
    if args.start_from:
        k_from = args.start_from

    try:
        with open(filename, 'r') as myfile:
            data = myfile.read()
    except:
        raise Exception("Problems reading file: " + filename)

    parser = Parser()
    try:
        feature_file = parser.parse(TokenScanner(data))
    except:
        raise Exception("Problem parsing file: " + filename +
                        " - is it a feature file?")

    try:
        children = feature_file["feature"]["children"]
    except:
        raise Exception("No scenarii or scenario outlines")

    return dict(children=children,
                ddir=ddir,
                steps_pass=steps_pass,
                filename=filename,
                k_from=k_from)
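A hedged sketch of driving parsing() from an entry point; the wrapper below only illustrates consuming the returned dict and is not part of ghenerate itself.

# parsing() returns 0 for --version/--info (or when no file is given)
# and a dict of settings otherwise.
if __name__ == "__main__":
    settings = parsing()
    if isinstance(settings, dict):
        print("Generating steps for", settings["filename"],
              "into directory", settings["ddir"])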
Example #26
def main(resources_path):

    parser = Parser()

    nlp_ready_resources = {}

    for root, dirs, files in os.walk(resources_path):
        for file_name in files:
            if file_name.endswith('.resource'):
                resource = os.path.splitext(basename(file_name))[0]
                resource = ' '.join(resource.split('-'))
                resource = ' '.join(resource.split('_'))
                parsed_resource_file = parser.parse(
                    os.path.join(root, file_name))
                nlp_ready_resources[resource] = {}
                for child in parsed_resource_file['feature']['children']:
                    if child['type'] == 'Background':
                        nlp_ready_resources[resource]['background'] = {}
                        nlp_ready_resources[resource]['background'][
                            'Given'] = []
                        for step in child['steps']:
                            sentence = step['keyword'] + step['text']
                            nlp_ready_resources[resource]['background'][
                                'Given'].append({'sentence': sentence})
                    elif child['type'] == 'Scenario':
                        ordered_step_types = OrderedDict({
                            'Given': [],
                            'When': [],
                            'Then': []
                        })
                        ordered_step_types.move_to_end('When')
                        ordered_step_types.move_to_end('Then')
                        nlp_ready_resources[resource][
                            child['name']] = ordered_step_types
                        in_step = ''
                        for step in child['steps']:
                            data_table = []
                            sentence = step['keyword'] + step['text']
                            # note: gherkin keywords keep a trailing space, e.g. 'Given '
                            if step['keyword'] in ('Given ', 'When ', 'Then '):
                                in_step = step['keyword'].strip()
                            if 'argument' in step:
                                if step['argument']['type'] == 'DataTable':
                                    data_table = parse_table(step)
                            if not in_step == 'Given':
                                nlp_ready_resources[resource][
                                    child['name']][in_step].append({
                                        'sentence':
                                        sentence,
                                        'data_table':
                                        data_table,
                                        'scenario_name':
                                        child['name']
                                    })
                                if 'description' in child:
                                    nlp_ready_resources[resource][
                                        child['name']][
                                            'Scenario Description'] = child[
                                                'description']
                            else:
                                nlp_ready_resources[resource][
                                    child['name']][in_step].append({
                                        'sentence':
                                        sentence,
                                        'scenario_name':
                                        child['name']
                                    })
    return nlp_ready_resources
Example #27
 def parse(self, cucumber_file):
     # self.backend.configure()
     parser = Parser()
     # TokenScanner accepts either a path or raw feature text
     data = parser.parse(TokenScanner(cucumber_file))
     return data
Example #28
def parsing():
    """This function handles parsing command line arguments


    """

    descr = 'Ghenerate, the Gherkin Python Step Generator from Quantarhei'
    parser = argparse.ArgumentParser(description=descr+' ...')


    parser.add_argument("file", metavar='file', type=str,
                        help='feature file to be processed', nargs='?')

    #
    # Generator options
    #
    parser.add_argument("-v", "--version", action="store_true",
                        help="shows Quantarhei package version")
    parser.add_argument("-i", "--info", action='store_true',
                        help="shows detailed information about Quantarhei"+
                        " installation")
    parser.add_argument("-d", "--destination", type=str,
                        help="specifies destination directory for the"+
                        " generated step file")
    parser.add_argument("-n", "--no-pass", action="store_true",
                        help="empty tests should not pass (default is"
                        +" passing empty tests)")
    parser.add_argument("-f", "--start-from", type=int,
                        help="step functions will be numberred starting"
                        +" from this value")

    #
    # Parsing all arguments
    #
    args = parser.parse_args()

    #
    # show longer info
    #
    if args.info:
        qr.printlog("\n"
                    +"ghenerate: Quantarhei Gherkin Python Step Generator\n",
                    verbose=True, loglevel=0)

        if not args.version:
            qr.printlog("Package version: ", qr.Manager().version, "\n",
                        verbose=True, loglevel=0)
        return 0

    #
    # show just Quantarhei version number
    #
    if args.version:
        qr.printlog("Quantarhei package version: ", qr.Manager().version, "\n",
                    verbose=True, loglevel=0)
        return 0

    if args.destination:
        ddir = args.destination
    else:
        ddir = "ghen"

    if args.file:

        print("")
        print(descr+" ...")

        filename = args.file

    else:
        print("No file specified: quiting")
        parser.print_help()
        return 0

    steps_pass = True
    if args.no_pass:
        steps_pass = False

    k_from = 0
    if args.start_from:
        k_from = args.start_from

    try:
        with open(filename, 'r') as myfile:
            data = myfile.read()
    except:
        raise Exception("Problems reading file: "+filename)

    parser = Parser()
    try:
        feature_file = parser.parse(TokenScanner(data))
    except:
        raise Exception("Problem parsing file: "+filename+
                        " - is it a feature file?")

    try:
        children = feature_file["feature"]["children"]
    except:
        raise Exception("No scenarii or scenario outlines")

    return dict(children=children, ddir=ddir,
                steps_pass=steps_pass, filename=filename, k_from=k_from)
Example #29
class StepsQueue:
    def __init__(self, methods, webdriver):
        self.features = []
        self.queue_list = []
        self.methods = methods
        self.parser = Parser()
        self.webdriver = webdriver
        self.passed_steps = 0
        self.passed_scenarios = 0
        self.failed_steps = 0
        self.failed_scenarios = 0
        self.wip_tag_flag = False
        self.first_scenario_flag = True
        self.log = ''

    def prepare_steps(self):
        try:
            self._load_features_files()
            self._status_update()

        except ParserException as e:
            raise ParserException(e)

        except CompositeParserException as e:
            print_error(e)
            raise ParserException

    def start(self):
        start = time()
        for feature in self.features:
            if feature.status is Status.PENDING:
                self._each_feature(feature)

        finish_time = '%.2f' % (time() - start)
        self._print_summary(finish_time)

    def _each_feature(self, feature):
        self._print_gherkin(Type.FEATURE, feature.text)
        for scenario in feature.scenarios:
            if scenario.status is Status.PENDING:
                self._each_scenario(scenario)

    def _each_scenario(self, scenario):
        if self.first_scenario_flag:
            self.first_scenario_flag = False

        else:
            self.webdriver.refresh_webdriver()

        self._print_gherkin(Type.SCENARIO, scenario.text)
        for step in scenario.steps:
            if self._each_step(step) is not Status.SUCCESS:
                scenario.status = Status.FAILED
                self.failed_scenarios += 1
                return

        scenario.status = Status.SUCCESS
        self.passed_scenarios += 1

    def _each_step(self, step):
        try:
            step.add_webdriver_to_params(self.webdriver)
            step.call_method(self.methods)
            step.status = Status.SUCCESS

        except CustomException as e:
            step.status = Status.FAILED
            step.error = e

        except Exception as e:  #todo
            step.status = Status.FAILED
            step.error = e

        finally:
            self._print_gherkin(Type.STEP, step.text, status=step.status)
            if step.error is not None:
                print_error(step.error)

            if step.status is None:
                raise

            if step.status is Status.SUCCESS:
                self.passed_steps += 1

            elif step.status is Status.FAILED:
                self.failed_steps += 1

            return step.status

    def _print_gherkin(self, type, text, status=None):
        tab = '    '
        if type == Type.FEATURE:
            self._print_and_log()
            self._print_and_log(Colours.CYAN + text + Colours.DEFAULT)

        elif type == Type.SCENARIO:
            self._print_and_log(Colours.CYAN + tab + text + Colours.DEFAULT)

        elif type == Type.STEP:
            if status == Status.SUCCESS:
                self._print_and_log(tab + tab + Colours.GREEN + text + Colours.DEFAULT, status)

            elif status == Status.FAILED:
                self._print_and_log(tab + tab + Colours.RED + text + Colours.DEFAULT, status)

    def _print_summary(self, finish_time):
        self._print_and_log()
        self._print_and_log()
        self._print_and_log(Colours.GREEN + 'PASSED SCENARIOS: ' + str(self.passed_scenarios) + Colours.DEFAULT)
        self._print_and_log(Colours.RED + 'FAILED SCENARIOS: ' + str(self.failed_scenarios) + Colours.DEFAULT)
        self._print_and_log()
        self._print_and_log(Colours.GREEN + 'PASSED STEPS: ' + str(self.passed_steps) + Colours.DEFAULT)
        self._print_and_log(Colours.RED + 'FAILED STEPS: ' + str(self.failed_steps) + Colours.DEFAULT)
        self._print_and_log()
        self._print_and_log(Colours.GREEN + 'Finished in: ' + finish_time + 'sec' + Colours.DEFAULT)

    def _print_and_log(self, text='', status=''):
        if status != '':
            status = ' (' + status + ')'
        print(text)
        self.log = '\n'.join((self.log, text + status))

    def _load_features_files(self):
        try:
            if not os.listdir(config['directory_path']['paths']['features']):
                raise ParserException('features directory is empty')

        except FileNotFoundError:
            raise ParserException('features directory not found')

        for feature_file in os.listdir(config['directory_path']['paths']['features']):
            self._open_feature_file(config['directory_path']['paths']['features'] + feature_file)

    def _open_feature_file(self, file_path):
        with open(file_path, 'r') as feature_file:
            gherkin_document = self.parser.parse(feature_file.read())
            self._read_gherkin(gherkin_document)

    def _read_gherkin(self, gherkin_document):
        try:
            pickles = compile(gherkin_document)
            feature = gherkin_document['feature']
            self._read_gherkin_feature(pickles, feature)

        except KeyError:
            pass

    def _read_gherkin_feature(self, pickles, feature_object):
        feature = feature_object['name']
        self.features.append(Feature(feature))
        for scenario in pickles:
            self._read_gherkin_scenario(scenario)

    def _read_gherkin_scenario(self, scenario_object):
        scenario = scenario_object['name']
        self.features[-1].add_scenario(Scenario(scenario))
        tags = scenario_object['tags']
        for tag in tags:
            self._read_gherkin_tag(tag)

        steps = scenario_object['steps']
        for step in steps:
            self._read_gherkin_step(step)

    def _read_gherkin_tag(self, tag):
        self.features[-1].tag = tag['name']
        self.features[-1].scenarios[-1].tag = tag['name']
        if tag['name'] == Tags.WIP:
            self.wip_tag_flag = True

    def _read_gherkin_step(self, step):
        self.features[-1].scenarios[-1].add_step(Step(step['text']))

    def _status_update(self):
        for feature in self.features:
            self._status_update_feature(feature)

    def _status_update_feature(self, feature):
        feature.status = Status.SKIPPED
        for scenario in feature.scenarios:
            self._status_update_scenario(feature, scenario)

    def _status_update_scenario(self, feature, scenario):
        self._skip_or_pending(scenario)
        for step in scenario.steps:
            self._status_update_step(scenario, step)

        if scenario.status is Status.PENDING:
            feature.status = Status.PENDING

    def _status_update_step(self, scenario, step):
        if scenario.status is Status.SKIPPED:
            step.status = Status.SKIPPED

        else:
            step.status = Status.PENDING

    def _skip_or_pending(self, item):
        if self.wip_tag_flag and item.tag == Tags.WIP:
            item.status = Status.PENDING
            return

        if not self.wip_tag_flag and (item.tag != Tags.DISABLED):  # todo != or is not
            item.status = Status.PENDING
            return

        item.status = Status.SKIPPED
Example #30
class Foxpath():
    mappings = []

    def __init__(self, stepfile_filepath):
        self._load_step_definitions(stepfile_filepath)
        self.gherkinparser = GherkinParser()

    def _load_step_definitions(self, filepath):
        Foxpath.mappings = []
        # remarkably, this seems to be sufficient
        if six.PY3:
            SourceFileLoader('', filepath).load_module()
        else:
            load_source('', filepath)

    def load_feature(self, feature_txt, codelists={}, today=None):
        if today:
            today = datetime.strptime(today, '%Y-%m-%d').date()
        else:
            today = datetime.today().date()
        kwargs = {
            'codelists': codelists,
            'today': today,
        }
        return self._gherkinify_feature(feature_txt, **kwargs)

    def _find_matching_expr(self, mappings, line):
        for regex, fn in mappings:
            r = regex.match(line)
            if r:
                return fn, r.groups()
        print('I did not understand {}'.format(line))

    def _parse(self, ctx, **kwargs):
        def __parse(activity):
            for step_type, expr_fn, expr_groups in ctx:
                result = True
                try:
                    if expr_groups:
                        expr_fn(activity, *expr_groups, **kwargs)
                    else:
                        expr_fn(activity, **kwargs)
                except StepException as e:
                    result = False
                    explain = str(e)
                if step_type == 'then':
                    if result:
                        return True, ''
                    else:
                        return False, explain
                else:
                    if not result:
                        return None, explain
                    else:
                        pass
        return __parse

    def _gherkinify_feature(self, feature_txt, **kwargs):
        feature = self.gherkinparser.parse(feature_txt)
        feature = feature['feature']
        feature_name = feature['name']
        tests = []
        for test in feature['children']:
            test_name = test['name']
            test_steps = test['steps']
            ctx = []
            step_type = 'given'
            for step in test_steps:
                if step['keyword'].lower().strip() == 'then':
                    step_type = 'then'
                expr_fn, expr_groups = self._find_matching_expr(
                    Foxpath.mappings, step['text'])
                ctx.append((step_type, expr_fn, expr_groups))
            tests.append((test_name, self._parse(ctx, **kwargs)))
        return (feature_name, tests)
Example #31
def validate_feature_file(feature_file, unallowed_tags):
    """Validates a feature file.

	Args:
		feature_file_path: the path to the feature file.
	Returns:
		a list of errors.
	"""

    file_status, feature_file_path = feature_file

    with open(feature_file_path, "r") as fp:
        contents = fp.read()

    parser = Parser()
    try:
        feature_file = parser.parse(TokenScanner(contents))
    except Exception as e:
        return [
            "[ERROR] Errors exist in " + feature_file_path,
            "\t- Could not parse the file! " + str(e)
        ]

    errors = []
    feature_tag_names = [
        tag["name"] for tag in feature_file["feature"]["tags"]
    ]
    scenarios = [
        feature_child for feature_child in feature_file["feature"]["children"]
        if feature_child['type'] == 'Scenario'
        or feature_child['type'] == 'ScenarioOutline'
    ]

    # validate tags in the feature
    for unallowed_tag in set(unallowed_tags).intersection(feature_tag_names):
        errors.append(
            "\t- Remove the %s tag from the feature before you commit" %
            unallowed_tag)

    # validate tags in all the scenarios
    for scenario in scenarios:
        for tag in scenario["tags"]:
            if tag["name"] in unallowed_tags:
                errors.append(
                    "\t- Before you commit, remove the %s tag from the following scenario:\n\t\t'%s'"
                    % (tag["name"], scenario["name"]))

    # validate scenario numbers
    prev_scenario_num = "0"
    for curr_scenario in scenarios:
        # validate presence
        if "." not in curr_scenario["name"]:
            errors.append(
                "\t- The following scenario needs to start with a number followed by a period: '%s'"
                % curr_scenario["name"])
            break
        curr_scenario_num = curr_scenario["name"].split(".")[0].strip()
        if not curr_scenario_num or curr_scenario_num.isalpha():
            errors.append(
                "\t- The following scenario needs to start with a number: '%s'"
                % curr_scenario["name"])
            break
        # validate ordering
        if prev_scenario_num.isdigit():
            # previous scenario didn't have a letter
            if curr_scenario_num.isdigit():
                # current scenario doesn't have a letter
                if int(curr_scenario_num) != int(prev_scenario_num) + 1:
                    errors.append(
                        "\t- The ordering of the scenarios breaks down on Scenario '%s'"
                        % curr_scenario_num)
                    break
            else:
                # current scenario has a letter
                if curr_scenario_num[-1] != "a":
                    errors.append(
                        "\t- The ordering of the scenarios breaks down on Scenario '%s'"
                        % curr_scenario_num)
                    break
        else:
            # previous scenario had a letter
            prev_scenario_letter = prev_scenario_num[-1]
            if curr_scenario_num.isdigit():
                # current scenario doesn't have a letter
                if int(curr_scenario_num) != int(prev_scenario_num[:-1]) + 1:
                    if ord(curr_scenario_num[-1]
                           ) != ord(prev_scenario_letter) + 1:
                        errors.append(
                            "\t- The ordering of the scenarios breaks down on Scenario '%s'"
                            % curr_scenario_num)
                        break
            else:
                # current scenario has a letter
                if int(curr_scenario_num[:-1]) != int(
                        prev_scenario_num[:-1]) + 1:
                    # number has not been incremented
                    if ord(curr_scenario_num[-1]
                           ) != ord(prev_scenario_letter) + 1:
                        errors.append(
                            "\t- The ordering of the scenarios breaks down on Scenario '%s'"
                            % curr_scenario_num)
                        break
                else:
                    # number has been incremented
                    if curr_scenario_num[-1] != "a":
                        errors.append(
                            "\t- The ordering of the scenarios breaks down on Scenario '%s'"
                            % curr_scenario_num)
                        break
        prev_scenario_num = curr_scenario_num

    if errors:
        errors.insert(0, "[ERROR] Errors exist in " + feature_file_path)

    return errors
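A minimal sketch of calling validate_feature_file as defined above; the (status, path) tuple mirrors the shape the function unpacks, and the path and tag names are placeholders.

errors = validate_feature_file(("M", "features/checkout.feature"),
                               unallowed_tags=["@wip", "@manual"])
for error in errors:
    print(error)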
Example #32
import codecs
import os
import sys
if sys.version_info < (3, 0):
    import codecs
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from gherkin.token_scanner import TokenScanner
from gherkin.token_formatter_builder import TokenFormatterBuilder
from gherkin.parser import Parser

files = sys.argv[1:]
if sys.version_info < (3, 0) and os.name != 'nt':  # for Python2 unless on Windows native
    UTF8Writer = codecs.getwriter('utf8')
    sys.stdout = UTF8Writer(sys.stdout)
parser = Parser(TokenFormatterBuilder())
for file in files:
    scanner = TokenScanner(file)
    print(parser.parse(scanner))
Example #33
def parseFeatureFile(feature_file):
    parser = Parser()
    feature = parser.parse(TokenScanner(feature_file))
    return feature
Example #34
 def test_should_raise_when_feature_does_not_parse(self):
     p = Parser(Mock(PrettyFormatter))
     with tools.assert_raises(ParseError):
         p.parse(u"Feature: f\nFeature: f", __file__,
                 inspect.currentframe().f_back.f_lineno - 1)
Example #35
# source:
# https://stackoverflow.com/questions/43107367/how-to-use-gherkin-official

# see also:
# https://github.com/cucumber/gherkin-python/tree/master/test
# https://github.com/cucumber-attic/gherkin
from gherkin.token_scanner import TokenScanner
from gherkin.parser import Parser

parser = Parser()
feature_file = parser.parse(
    TokenScanner('''
Feature: Foo
  Background:

  Scenario Outline: 12
  
  Scenario Outline: 15

'''))
print(feature_file)