Example #1
    def collect(self):
        """Load each document in the given input file into a different test

        Yields:
            YamlItem: Essentially an individual pytest 'test object'
        """

        try:
            # Convert to a list so we can catch parser exceptions
            all_tests = list(
                yaml.load_all(self.fspath.open(encoding="utf-8"),
                              Loader=IncludeLoader))
        except yaml.parser.ParserError as e:
            raise_from(exceptions.BadSchemaError, e)

        for test_spec in all_tests:
            if not test_spec:
                logger.warning("Empty document in input file '%s'",
                               self.fspath)
                continue

            try:
                for i in self._generate_items(test_spec):
                    i.initialise_fixture_attrs()
                    yield i
            except (TypeError, KeyError):
                verify_tests(test_spec, with_plugins=False)
                raise
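
The collect() method above only runs once pytest hands the YAML file to the collector class it belongs to. For context, the sketch below shows how such a collector is typically wired up in a conftest.py; the YamlFile class name and the filename check are illustrative assumptions rather than something taken from the snippet itself.

    # conftest.py (illustrative wiring, not part of the example above)
    def pytest_collect_file(parent, path):
        # Hand matching YAML files to the collector whose collect() method
        # is shown above; pytest then calls collect() to yield test items.
        if path.basename.startswith("test") and path.ext == ".yaml":
            # pytest >= 5.4 style; older versions constructed YamlFile(path, parent)
            return YamlFile.from_parent(parent, fspath=path)
        return None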
Example #2
    def collect(self):
        """Load each document in the given input file into a different test

        Yields:
            YamlItem: Essentially an individual pytest 'test object'
        """

        try:
            # Convert to a list so we can catch parser exceptions
            all_tests = list(
                yaml.load_all(self.fspath.open(), Loader=IncludeLoader))
        except yaml.parser.ParserError as e:
            raise_from(exceptions.BadSchemaError, e)

        for test_spec in all_tests:
            if not test_spec:
                logger.warning("Empty document in input file '%s'",
                               self.fspath)
                continue

            try:
                yield YamlItem(test_spec["test_name"], self, test_spec,
                               self.fspath)
            except (TypeError, KeyError):
                # Produce a clearer schema error if the spec is malformed;
                # if it is actually valid, re-raise the original error
                # instead of silently dropping the test
                verify_tests(test_spec)
                raise
Example #3
    def runtest(self):
        self.global_cfg = self._parse_arguments()

        self.global_cfg.setdefault("variables", {})

        load_plugins(self.global_cfg)

        # INTERNAL
        # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
        # instead. This doesn't differentiate between an error in verification
        # and an error when running the test though.
        xfail = self.spec.get("_xfail", False)

        try:
            verify_tests(self.spec)

            fixture_values = self._load_fixture_values()
            self.global_cfg["variables"].update(fixture_values)

            run_test(self.path, self.spec, self.global_cfg)
        except exceptions.BadSchemaError:
            if xfail == "verify":
                logger.info("xfailing test while verifying schema")
            else:
                raise
        except exceptions.TavernException:
            if xfail == "run":
                logger.info("xfailing test when running")
            else:
                raise
        else:
            if xfail:
                logger.error("Expected test to fail")
                raise exceptions.TestFailError(
                    "Expected test to fail at {} stage".format(xfail))
Example #4
    def runtest(self):
        # Load ini first
        ini_global_cfg_paths = self.config.getini("tavern-global-cfg") or []
        # THEN load command line, to allow overwriting of values
        cmdline_global_cfg_paths = self.config.getoption(
            "tavern_global_cfg") or []

        all_paths = ini_global_cfg_paths + cmdline_global_cfg_paths
        global_cfg = load_global_config(all_paths)

        if self.config.getini("tavern-strict") is not None:
            strict = self.config.getini("tavern-strict")
            if isinstance(strict, list):
                if any(i not in ["body", "headers", "redirect_query_params"]
                       for i in strict):
                    raise exceptions.UnexpectedKeysError(
                        "Invalid values for 'strict' use in config file")
        elif self.config.getoption("tavern_strict") is not None:
            strict = self.config.getoption("tavern_strict")
        else:
            strict = []

        global_cfg["strict"] = strict

        global_cfg["backends"] = {}
        backends = ["http", "mqtt"]
        for b in backends:
            # similar logic to above - use ini, then cmdline if present
            ini_opt = self.config.getini("tavern-{}-backend".format(b))
            cli_opt = self.config.getoption("tavern_{}_backend".format(b))

            in_use = ini_opt
            if cli_opt and (cli_opt != ini_opt):
                in_use = cli_opt

            global_cfg["backends"][b] = in_use

        load_plugins(global_cfg)

        # INTERNAL
        xfail = self.spec.get("_xfail", False)

        try:
            verify_tests(self.spec)
            run_test(self.path, self.spec, global_cfg)
        except exceptions.BadSchemaError:
            if xfail == "verify":
                logger.info("xfailing test while verifying schema")
            else:
                raise
        except exceptions.TavernException:
            if xfail == "run":
                logger.info("xfailing test when running")
            else:
                raise
        else:
            if xfail:
                logger.error("Expected test to fail")
                raise exceptions.TestFailError
Example #5
    def collect(self):
        """Load each document in the given input file into a different test

        Yields:
            YamlItem: Essentially an individual pytest 'test object'
        """
        for test_spec in yaml.load_all(self.fspath.open(), Loader=IncludeLoader):
            verify_tests(test_spec)
            yield YamlItem(test_spec["test_name"], self, test_spec, self.fspath)
Example #6
    def runtest(self):
        # Do a deep copy because this sometimes still retains things from previous tests(?)
        self.global_cfg = copy.deepcopy(load_global_cfg(self.config))

        self.global_cfg.setdefault("variables", {})

        load_plugins(self.global_cfg)

        self.global_cfg["tavern_internal"] = {"pytest_hook_caller": self.config.hook}

        # INTERNAL
        # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
        # instead. This doesn't differentiate between an error in verification
        # and an error when running the test though.
        xfail = self.spec.get("_xfail", False)

        try:
            fixture_values = self._load_fixture_values()
            self.global_cfg["variables"].update(fixture_values)

            call_hook(
                self.global_cfg,
                "pytest_tavern_beta_before_every_test_run",
                test_dict=self.spec,
                variables=self.global_cfg["variables"],
            )

            verify_tests(self.spec)

            for stage in self.spec["stages"]:
                if not stage.get("name"):
                    if not stage.get("id"):
                        # Should never actually reach here, should be caught at schema check time
                        raise exceptions.BadSchemaError(
                            "One of name or ID must be specified"
                        )

                    stage["name"] = stage["id"]

            run_test(self.path, self.spec, self.global_cfg)
        except exceptions.BadSchemaError:
            if xfail == "verify":
                logger.info("xfailing test while verifying schema")
            else:
                raise
        except exceptions.TavernException:
            if xfail == "run":
                logger.info("xfailing test when running")
            else:
                raise
        else:
            if xfail:
                logger.error("Expected test to fail")
                raise exceptions.TestFailError(
                    "Expected test to fail at {} stage".format(xfail)
                )
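
The call_hook() call in this version fires a pytest_tavern_beta_before_every_test_run hook with the test dict and the merged variables, matching the keyword arguments shown above. A minimal, illustrative conftest-side implementation of that hook might look like this (only the signature comes from the call above; the body is an assumption):

    # conftest.py (illustrative hook implementation)
    def pytest_tavern_beta_before_every_test_run(test_dict, variables):
        # Runs before each Tavern test is verified and executed; mutating
        # 'variables' here makes extra values available to the test stages.
        variables["injected_by_hook"] = "example-value"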
Example #7
    def test_timeout_tuple_fail(self, test_dict, incorrect_value):
        """Timeout must be a list of floats or a float"""
        test_dict["stages"][0]["request"]["timeout"] = [1, incorrect_value]

        with pytest.raises(BadSchemaError):
            verify_tests(test_dict)

        test_dict["stages"][0]["request"]["timeout"] = [incorrect_value, 1]

        with pytest.raises(BadSchemaError):
            verify_tests(test_dict)
Example #8
    def runtest(self):
        verify_tests(self.spec)

        # Load ini first
        ini_global_cfg_paths = self.config.getini("tavern-global-cfg") or []
        # THEN load command line, to allow overwriting of values
        cmdline_global_cfg_paths = self.config.getoption("tavern_global_cfg") or []

        all_paths = ini_global_cfg_paths + cmdline_global_cfg_paths
        global_cfg = load_global_config(all_paths)

        run_test(self.path, self.spec, global_cfg)
Example #9
    def runtest(self):
        self.global_cfg = load_global_cfg(self.config)

        self.global_cfg.setdefault("variables", {})

        load_plugins(self.global_cfg)

        self.global_cfg["tavern_internal"] = {
            "pytest_hook_caller": self.config.hook
        }

        # INTERNAL
        # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
        # instead. This doesn't differentiate between an error in verification
        # and an error when running the test though.
        xfail = self.spec.get("_xfail", False)

        try:
            fixture_values = self._load_fixture_values()
            self.global_cfg["variables"].update(fixture_values)

            call_hook(
                self.global_cfg,
                "pytest_tavern_beta_before_every_test_run",
                test_dict=self.spec,
                variables=self.global_cfg["variables"],
            )

            verify_tests(self.spec)

            run_test(self.path, self.spec, self.global_cfg)
        except exceptions.BadSchemaError:
            if xfail == "verify":
                logger.info("xfailing test while verifying schema")
            else:
                raise
        except exceptions.TavernException:
            if xfail == "run":
                logger.info("xfailing test when running")
            else:
                raise
        else:
            if xfail:
                logger.error("Expected test to fail")
                raise exceptions.TestFailError(
                    "Expected test to fail at {} stage".format(xfail))
Example #10
    def test_simple_json_body(self, test_dict):
        """Simple json dict in request and response"""
        verify_tests(test_dict)
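
The schema tests in this listing all mutate a fixture-provided test_dict before calling verify_tests. As a rough guide, the fixture builds something shaped like the sketch below; the exact URL, names, and values are illustrative and simply follow Tavern's documented test format.

    # Illustrative shape of the fixture-provided test_dict
    test_dict = {
        "test_name": "A simple test",
        "stages": [
            {
                "name": "Make a request and check the response",
                "request": {
                    "url": "http://example.com/api/endpoint",
                    "method": "GET",
                },
                "response": {
                    "status_code": 200,
                },
            }
        ],
    }

    # A well-formed spec passes silently; malformed ones raise BadSchemaError
    verify_tests(test_dict)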
Example #11
    def test_header_request_list(self, test_dict):
        """Parameters must always be a dict"""
        test_dict["stages"][0]["request"]["params"] = [1, "text", -1]

        with pytest.raises(BadSchemaError):
            verify_tests(test_dict)
Example #12
    def test_cert_as_string_tuple_list(self, test_dict, correct_value):
        """Valid 'cert' values should pass schema verification"""
        test_dict["stages"][0]["request"]["cert"] = correct_value
        verify_tests(test_dict)
Example #13
    def test_headers_response_list(self, test_dict):
        """Headers must always be a dict"""
        test_dict["stages"][0]["response"]["headers"] = [1, "text", -1]

        with pytest.raises(BadSchemaError):
            verify_tests(test_dict)
Example #14
    def test_cert_as_tuple(self, test_dict, incorrect_value):
        """Invalid 'cert' values should fail schema verification"""
        test_dict["stages"][0]["request"]["cert"] = incorrect_value

        with pytest.raises(BadSchemaError):
            verify_tests(test_dict)
Example #15
    def test_json_value_request(self, test_dict):
        """Don't match other stuff"""
        test_dict["stages"][0]["request"]["json"] = "Hello"

        with pytest.raises(BadSchemaError):
            verify_tests(test_dict)
Example #16
    def test_json_value_response(self, test_dict):
        """Don't match other stuff"""
        test_dict["stages"][0]["response"]["body"] = "Hi"

        with pytest.raises(BadSchemaError):
            verify_tests(test_dict)
Example #17
    def test_json_list_response(self, test_dict):
        """Response contains a list"""
        test_dict["stages"][0]["response"]["body"] = [1, "text", -1]

        verify_tests(test_dict)
Example #18
    def test_json_list_request(self, test_dict):
        """Request contains a list"""
        test_dict["stages"][0]["request"]["json"] = [1, "text", -1]

        verify_tests(test_dict)
Example #19
    def collect(self):
        """Load each document in the given input file into a different test

        Yields:
            YamlItem: Essentially an individual pytest 'test object'
        """
        # pylint: disable=too-many-nested-blocks

        try:
            # Convert to a list so we can catch parser exceptions
            all_tests = list(
                yaml.load_all(self.fspath.open(encoding="utf-8"),
                              Loader=IncludeLoader))
        except yaml.parser.ParserError as e:
            raise_from(exceptions.BadSchemaError, e)

        for test_spec in all_tests:
            if not test_spec:
                logger.warning("Empty document in input file '%s'",
                               self.fspath)
                continue

            try:
                item = YamlItem(test_spec["test_name"], self, test_spec,
                                self.fspath)
            except (TypeError, KeyError):
                verify_tests(test_spec)
                raise

            marks = test_spec.get("marks", [])

            if marks:
                # Get included variables so we can do things like:
                # skipif: {my_integer} > 2
                # skipif: 'https' in '{hostname}'
                # skipif: '{hostname}'.contains('ignoreme')
                included = test_spec.get("includes", [])
                fmt_vars = {}
                for i in included:
                    fmt_vars.update(**i.get("variables", {}))

                pytest_marks = []

                # This should either be a string or a skipif
                for m in marks:
                    if isinstance(m, str):
                        m = format_keys(m, fmt_vars)
                        pytest_marks.append(getattr(pytest.mark, m))
                    elif isinstance(m, dict):
                        for markname, extra_arg in m.items():
                            if markname == "parametrize":
                                raise NotImplementedError(
                                    "'parametrize' is not currently implemented"
                                )

                            extra_arg = format_keys(extra_arg, fmt_vars)
                            pytest_marks.append(
                                getattr(pytest.mark, markname)(extra_arg))

                for pm in pytest_marks:
                    item.add_marker(pm)

            yield item
Example #20
    def test_verify_with_incorrect_value(self, test_dict, incorrect_value):
        """Invalid 'verify' values should fail schema verification"""
        test_dict["stages"][0]["request"]["verify"] = incorrect_value

        with pytest.raises(BadSchemaError):
            verify_tests(test_dict)
Example #21
    def test_verify_with_string_boolean(self, test_dict, correct_value):
        """Valid 'verify' values should pass schema verification"""
        test_dict["stages"][0]["request"]["verify"] = correct_value
        verify_tests(test_dict)
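
Taken together, the examples reduce to one call pattern: build or load a test spec dict, call verify_tests, and treat BadSchemaError as a validation failure. The standalone sketch below assumes the 1.x-era import paths (tavern.schemas.files and tavern.util.exceptions); newer releases may lay the package out differently.

    # Standalone sketch; the import paths are an assumption, see note above
    from tavern.schemas.files import verify_tests
    from tavern.util.exceptions import BadSchemaError

    def is_valid_tavern_spec(test_spec):
        """Return True if the spec passes Tavern's schema verification."""
        try:
            verify_tests(test_spec)
        except BadSchemaError:
            return False
        return True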