Example #1
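A `runtest` implementation that builds the global config from parsed command-line arguments, merges fixture values into the `variables` mapping, then verifies the test spec against its schema and runs it. The internal `_xfail` key (`"verify"` or `"run"`) marks a test that is expected to fail at that particular stage.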
    def runtest(self):
        self.global_cfg = self._parse_arguments()

        self.global_cfg.setdefault("variables", {})

        load_plugins(self.global_cfg)

        # INTERNAL
        # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
        # instead. This doesn't differentiate between an error in verification
        # and an error when running the test though.
        xfail = self.spec.get("_xfail", False)

        try:
            verify_tests(self.spec)

            fixture_values = self._load_fixture_values()
            self.global_cfg["variables"].update(fixture_values)

            run_test(self.path, self.spec, self.global_cfg)
        except exceptions.BadSchemaError:
            if xfail == "verify":
                logger.info("xfailing test while verifying schema")
            else:
                raise
        except exceptions.TavernException:
            if xfail == "run":
                logger.info("xfailing test when running")
            else:
                raise
        else:
            if xfail:
                logger.error("Expected test to fail")
                raise exceptions.TestFailError(
                    "Expected test to fail at {} stage".format(xfail))
Example #2
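An older variant that assembles the global config inline: ini-file paths are loaded first, then command-line paths so that command-line values can override them; strict-mode values from the ini file are validated against the allowed keys (body, headers, redirect_query_params); and each backend (http, mqtt) is chosen with the same ini-then-command-line precedence.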
    def runtest(self):
        # Load ini first
        ini_global_cfg_paths = self.config.getini("tavern-global-cfg") or []
        # THEN load command line, to allow overwriting of values
        cmdline_global_cfg_paths = self.config.getoption(
            "tavern_global_cfg") or []

        all_paths = ini_global_cfg_paths + cmdline_global_cfg_paths
        global_cfg = load_global_config(all_paths)

        if self.config.getini("tavern-strict") is not None:
            strict = self.config.getini("tavern-strict")
            if isinstance(strict, list):
                if any(i not in ["body", "headers", "redirect_query_params"]
                       for i in strict):
                    raise exceptions.UnexpectedKeysError(
                        "Invalid values for 'strict' use in config file")
        elif self.config.getoption("tavern_strict") is not None:
            strict = self.config.getoption("tavern_strict")
        else:
            strict = []

        global_cfg["strict"] = strict

        global_cfg["backends"] = {}
        backends = ["http", "mqtt"]
        for b in backends:
            # similar logic to above - use ini, then cmdline if present
            ini_opt = self.config.getini("tavern-{}-backend".format(b))
            cli_opt = self.config.getoption("tavern_{}_backend".format(b))

            in_use = ini_opt
            if cli_opt and (cli_opt != ini_opt):
                in_use = cli_opt

            global_cfg["backends"][b] = in_use

        load_plugins(global_cfg)

        # INTERNAL
        xfail = self.spec.get("_xfail", False)

        try:
            verify_tests(self.spec)
            run_test(self.path, self.spec, global_cfg)
        except exceptions.BadSchemaError:
            if xfail == "verify":
                logger.info("xfailing test while verifying schema")
            else:
                raise
        except exceptions.TavernException:
            if xfail == "run":
                logger.info("xfailing test when running")
            else:
                raise
        else:
            if xfail:
                logger.error("Expected test to fail")
                raise exceptions.TestFailError(
                    "Expected test to fail at {} stage".format(xfail))
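The ini-then-command-line precedence used for the backend options can be factored into a small helper; a sketch assuming plain values like those returned by the pytest config accessors above (the backend names are illustrative):

    def pick_option(ini_opt, cli_opt):
        # Use the ini value by default; a differing, non-empty
        # command-line value overrides it.
        in_use = ini_opt
        if cli_opt and cli_opt != ini_opt:
            in_use = cli_opt
        return in_use

    assert pick_option("requests", None) == "requests"
    assert pick_option("requests", "custom") == "custom"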
Example #3
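A fuller variant that deep-copies the shared global config (to avoid state leaking between tests), registers the pytest hook caller under `tavern_internal`, fires the `pytest_tavern_beta_before_every_test_run` hook before verification, and defaults each stage's `name` from its `id` before running.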
    def runtest(self):
        # Do a deep copy because this sometimes still retains things from previous tests(?)
        self.global_cfg = copy.deepcopy(load_global_cfg(self.config))

        self.global_cfg.setdefault("variables", {})

        load_plugins(self.global_cfg)

        self.global_cfg["tavern_internal"] = {"pytest_hook_caller": self.config.hook}

        # INTERNAL
        # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
        # instead. This doesn't differentiate between an error in verification
        # and an error when running the test though.
        xfail = self.spec.get("_xfail", False)

        try:
            fixture_values = self._load_fixture_values()
            self.global_cfg["variables"].update(fixture_values)

            call_hook(
                self.global_cfg,
                "pytest_tavern_beta_before_every_test_run",
                test_dict=self.spec,
                variables=self.global_cfg["variables"],
            )

            verify_tests(self.spec)

            for stage in self.spec["stages"]:
                if not stage.get("name"):
                    if not stage.get("id"):
                        # Should never actually reach here, should be caught at schema check time
                        raise exceptions.BadSchemaError(
                            "One of name or ID must be specified"
                        )

                    stage["name"] = stage["id"]

            run_test(self.path, self.spec, self.global_cfg)
        except exceptions.BadSchemaError:
            if xfail == "verify":
                logger.info("xfailing test while verifying schema")
            else:
                raise
        except exceptions.TavernException:
            if xfail == "run":
                logger.info("xfailing test when running")
            else:
                raise
        else:
            if xfail:
                logger.error("Expected test to fail")
                raise exceptions.TestFailError(
                    "Expected test to fail at {} stage".format(xfail)
                )
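For the hook call above, a test suite can implement `pytest_tavern_beta_before_every_test_run` in its conftest.py; a minimal sketch using the keyword arguments passed by the call above (the injected variable is illustrative):

    # conftest.py
    def pytest_tavern_beta_before_every_test_run(test_dict, variables):
        # Called once per Tavern test, before schema verification.
        # Mutating `variables` exposes the value to test formatting.
        variables["run_id"] = "local-run"  # illustrative value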
Example #4
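The same flow as Example #3, but without the defensive deep copy and without the stage name/id defaulting loop; the config returned by `load_global_cfg` is used directly.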
    def runtest(self):
        self.global_cfg = load_global_cfg(self.config)

        self.global_cfg.setdefault("variables", {})

        load_plugins(self.global_cfg)

        self.global_cfg["tavern_internal"] = {
            "pytest_hook_caller": self.config.hook
        }

        # INTERNAL
        # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
        # instead. This doesn't differentiate between an error in verification
        # and an error when running the test though.
        xfail = self.spec.get("_xfail", False)

        try:
            fixture_values = self._load_fixture_values()
            self.global_cfg["variables"].update(fixture_values)

            call_hook(
                self.global_cfg,
                "pytest_tavern_beta_before_every_test_run",
                test_dict=self.spec,
                variables=self.global_cfg["variables"],
            )

            verify_tests(self.spec)

            run_test(self.path, self.spec, self.global_cfg)
        except exceptions.BadSchemaError:
            if xfail == "verify":
                logger.info("xfailing test while verifying schema")
            else:
                raise
        except exceptions.TavernException:
            if xfail == "run":
                logger.info("xfailing test when running")
            else:
                raise
        else:
            if xfail:
                logger.error("Expected test to fail")
                raise exceptions.TestFailError(
                    "Expected test to fail at {} stage".format(xfail))
Example #5
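A schema loader that memoizes the plugin-extended schema under a mangled cache key, so the base schema is deep-copied and extended with each plugin's `initialisation` mapping only once per filename.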
    def _load_schema_with_plugins(self, schema_filename):
        mangled = "{}-plugins".format(schema_filename)

        try:
            return self._loaded[mangled]
        except KeyError:
            plugins = load_plugins()
            base_schema = copy.deepcopy(self._load_base_schema(schema_filename))

            logger.debug("Adding plugins to schema: %s", plugins)

            for p in plugins:
                try:
                    plugin_schema = p.plugin.schema
                except AttributeError:
                    # Don't require a schema
                    logger.debug("No schema defined for %s", p.name)
                else:
                    base_schema["mapping"].update(plugin_schema.get("initialisation", {}))

            self._loaded[mangled] = base_schema
            return self._loaded[mangled]
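The memoization pattern in Example #5 (build once under a mangled key, then serve from the cache) shown in isolation; a sketch with a stand-in loader in place of `_load_base_schema`:

    import copy

    class SchemaCache:
        def __init__(self):
            self._loaded = {}

        def _load_base_schema(self, filename):
            # Stand-in for the real schema file loader
            return {"mapping": {}}

        def load_with_plugins(self, filename, plugin_mappings):
            mangled = "{}-plugins".format(filename)
            try:
                return self._loaded[mangled]
            except KeyError:
                # Deep copy so the cached base schema is never mutated
                base = copy.deepcopy(self._load_base_schema(filename))
                for extra in plugin_mappings:
                    base["mapping"].update(extra)
                self._loaded[mangled] = base
                return base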