def test_format_request_var_value(self, fulltest, mockargs, includes, request_key):
    """Variables from request should be available to format in response"""
    value_sent = str(uuid.uuid4())

    first_stage = fulltest["stages"][0]
    first_stage["request"]["method"] = "POST"
    first_stage["request"][request_key] = value_sent

    resp_key = request_key
    mockargs[request_key] = {"returned": value_sent}

    # The response block formats the value back out of tavern.request_vars
    first_stage["response"][resp_key] = {
        "returned": "{tavern.request_vars.%s:s}" % request_key
    }

    fake_response = Mock(**mockargs)

    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        return_value=fake_response,
    ) as req_mock:
        run_test("heif", fulltest, includes)

    assert req_mock.called
def test_run_once(self, fulltest, mockargs, includes):
    """A single-stage test should hit the request session exactly once."""
    fake_response = Mock(**mockargs)

    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        return_value=fake_response,
    ) as req_mock:
        run_test("heif", fulltest, includes)

    assert req_mock.call_count == 1
def test_format_request_var_dict(self, fulltest, includes):
    """Variables from request should be available to format in response

    NOTE: this is the original keys in the input file, NOT the formatted ones
    where 'json' is converted to 'payload' in the actual MQTT publish
    """
    publish_stage = fulltest["stages"][0]
    sent = publish_stage["mqtt_publish"]["json"]

    # Fake MQTT message echoing back what the stage published
    fake_message = Mock(
        spec=paho.MQTTMessage,
        payload=json.dumps({"echo": sent["message"]}).encode("utf8"),
        topic=publish_stage["mqtt_publish"]["topic"],
    )

    fake_client = MagicMock(
        spec=MQTTClient,
        message_received=Mock(return_value=fake_message),
    )

    with patch("tavern._plugins.mqtt.client.paho.Client", fake_client), patch(
        "tavern.core.get_extra_sessions",
        return_value={"paho-mqtt": fake_client},
    ) as sessions_mock:
        run_test("heif", fulltest, includes)

    assert sessions_mock.called
def runtest(self):
    """Parse global config, verify the test spec, then run it.

    A spec carrying '_xfail' ("verify" or "run") is expected to fail at that
    stage; if it unexpectedly passes, TestFailError is raised.
    """
    self.global_cfg = self._parse_arguments()
    self.global_cfg.setdefault("variables", {})

    load_plugins(self.global_cfg)

    # INTERNAL
    # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
    # instead. This doesn't differentiate between an error in verification
    # and an error when running the test though.
    expect_fail = self.spec.get("_xfail", False)

    try:
        verify_tests(self.spec)

        fixture_values = self._load_fixture_values()
        self.global_cfg["variables"].update(fixture_values)

        run_test(self.path, self.spec, self.global_cfg)
    except exceptions.BadSchemaError:
        if expect_fail == "verify":
            logger.info("xfailing test while verifying schema")
        else:
            raise
    except exceptions.TavernException:
        if expect_fail == "run":
            logger.info("xfailing test when running")
        else:
            raise
    else:
        if expect_fail:
            logger.error("Expected test to fail")
            raise exceptions.TestFailError(
                "Expected test to fail at {} stage".format(expect_fail)
            )
def runtest(self):
    """Load global configuration (ini first, then command line), verify and run the test.

    Also resolves the 'strict' setting and the per-protocol backend choice,
    giving command-line options precedence over ini values.

    Raises:
        exceptions.UnexpectedKeysError: if the 'tavern-strict' ini setting
            contains an unrecognised value.
        exceptions.TestFailError: if a test marked '_xfail' unexpectedly passes.
    """
    # Load ini first
    ini_global_cfg_paths = self.config.getini("tavern-global-cfg") or []
    # THEN load command line, to allow overwriting of values
    cmdline_global_cfg_paths = self.config.getoption("tavern_global_cfg") or []

    all_paths = ini_global_cfg_paths + cmdline_global_cfg_paths
    global_cfg = load_global_config(all_paths)

    if self.config.getini("tavern-strict") is not None:
        strict = self.config.getini("tavern-strict")
        if isinstance(strict, list):
            if any(
                i not in ["body", "headers", "redirect_query_params"] for i in strict
            ):
                raise exceptions.UnexpectedKeysError(
                    "Invalid values for 'strict' use in config file"
                )
    elif self.config.getoption("tavern_strict") is not None:
        strict = self.config.getoption("tavern_strict")
    else:
        strict = []

    global_cfg["strict"] = strict

    global_cfg["backends"] = {}
    backends = ["http", "mqtt"]
    for b in backends:
        # similar logic to above - use ini, then cmdline if present
        ini_opt = self.config.getini("tavern-{}-backend".format(b))
        cli_opt = self.config.getoption("tavern_{}_backend".format(b))

        in_use = ini_opt
        if cli_opt and (cli_opt != ini_opt):
            in_use = cli_opt

        global_cfg["backends"][b] = in_use

    load_plugins(global_cfg)

    # INTERNAL
    xfail = self.spec.get("_xfail", False)

    try:
        verify_tests(self.spec)
        run_test(self.path, self.spec, global_cfg)
    except exceptions.BadSchemaError:
        if xfail == "verify":
            logger.info("xfailing test while verifying schema")
        else:
            raise
    except exceptions.TavernException:
        if xfail == "run":
            logger.info("xfailing test when running")
        else:
            raise
    else:
        if xfail:
            logger.error("Expected test to fail")
            # FIX: previously raised the bare exception class with no message;
            # raise with a formatted message, consistent with the other
            # runtest implementations in this file.
            raise exceptions.TestFailError(
                "Expected test to fail at {} stage".format(xfail)
            )
def test_format_env_keys_missing_failure(self, fulltest, mockargs, includes):
    """Fails if key is not present"""
    env_key = "SPECIAL_CI_MAGIC_COMMIT_TAG"

    fulltest["stages"][0]["request"]["params"] = {
        "a_format_key": "{tavern.env_vars.%s}" % env_key
    }

    # Environment variable never set, so formatting must fail
    with pytest.raises(exceptions.MissingFormatError):
        run_test("heif", fulltest, includes)
def runtest(self):
    """Verify and run the test spec with hooks, fixtures and stage-name defaults."""
    # Do a deep copy because this sometimes still retains things from previous tests(?)
    self.global_cfg = copy.deepcopy(load_global_cfg(self.config))
    self.global_cfg.setdefault("variables", {})

    load_plugins(self.global_cfg)

    self.global_cfg["tavern_internal"] = {"pytest_hook_caller": self.config.hook}

    # INTERNAL
    # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
    # instead. This doesn't differentiate between an error in verification
    # and an error when running the test though.
    expect_fail = self.spec.get("_xfail", False)

    try:
        fixture_values = self._load_fixture_values()
        self.global_cfg["variables"].update(fixture_values)

        call_hook(
            self.global_cfg,
            "pytest_tavern_beta_before_every_test_run",
            test_dict=self.spec,
            variables=self.global_cfg["variables"],
        )

        verify_tests(self.spec)

        # Default each unnamed stage's name from its id
        for stage in self.spec["stages"]:
            if not stage.get("name"):
                if not stage.get("id"):
                    # Should never actually reach here, should be caught at schema check time
                    raise exceptions.BadSchemaError(
                        "One of name or ID must be specified"
                    )
                stage["name"] = stage["id"]

        run_test(self.path, self.spec, self.global_cfg)
    except exceptions.BadSchemaError:
        if expect_fail == "verify":
            logger.info("xfailing test while verifying schema")
        else:
            raise
    except exceptions.TavernException:
        if expect_fail == "run":
            logger.info("xfailing test when running")
        else:
            raise
    else:
        if expect_fail:
            logger.error("Expected test to fail")
            raise exceptions.TestFailError(
                "Expected test to fail at {} stage".format(expect_fail)
            )
def test_success(self, fulltest, mockargs, includes):
    """Successful test"""
    fake_response = Mock(**mockargs)

    with patch(
        "tavern.plugins.requests.Session.request", return_value=fake_response
    ) as req_mock:
        run_test("heif", fulltest, includes)

    assert req_mock.called
def runtest(self):
    """Run the test spec against the global config file given on the command line.

    If no global config path was supplied, run with an empty config dict.
    """
    global_cfg = self.config.getoption("tavern_global_cfg") or {}

    if global_cfg:
        with open(global_cfg, "r") as gfileobj:
            # FIX: yaml.load without an explicit Loader is deprecated and can
            # construct arbitrary python objects from the config file; use
            # the safe loader instead.
            contents = yaml.load(gfileobj, Loader=yaml.SafeLoader)
    else:
        contents = {}

    run_test(self.path, self.spec, contents)
def test_repeats_twice_and_fails(self, fulltest, mockargs, includes):
    """With max_retries=1 and a permanently bad status, the request is made
    twice and the test then fails."""
    fulltest["stages"][0]["max_retries"] = 1
    mockargs["status_code"] = 400

    fake_response = Mock(**mockargs)

    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        return_value=fake_response,
    ) as req_mock:
        with pytest.raises(exceptions.TestFailError):
            run_test("heif", fulltest, includes)

    assert req_mock.call_count == 2
def runtest(self):
    """Verify the spec, then run it against ini + command-line global config."""
    verify_tests(self.spec)

    # Load ini first
    ini_paths = self.config.getini("tavern-global-cfg") or []
    # THEN load command line, to allow overwriting of values
    cli_paths = self.config.getoption("tavern_global_cfg") or []

    global_cfg = load_global_config(ini_paths + cli_paths)

    run_test(self.path, self.spec, global_cfg)
def test_invalid_body(self, fulltest, mockargs, includes):
    """Wrong body returned"""
    mockargs["json"] = lambda: {"wrong": "thing"}

    fake_response = Mock(**mockargs)

    with patch(
        "tavern.plugins.requests.Session.request", return_value=fake_response
    ) as req_mock:
        with pytest.raises(exceptions.TestFailError):
            run_test("heif", fulltest, includes)

    assert req_mock.called
def test_sleep_after(self, fulltest, mockargs, includes):
    """Should sleep with delay_after in stage spec"""
    fulltest["stages"][0]["delay_after"] = 2

    fake_response = Mock(**mockargs)

    with patch(
        "tavern.plugins.requests.Session.request", return_value=fake_response
    ) as req_mock, patch("tavern.util.delay.time.sleep") as sleep_mock:
        run_test("heif", fulltest, includes)

    assert req_mock.called
    sleep_mock.assert_called_with(2)
def test_invalid_headers(self, fulltest, mockargs, includes):
    """Wrong headers"""
    mockargs["headers"] = {"content-type": "application/x-www-url-formencoded"}

    fake_response = Mock(**mockargs)

    with patch(
        "tavern.plugins.requests.Session.request", return_value=fake_response
    ) as req_mock:
        with pytest.raises(exceptions.TestFailError):
            run_test("heif", fulltest, includes)

    assert req_mock.called
def test_invalid_code(self, fulltest, mockargs, includes):
    """Wrong status code"""
    mockargs["status_code"] = 400

    fake_response = Mock(**mockargs)

    with patch(
        "tavern.plugins.requests.Session.request", return_value=fake_response
    ) as req_mock:
        with pytest.raises(exceptions.TestFailError):
            run_test("heif", fulltest, includes)

    assert req_mock.called
def test_repeats_twice_and_succeeds(self, fulltest, mockargs, includes):
    """With max_retries=1, a 400 followed by a good response passes after 2 calls."""
    fulltest["stages"][0]["max_retries"] = 1

    bad_kwargs = deepcopy(mockargs)
    bad_kwargs["status_code"] = 400

    # First call fails, second succeeds
    responses = [Mock(**bad_kwargs), Mock(**mockargs)]

    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        side_effect=responses,
    ) as req_mock:
        run_test("heif", fulltest, includes)

    assert req_mock.call_count == 2
def test_neither(self, fulltest, mockargs, includes, fake_stages):
    """Raises error if not defined"""
    fake_response = Mock(**mockargs)

    modified = deepcopy(fulltest)
    # No includes anywhere define the referenced stage
    modified["includes"] = []
    modified["stages"].insert(0, {"type": "ref", "id": "my_external_stage"})

    with pytest.raises(exceptions.InvalidStageReferenceError):
        with patch(
            "tavern._plugins.rest.request.requests.Session.request",
            return_value=fake_response,
        ):
            run_test("heif", modified, includes)
def test_format_env_keys(self, fulltest, mockargs, includes):
    """Should be able to get variables from the environment and use them in test responses"""
    env_key = "SPECIAL_CI_MAGIC_COMMIT_TAG"

    fulltest["stages"][0]["request"]["params"] = {
        "a_format_key": "{tavern.env_vars.%s}" % env_key
    }

    fake_response = Mock(**mockargs)

    with patch(
        "tavern.plugins.requests.Session.request", return_value=fake_response
    ) as req_mock, patch.dict(os.environ, {env_key: "bleuihg"}):
        run_test("heif", fulltest, includes)

    assert req_mock.called
def runtest(self):
    """Verify and run the test spec, calling the before-test hook first."""
    self.global_cfg = load_global_cfg(self.config)
    self.global_cfg.setdefault("variables", {})

    load_plugins(self.global_cfg)

    self.global_cfg["tavern_internal"] = {"pytest_hook_caller": self.config.hook}

    # INTERNAL
    # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
    # instead. This doesn't differentiate between an error in verification
    # and an error when running the test though.
    expect_fail = self.spec.get("_xfail", False)

    try:
        fixture_values = self._load_fixture_values()
        self.global_cfg["variables"].update(fixture_values)

        call_hook(
            self.global_cfg,
            "pytest_tavern_beta_before_every_test_run",
            test_dict=self.spec,
            variables=self.global_cfg["variables"],
        )

        verify_tests(self.spec)

        run_test(self.path, self.spec, self.global_cfg)
    except exceptions.BadSchemaError:
        if expect_fail == "verify":
            logger.info("xfailing test while verifying schema")
        else:
            raise
    except exceptions.TavernException:
        if expect_fail == "run":
            logger.info("xfailing test when running")
        else:
            raise
    else:
        if expect_fail:
            logger.error("Expected test to fail")
            raise exceptions.TestFailError(
                "Expected test to fail at {} stage".format(expect_fail)
            )
def test_included_stage(self, fulltest, mockargs, includes, fake_stages):
    """Load stage from includes"""
    fake_response = Mock(**mockargs)

    modified = deepcopy(fulltest)
    modified["includes"] = [{"stages": fake_stages}]
    modified["stages"].insert(0, {"type": "ref", "id": "my_external_stage"})

    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        return_value=fake_response,
    ) as req_mock:
        run_test("heif", modified, includes)

    self.check_mocks_called(req_mock)
def test_both_stages(self, fulltest, mockargs, includes, fake_stages):
    """Load stage defined in both - raise a warning for now"""
    fake_response = Mock(**mockargs)

    modified = deepcopy(fulltest)
    modified["includes"] = [{"stages": fake_stages}]
    modified["stages"].insert(0, {"type": "ref", "id": "my_external_stage"})

    # Same stage id also defined in the test's own includes -> duplicate
    includes["stages"] = fake_stages

    with pytest.raises(exceptions.DuplicateStageDefinitionError):
        with patch(
            "tavern._plugins.rest.request.requests.Session.request",
            return_value=fake_response,
        ) as req_mock:
            run_test("heif", modified, includes)

    assert not req_mock.called
def test_format_request_var_value(self, fulltest, includes):
    """Same as above but with plain keys"""
    publish_stage = fulltest["stages"][0]
    sent = publish_stage["mqtt_publish"]["payload"]

    # Fake MQTT message carrying the raw published payload back
    fake_message = Mock(
        spec=paho.MQTTMessage,
        payload=sent.encode("utf8"),
        topic=publish_stage["mqtt_publish"]["topic"],
    )

    fake_client = MagicMock(
        spec=MQTTClient,
        message_received=Mock(return_value=fake_message),
    )

    with patch("tavern._plugins.mqtt.client.paho.Client", fake_client), patch(
        "tavern.core.get_extra_sessions",
        return_value={"paho-mqtt": fake_client},
    ) as sessions_mock:
        run_test("heif", fulltest, includes)

    assert sessions_mock.called
def test_both_stages(self, fulltest, mockargs, includes, fake_stages):
    """Load stage defined in both - raise a warning for now"""
    fake_response = Mock(**mockargs)

    modified = deepcopy(fulltest)
    modified["includes"] = [{"stages": fake_stages}]
    modified["stages"].insert(0, {"type": "ref", "id": "my_external_stage"})

    # Same stage id also defined in the test's own includes -> warns
    includes["stages"] = fake_stages

    with pytest.warns(FutureWarning):
        with patch(
            "tavern._plugins.rest.request.requests.Session.request",
            return_value=fake_response,
        ) as req_mock:
            run_test("heif", modified, includes)

    self.check_mocks_called(req_mock)
def test_external_stage(self, fulltest, mockargs, includes):
    """Successfully load and run stage ref from the includes"""
    fake_response = Mock(**mockargs)

    modified = deepcopy(fulltest)
    modified["includes"] = [includes]
    modified["stages"].insert(0, {"type": "ref", "id": "my_external_stage"})

    with patch(
        "tavern._plugins.rest.request.requests.Session.request",
        return_value=fake_response,
    ) as req_mock:
        run_test("heif", modified, includes)

    assert req_mock.called

    # We expect 2 calls, first to bing (external stage),
    # then google (part of fulltest)
    assert len(req_mock.call_args_list) == 2

    _, first_kwargs = req_mock.call_args_list[0]
    assert first_kwargs["url"] == "http://www.bing.com"

    _, second_kwargs = req_mock.call_args_list[1]
    assert second_kwargs["url"] == "http://www.google.com"
def runtest(self):
    """Run the spec with whatever global config option was given (may be empty)."""
    global_cfg = self.config.getoption("tavern_global_cfg") or {}
    run_test(self.path, self.spec, global_cfg)