def _load_fixture_values(self):
    """Collect the values of all fixtures requested via 'usefixtures' markers.

    Returns:
        dict: mapping of fixture name -> resolved fixture value, later
            markers overriding earlier ones on name collision
    """
    values = {}

    for marker in self.iter_markers("usefixtures"):
        spec = marker.args

        if isinstance(spec, (list, tuple)):
            resolved = {name: self.funcargs[name] for name in spec}
        elif isinstance(spec, str):
            # Not sure if this can happen if validation is working
            # correctly, but it appears to be slightly broken so putting
            # this check here just in case
            resolved = {spec: self.funcargs[spec]}
        else:
            raise exceptions.BadSchemaError(
                "Can't handle 'usefixtures' spec of '{}'."
                " There appears to be a bug in pykwalify so verification of"
                " 'usefixtures' is broken - it should be a list of fixture"
                " names".format(spec)
            )

        # Warn when a later marker shadows a value from an earlier one
        if any(name in values for name in resolved):
            logger.warning("Overriding value for %s", resolved)

        values.update(resolved)

    return values
def maybe_load_ext(v):
    """Resolve any $ext function in a (key, value) pair from a parametrize mark.

    Args:
        v (tuple): (key, value) pair; value may be a dict containing a '$ext'
            block describing an external function to call

    Returns:
        tuple: (key, value) with the ext function's result substituted or
            merged into the remaining value

    Raises:
        exceptions.BadSchemaError: there were extra keys alongside '$ext' but
            the ext function did not return a dict, so merging is impossible
    """
    key, value = v

    if is_ext_function(value):
        # If it is an ext function, load the new (or supplemental) value[s]
        ext = value.pop("$ext")
        f = get_wrapped_create_function(ext)
        new_value = f()

        if len(value) == 0:
            # Use only this new value
            return key, new_value
        elif isinstance(new_value, dict):
            # Merge with some existing data. At this point 'value' is known to be a dict.
            # Reuse 'new_value' rather than calling f() a second time - the
            # ext function may be side-effecting or non-deterministic.
            return key, deep_dict_merge(value, new_value)
        else:
            # For example, if it's defined like
            #
            # - testkey: testval
            #   $ext:
            #     function: mod:func
            #
            # and 'mod:func' returns a string, it's impossible to 'merge' with the existing data.
            logger.error("Values still in 'val': %s", value)
            raise exceptions.BadSchemaError(
                "There were extra key/value pairs in the 'val' for this parametrize mark, but the ext function {} returned '{}' (of type {}) that was not a dictionary. It is impossible to merge these values."
                .format(ext, new_value, type(new_value)))

    return key, value
def error_on_empty_scalar(self, mark):  # pylint: disable=unused-argument
    """Raise a schema error pointing at the YAML mark where an empty value was found."""
    where = "{mark.name:s}:{mark.line:d} - column {mark.column:d}".format(mark=mark)

    raise exceptions.BadSchemaError(
        "Error at {} - cannot define an empty value in test - either give it a value or explicitly set it to None".format(
            where)
    )
def _read_filespec(filespec): """ Get configuration for uploading file Can either be just a path to a file or a 'long' format including content type/encoding Args: filespec: Either a string with the path to a file or a dictionary with file_path and possible content_type and/or content_encoding Returns: tuple: (file path, content type, content encoding) """ if isinstance(filespec, str): return filespec, None, None elif isinstance(filespec, dict): return ( filespec.get("file_path"), filespec.get("content_type"), filespec.get("content_encoding"), ) else: # Could remove, also done in schema check raise exceptions.BadSchemaError( "File specification must be a path or a dictionary" )
def get_wrapped_response_function(ext):
    """Wraps a ext function with arguments given in the test file

    This is similar to functools.wrap, but this makes sure that 'response' is
    always the first argument passed to the function

    Args:
        ext (dict): $ext function dict with function, extra_args, and
            extra_kwargs to pass

    Returns:
        function: Wrapped function

    Raises:
        exceptions.BadSchemaError: no 'function' key in the ext block
    """
    extra_args = ext.get("extra_args") or ()
    extra_kwargs = ext.get("extra_kwargs") or {}

    try:
        func = import_ext_function(ext["function"])
    except KeyError:
        raise exceptions.BadSchemaError(
            "No function specified in external function block")

    @functools.wraps(func)
    def wrapped(response):
        # 'response' always goes first, then the configured extras
        return func(response, *extra_args, **extra_kwargs)

    # Expose the unwrapped function for introspection
    wrapped.func = func

    return wrapped
def validate_comparision(each_comparision):
    """Validate a JMES comparison block and extract its parts.

    Args:
        each_comparision (dict): block which must have exactly the keys
            'jmespath', 'operator', and 'expected'

    Returns:
        tuple: (jmespath, operator, expected)

    Raises:
        exceptions.BadSchemaError: wrong keys in the block, or unknown operator
    """
    # NOTE: previously this used 'assert' wrapped in 'except KeyError' - the
    # AssertionError was never caught (and asserts are stripped under -O), so
    # bad keys escaped as a bare AssertionError. Check explicitly instead.
    if set(each_comparision.keys()) != {'jmespath', 'operator', 'expected'}:
        raise exceptions.BadSchemaError(
            "Invalid keys given to JMES validation function")

    jmespath = each_comparision['jmespath']
    _operator = each_comparision['operator']
    expected = each_comparision['expected']

    try:
        # Only checking the operator is known; value unused here
        COMPARATORS[_operator]
    except KeyError as e:
        raise_from(exceptions.BadSchemaError("Invalid comparator given"), e)

    return jmespath, _operator, expected
def runtest(self):
    """Load global config and fixtures, verify the test schema, then run all stages.

    Honours the '_xfail' key in the spec: 'verify' expects a schema failure,
    'run' expects a runtime failure; any truthy value means the test is
    expected to fail, so plain success becomes a TestFailError.
    """
    # Do a deep copy because this sometimes still retains things from previous tests(?)
    self.global_cfg = copy.deepcopy(load_global_cfg(self.config))
    self.global_cfg.setdefault("variables", {})
    load_plugins(self.global_cfg)
    # Give stages access to pytest hooks via the config
    self.global_cfg["tavern_internal"] = {"pytest_hook_caller": self.config.hook}
    # INTERNAL
    # NOTE - now that we can 'mark' tests, we could use pytest.mark.xfail
    # instead. This doesn't differentiate between an error in verification
    # and an error when running the test though.
    xfail = self.spec.get("_xfail", False)
    try:
        # Fixture values become format variables available to the test
        fixture_values = self._load_fixture_values()
        self.global_cfg["variables"].update(fixture_values)
        call_hook(
            self.global_cfg,
            "pytest_tavern_beta_before_every_test_run",
            test_dict=self.spec,
            variables=self.global_cfg["variables"],
        )
        # Schema check happens before any stage runs
        verify_tests(self.spec)
        for stage in self.spec["stages"]:
            if not stage.get("name"):
                if not stage.get("id"):
                    # Should never actually reach here, should be caught at schema check time
                    raise exceptions.BadSchemaError(
                        "One of name or ID must be specified"
                    )
                # Fall back to the stage id as its display name
                stage["name"] = stage["id"]
        run_test(self.path, self.spec, self.global_cfg)
    except exceptions.BadSchemaError:
        # Schema failure is only expected when xfail == "verify"
        if xfail == "verify":
            logger.info("xfailing test while verifying schema")
        else:
            raise
    except exceptions.TavernException:
        # Runtime failure is only expected when xfail == "run"
        if xfail == "run":
            logger.info("xfailing test when running")
        else:
            raise
    else:
        # Test passed but was marked as expected-to-fail
        if xfail:
            logger.error("Expected test to fail")
            raise exceptions.TestFailError(
                "Expected test to fail at {} stage".format(xfail)
            )
def check_ext_functions(verify_block):
    """Wrap and register each ext function from a 'verify_response_with' block.

    Accepts a single function dict, a list of them, or None (no-op).
    """
    if verify_block is None:
        return

    if isinstance(verify_block, dict):
        # Normalise a single spec to a one-element list
        blocks = [verify_block]
    elif isinstance(verify_block, list):
        blocks = verify_block
    else:
        raise exceptions.BadSchemaError(
            "Badly formatted 'verify_response_with' block")

    for vf in blocks:
        self.validate_functions.append(get_wrapped_response_function(vf))
def validate_comparison(each_comparison):
    """Validate a JMES comparison block and extract its parts.

    Args:
        each_comparison (dict): block which must have exactly the keys
            'jmespath', 'operator', and 'expected'

    Returns:
        tuple: (jmespath, operator, expected)

    Raises:
        exceptions.BadSchemaError: wrong keys in the block, or unknown operator
    """
    # NOTE: previously this used 'assert' wrapped in 'except KeyError' - the
    # AssertionError was never caught (and asserts are stripped under -O), so
    # bad keys escaped as a bare AssertionError. Check explicitly instead.
    if set(each_comparison.keys()) != {"jmespath", "operator", "expected"}:
        raise exceptions.BadSchemaError(
            "Invalid keys given to JMES validation function")

    jmespath, _operator, expected = (
        each_comparison["jmespath"],
        each_comparison["operator"],
        each_comparison["expected"],
    )

    try:
        # Only checking the operator is known; value unused here
        COMPARATORS[_operator]
    except KeyError as e:
        raise exceptions.BadSchemaError("Invalid comparator given") from e

    return jmespath, _operator, expected
def get_parametrized_items(self, test_spec, parametrize_marks, pytest_marks):
    """Yield new test items for every combination of parametrized values.

    This changes the name from something like 'test a thing' to
    'test a thing[param1]', 'test a thing[param2]', etc. This probably
    messes with -k

    Note:
        This still has the pytest.mark.parametrize mark on it, though it
        doesn't appear to do anything. This could be removed?
    """
    # These should be in the same order as specified in the input file
    all_vals = [mark["parametrize"]["vals"] for mark in parametrize_marks]

    try:
        combinations = itertools.product(*all_vals)
    except TypeError as e:
        raise exceptions.BadSchemaError(
            "Invalid match between numbers of keys and number of values in parametrize mark"
        ) from e

    all_keys = [mark["parametrize"]["key"] for mark in parametrize_marks]

    for combination in combinations:
        variables, inner_formatted = _generate_parametrized_test_items(
            all_keys, combination)

        # Change the name
        spec_new = copy.deepcopy(test_spec)
        spec_new["test_name"] = test_spec["test_name"] + "[{}]".format(
            inner_formatted)

        logger.debug("New test name: %s", spec_new["test_name"])

        # Make this new thing available for formatting
        includes = spec_new.setdefault("includes", [])
        includes.append({
            "name": "parametrized[{}]".format(inner_formatted),
            "description": "autogenerated by Tavern",
            "variables": variables,
        })

        # And create the new item
        new_item = YamlItem.yamlitem_from_parent(
            spec_new["test_name"], self, spec_new, self.fspath)
        new_item.add_markers(pytest_marks)

        yield new_item
def validate_pykwalify(response, schema):
    """Make sure the response matches a given schema

    Args:
        response (Response): requests.Response object
        schema (dict): Schema for response
    """
    try:
        body = response.json()
    except TypeError as e:
        # Body could not be decoded as json at all
        raise_from(
            exceptions.BadSchemaError(
                "Tried to match a pykwalify schema against a non-json response"),
            e)

    verify_generic(body, schema)
def validate_regex(response, expression, *, header=None, in_jmespath=None):
    """Make sure the response matches a regex expression

    Args:
        response (requests.Response): requests.Response object
        expression (str): Regex expression to use
        header (str): Match against a particular header instead of the body
        in_jmespath (str): if present, jmespath to access before trying to match

    Returns:
        dict: dictionary of regex: boxed name capture groups

    Raises:
        exceptions.BadSchemaError: both header and in_jmespath were given
        exceptions.RegexAccessError: content could not be accessed or matched
    """
    if header and in_jmespath:
        raise exceptions.BadSchemaError(
            "Can only specify one of header or jmespath")

    # Pick the text to match against: a single header, or the whole body
    content = response.headers[header] if header else response.text

    if in_jmespath:
        if not response.headers.get("content-type", "").startswith("application/json"):
            logger.warning(
                "Trying to use jmespath match but content type is not application/json"
            )

        try:
            decoded = json.loads(content)
        except json.JSONDecodeError as e:
            raise exceptions.RegexAccessError(
                "unable to decode json for regex match") from e

        content = recurse_access_key(decoded, in_jmespath)
        if not isinstance(content, str):
            raise exceptions.RegexAccessError(
                "Successfully accessed {} from response, but it was a {} and not a string"
                .format(in_jmespath, type(content)))

    logger.debug("Matching %s with %s", content, expression)

    match = re.search(expression, content)
    if match is None:
        raise exceptions.RegexAccessError("No match for regex")

    return {"regex": Box(match.groupdict())}
def get_publish_args(rspec, test_block_config):
    """Format mqtt request args

    Todo:
        Anything else to do here?
    """
    formatted = format_keys(rspec, test_block_config["variables"])

    # 'json' is serialised into the payload; the two are mutually exclusive
    if "json" in rspec:
        if "payload" in rspec:
            raise exceptions.BadSchemaError(
                "Can only specify one of 'payload' or 'json' in MQTT request")

        formatted["payload"] = json.dumps(formatted.pop("json"))

    return formatted
def _get_payload_vals(self): # TODO move this check to initialisation/schema checking if "json" in self.expected: if "payload" in self.expected: raise exceptions.BadSchemaError( "Can only specify one of 'payload' or 'json' in MQTT response" ) payload = self.expected["json"] json_payload = True elif "payload" in self.expected: payload = self.expected["payload"] json_payload = False else: payload = None json_payload = False return payload, json_payload
def __init__(self, session, name, expected, test_block_config):  # pylint: disable=unused-argument
    """Set up the verifier, defaulting the expected status code to 200 and
    warning about any status codes that are not recognised HTTP codes."""
    defaults = {"status_code": 200}

    super().__init__(name, deep_dict_merge(defaults, expected), test_block_config)

    # Filled in when the response is actually verified
    self.status_code = None

    expected_codes = self.expected["status_code"]
    if isinstance(expected_codes, list):
        codes = expected_codes
    else:
        codes = [expected_codes]

    try:
        for code in codes:
            if int(code) not in _codes:
                logger.warning("Unexpected status code '%s'", code)
    except TypeError as e:
        raise exceptions.BadSchemaError("Invalid code") from e
def _get_payload_vals(self): # TODO move this check to initialisation/schema checking if "json" in self.expected: if "payload" in self.expected: raise exceptions.BadSchemaError( "Can only specify one of 'payload' or 'json' in MQTT response" ) payload = self.expected["json"] json_payload = True if payload.pop("$ext", None): logger.warning( "$ext function found in block %s - this has been moved to verify_response_with block - see documentation", "json", ) elif "payload" in self.expected: payload = self.expected["payload"] json_payload = False else: payload = None json_payload = False return payload, json_payload
def _format_test_marks(original_marks, fmt_vars, test_name):
    """Given the 'raw' marks from the test and any available format variables,
    generate new marks for this test

    Args:
        original_marks (list): Raw string from test - should correspond to
            either a pytest builtin mark or a custom user mark
        fmt_vars (dict): dictionary containing available format variables
        test_name (str): Name of test (for error logging)

    Returns:
        tuple: first element is normal pytest mark objects, second element is
            all marks which were formatted (no matter their content)

    Todo:
        Fix doctests below - failing due to missing pytest markers

    Example:

        # >>> _format_test_marks([], {}, 'abc')
        # ([], [])
        # >>> _format_test_marks(['tavernmarker'], {}, 'abc')
        # (['tavernmarker'], [])
        # >>> _format_test_marks(['{formatme}'], {'formatme': 'tavernmarker'}, 'abc')
        # (['tavernmarker'], [])
        # >>> _format_test_marks([{'skipif': '{skiptest}'}], {'skiptest': true}, 'abc')
        # (['tavernmarker'], [])
    """
    pytest_marks = []
    formatted_marks = []

    for mark in original_marks:
        if isinstance(mark, str):
            # A plain mark name, possibly containing format variables
            formatted_name = _format_without_inner(mark, fmt_vars)
            pytest_marks.append(getattr(pytest.mark, formatted_name))
        elif isinstance(mark, dict):
            # skipif or parametrize (for now)
            for markname, extra_arg in mark.items():
                # NOTE
                # cannot do 'skipif' and rely on a parametrized
                # argument.
                try:
                    extra_arg = _format_without_inner(extra_arg, fmt_vars)
                except exceptions.MissingFormatError as e:
                    msg = "Tried to use mark '{}' (with value '{}') in test '{}' but one or more format variables was not in any configuration file used by the test".format(
                        markname, extra_arg, test_name
                    )
                    # NOTE
                    # we could continue and let it fail in the test, but
                    # this gives a better indication of what actually
                    # happened (even if it is difficult to test)
                    raise exceptions.MissingFormatError(msg) from e
                else:
                    pytest_marks.append(getattr(pytest.mark, markname)(extra_arg))
                    formatted_marks.append({markname: extra_arg})
        else:
            raise exceptions.BadSchemaError(
                "Unexpected mark type '{}'".format(type(mark)))

    return pytest_marks, formatted_marks
def verify(self, response):
    """Ensure mqtt message has arrived

    Polls the MQTT client for up to 'timeout' seconds for a message whose
    topic and payload both match the expected values, recording an error
    (and raising TestFailError) if none arrives.

    Args:
        response: not used
    """
    self.response = response

    topic = self.expected["topic"]
    timeout = self.expected.get("timeout", 1)

    # TODO move this check to initialisation/schema checking
    if "json" in self.expected:
        if "payload" in self.expected:
            raise exceptions.BadSchemaError(
                "Can only specify one of 'payload' or 'json' in MQTT response"
            )
        payload = self.expected["json"]
        json_payload = True
    else:
        payload = self.expected["payload"]
        json_payload = False

    time_spent = 0

    # Keep polling until a fully-matching message arrives or the time
    # budget is exhausted; 'msg' is non-None after the loop only on a match
    while time_spent < timeout:
        t0 = time.time()

        # Wait for at most the remaining time budget
        msg = self._client.message_received(timeout - time_spent)

        if not msg:
            # timed out
            break

        # Every received message is recorded, even non-matching ones
        self.received_messages.append(msg)

        msg.payload = msg.payload.decode("utf8")

        if json_payload:
            try:
                msg.payload = json.loads(msg.payload)
            except LoadException:
                logger.warning("Expected a json payload but got '%s'",
                    msg.payload)
                msg = None
                # NOTE(review): this 'continue' skips the time_spent update
                # below, so undecodable messages don't count against the
                # timeout - confirm this is intentional
                continue

        if msg.payload != payload:
            logger.warning(
                "Got unexpected payload on topic '%s': '%s' (expected '%s')",
                msg.topic, msg.payload, payload)
        elif msg.topic != topic:
            logger.warning(
                "Got unexpected message in '%s' with payload '%s'",
                msg.topic, msg.payload)
        else:
            logger.info("Got expected message in '%s' with payload '%s'",
                msg.topic, msg.payload)

            break

        # Not a match - clear msg so a timeout on the next iteration is
        # reported as 'no such message received'
        msg = None
        time_spent += time.time() - t0

    if not msg:
        self._adderr("Expected '%s' on topic '%s' but no such message "
            "received", payload, topic)

    if self.errors:
        raise exceptions.TestFailError("Test '{:s}' failed:\n{:s}".format(
            self.name, self._str_errors()))

    return {}
def get_request_args(rspec, test_block_config):
    """Format the test spec given values in the global config

    Todo:
        Add similar functionality to validate/save $ext functions so input
        can be generated from a function

    Args:
        rspec (dict): Test spec
        test_block_config (dict): Test block config

    Returns:
        dict: Formatted test spec

    Raises:
        BadSchemaError: Tried to pass a body in a GET request
    """
    # pylint: disable=too-many-locals,too-many-statements

    request_args = {}

    # Ones that are required and are enforced to be present by the schema
    required_in_file = ["method", "url"]

    # Keys requests expects which we default if the test doesn't set them
    optional_with_default = {"verify": True, "stream": False}

    if "method" not in rspec:
        logger.debug("Using default GET method")
        rspec["method"] = "GET"

    # The different, mutually-incompatible ways of specifying a request body
    content_keys = ["data", "json", "files", "file_body"]

    in_request = [c for c in content_keys if c in rspec]
    if len(in_request) > 1:
        # Explicitly raise an error here
        # From requests docs:
        # Note, the json parameter is ignored if either data or files is passed.
        # However, we allow the data + files case, as requests handles it correctly
        if set(in_request) != {"data", "files"}:
            raise exceptions.BadSchemaError(
                "Can only specify one type of request data in HTTP request (tried to "
                "send {})".format(" and ".join(in_request))
            )

    headers = rspec.get("headers", {})
    # Case-insensitive check - header names are not case sensitive
    has_content_header = "content-type" in [h.lower() for h in headers.keys()]

    if "files" in rspec:
        if has_content_header:
            # requests sets the multipart content-type itself, so a
            # user-specified one would be wrong - strip it
            logger.warning(
                "Tried to specify a content-type header while sending a file - this will be ignored"
            )
            rspec["headers"] = {
                i: j for i, j in headers.items() if i.lower() != "content-type"
            }

    # Resolve all format variables before extracting any values
    fspec = format_keys(rspec, test_block_config["variables"])

    send_in_body = fspec.get("file_body")
    if send_in_body:
        request_args["file_body"] = send_in_body

    def add_request_args(keys, optional):
        # Copy the given keys from the formatted spec into request_args;
        # missing keys are only an error when not optional
        for key in keys:
            try:
                request_args[key] = fspec[key]
            except KeyError:
                if optional or (key in request_args):
                    continue

                # This should never happen
                raise

    add_request_args(required_in_file, False)
    add_request_args(RestRequest.optional_in_file, True)

    if "auth" in fspec:
        # requests expects a tuple for basic auth
        request_args["auth"] = tuple(fspec["auth"])

    if "cert" in fspec:
        # (cert, key) pairs must be a tuple, not a list
        if isinstance(fspec["cert"], list):
            request_args["cert"] = tuple(fspec["cert"])

    if "timeout" in fspec:
        # Needs to be a tuple, it being a list doesn't work
        if isinstance(fspec["timeout"], list):
            request_args["timeout"] = tuple(fspec["timeout"])

    # If there's any nested json in parameters, urlencode it
    # if you pass nested json to 'params' then requests silently fails and just
    # passes the 'top level' key, ignoring all the nested json. I don't think
    # there's a standard way to do this, but urlencoding it seems sensible
    # eg https://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
    # > ...represented in an OAuth 2.0 request as UTF-8 encoded JSON (which ends
    # > up being form-urlencoded when passed as an OAuth parameter)
    for key, value in request_args.get("params", {}).items():
        if isinstance(value, dict):
            request_args["params"][key] = quote_plus(json.dumps(value))

    for key, val in optional_with_default.items():
        request_args[key] = fspec.get(key, val)

    # TODO
    # requests takes all of these - we need to parse the input to get them
    # "cookies",

    # These verbs _can_ send a body but the body _should_ be ignored according
    # to the specs - some info here:
    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
    if request_args["method"] in ["GET", "HEAD", "OPTIONS"]:
        if any(i in request_args for i in ["json", "data"]):
            warnings.warn(
                "You are trying to send a body with a HTTP verb that has no semantic use for it",
                RuntimeWarning,
            )

    return request_args
def get_request_args(rspec, test_block_config):
    """Format the test spec given values in the global config

    Todo:
        Add similar functionality to validate/save $ext functions so input
        can be generated from a function

    Args:
        rspec (dict): Test spec
        test_block_config (dict): Test block config

    Returns:
        dict: Formatted test spec

    Raises:
        BadSchemaError: Tried to pass a body in a GET request
    """
    # pylint: disable=too-many-locals

    request_args = {}

    # Ones that are required and are enforced to be present by the schema
    required_in_file = [
        "method",
        "url",
    ]

    optional_in_file = [
        "json",
        "data",
        "params",
        "headers",
        "files",
        "timeout",
        # Ideally this would just be passed through but requests seems to error
        # if we pass a list instead of a tuple, so we have to manually convert
        # it further down
        # "auth",
    ]

    # Keys requests expects which we default if the test doesn't set them
    optional_with_default = {
        "verify": True,
        "stream": False
    }

    if "method" not in rspec:
        logger.debug("Using default GET method")
        rspec["method"] = "GET"

    # Body keys which conflict with sending files
    content_keys = [
        "data",
        "json",
    ]

    headers = rspec.get("headers", {})
    # Case-insensitive check - header names are not case sensitive
    has_content_header = "content-type" in [h.lower() for h in headers.keys()]

    if "files" in rspec:
        if any(ckey in rspec for ckey in content_keys):
            raise exceptions.BadSchemaError("Tried to send non-file content alongside a file")

        if has_content_header:
            # requests sets the multipart content-type itself, so a
            # user-specified one would be wrong - strip it
            logger.warning("Tried to specify a content-type header while sending a file - this will be ignored")
            rspec["headers"] = {i: j for i, j in headers.items() if i.lower() != "content-type"}
    elif headers:
        # This should only be hit if we aren't sending a file
        if not has_content_header:
            # NOTE(review): this defaults content-type to application/json
            # whenever any headers are given, even for form 'data' bodies -
            # confirm this is intended
            rspec["headers"]["content-type"] = "application/json"

    # Resolve all format variables before extracting any values
    fspec = format_keys(rspec, test_block_config["variables"])

    def add_request_args(keys, optional):
        # Copy the given keys from the formatted spec into request_args;
        # missing keys are only an error when not optional
        for key in keys:
            try:
                request_args[key] = fspec[key]
            except KeyError:
                if optional or (key in request_args):
                    continue

                # This should never happen
                raise

    add_request_args(required_in_file, False)
    add_request_args(optional_in_file, True)

    if "auth" in fspec:
        # requests expects a tuple for basic auth
        request_args["auth"] = tuple(fspec["auth"])

    if "timeout" in fspec:
        # Needs to be a tuple, it being a list doesn't work
        if isinstance(fspec["timeout"], list):
            request_args["timeout"] = tuple(fspec["timeout"])

    for key in optional_in_file:
        try:
            # If the value is a dict containing a '$ext' block, replace it
            # with the result of calling that function
            func = get_wrapped_create_function(request_args[key].pop("$ext"))
        except (KeyError, TypeError, AttributeError):
            # Missing key, non-dict value, or no '$ext' - leave it alone
            pass
        else:
            request_args[key] = func()

    # If there's any nested json in parameters, urlencode it
    # if you pass nested json to 'params' then requests silently fails and just
    # passes the 'top level' key, ignoring all the nested json. I don't think
    # there's a standard way to do this, but urlencoding it seems sensible
    # eg https://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
    # > ...represented in an OAuth 2.0 request as UTF-8 encoded JSON (which ends
    # > up being form-urlencoded when passed as an OAuth parameter)
    for key, value in request_args.get("params", {}).items():
        if isinstance(value, dict):
            request_args["params"][key] = quote_plus(json.dumps(value))

    for key, val in optional_with_default.items():
        request_args[key] = fspec.get(key, val)

    # TODO
    # requests takes all of these - we need to parse the input to get them
    # "cookies",

    # These verbs _can_ send a body but the body _should_ be ignored according
    # to the specs - some info here:
    # https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
    if request_args["method"] in ["GET", "HEAD", "OPTIONS"]:
        if any(i in request_args for i in ["json", "data"]):
            warnings.warn("You are trying to send a body with a HTTP verb that has no semantic use for it", RuntimeWarning)

    return request_args