def walk_url(schema_root, xml_root, pieces, ns_map, depth=0):
    """Recursively translate URL path segments into XML elements.

    schema_root -- schema node whose children correspond to the next segment
    xml_root    -- XML element under which new sub-elements are created
    pieces      -- remaining URL path segments to translate
    ns_map      -- accumulated prefix -> namespace mapping (mutated in place)
    depth       -- recursion depth, incremented on each call
    """
    # Base case: every segment has been consumed.
    if not pieces:
        return

    segment = pieces[0]
    child = find_child_by_name(schema_root, segment)
    keyed_list_parent = schema_root.is_listy() and not schema_root.is_leafy()

    if child is None:
        if not keyed_list_parent:
            # The segment is not a key, and no child node has that name.
            raise ValueError("(%s) has no child (%s)" % (schema_root.get_name(), segment))
        # The segment carries comma-separated key values for the list parent.
        for key_name, key_value in zip(get_key_names(schema_root), segment.split(",")):
            key_elem = et.SubElement(xml_root, key_name)
            key_elem.text = key_value
        return walk_url(schema_root, xml_root, pieces[1:], ns_map, depth + 1)

    # Make sure the prefix -> namespace mapping is in our namespace dict.
    prefix = child.get_prefix()
    namespace = child.get_ns()
    if prefix not in ns_map:
        ns_map[prefix] = namespace

    new_elem = et.SubElement(
        xml_root, "{%s}%s" % (namespace, child.get_name()), nsmap=ns_map)
    return walk_url(child, new_elem, pieces[1:], ns_map, depth + 1)
def _convert_pieces(self, root_node, operation, body, pieces, queries, is_operation, depth=0):
    """Used to convert everything in the URL except the last element, and queries

    root_node -- the YangNode that is the parent of possible elements to translate
    pieces -- the remaining pieces of the URL to translate
    """
    fragments = []
    if pieces:
        segment = pieces.pop(0)
        node = find_child_by_name(root_node, segment)
    else:
        segment = ''
        node = root_node

    # PUTs and non-rpc POSTs contain the target element in the body, so it
    # need not be generated from the URL.
    if not pieces:
        # Top level GETs are pruned for only the keys.
        prune = operation == "GET" and depth < 1
        return self._convert_target(root_node, operation, body, queries,
                                    is_operation, segment, prune)

    namespace = node.get_ns()
    bare_name = segment[segment.find(':') + 1:]
    if node != root_node:
        fragments.append('<%s xmlns="%s">' % (bare_name, namespace))

    if node.is_listy() and pieces:
        url_keys = pieces.pop(0).split(",")
        actual_keys = get_key_names(node)
        if len(url_keys) != len(actual_keys):
            raise ValueError("url keys don't match actual keys")
        if operation == "DELETE" and not pieces:
            # Mark the list entry itself for deletion.
            fragments[-1] = fragments[-1].replace('>', ' xc:operation="delete">', 1)
        for key_name, key_string in zip(actual_keys, url_keys):
            # Names can be doubly encoded,
            # ex: trafgen%252F2%252F1 gets unquoted into trafgen%2F2%2F1 which
            # finally gets unquoted into trafgen/2/1.
            while "%" in key_string:
                key_string = urllib.parse.unquote(key_string)
            fragments.append('<%s>%s</%s>' % (key_name, key_string, key_name))
        if not pieces and operation in ["PUT", "POST"]:
            return self._convert_target(root_node, operation, body, queries,
                                        is_operation, root_node.get_name(), False)

    if pieces:
        fragments.append(self._convert_pieces(node, operation, body, pieces,
                                              queries, is_operation, depth + 1))
    fragments.append('</%s>' % (bare_name))
    return ''.join(fragments)
def _build_xpath(url_pieces, schema_node):
    """Recursively build an XPath expression from URL path segments.

    url_pieces  -- list of remaining URL segments; consumed (popped) in place
    schema_node -- schema node that is the parent of the next segment

    Returns the XPath string, or "" when there is nothing left to convert.

    Fixes over the previous revision: removed the unused local
    ``piece_is_key``, removed the redundant second emptiness check, and an
    empty key list no longer produces an invalid empty predicate "[]".
    """
    if not url_pieces:
        return ""
    current_piece = url_pieces.pop(0)
    if current_piece == "":
        return ""

    child_schema_node = find_child_by_name(schema_node, current_piece)
    prefix = child_schema_node.get_prefix()

    if child_schema_node.is_listy() and url_pieces and url_pieces[0] != "":
        url_keys = url_pieces.pop(0).split(",")
        actual_keys = get_key_names(child_schema_node)
        conditions = []
        for key_name, key_string in zip(actual_keys, url_keys):
            # Names can be doubly encoded,
            # ex: trafgen%252F2%252F1 gets unquoted into trafgen%2F2%2F1 which
            # finally gets unquoted into trafgen/2/1.
            while "%" in key_string:
                key_string = urllib.parse.unquote(key_string)
            # Escape key values so they are safe inside the predicate.
            conditions.append("[%s = '%s'" % (
                key_name, tornado.escape.xhtml_escape(key_string)) + "]")
        xpath_snippet = "%s:%s%s" % (
            prefix, child_schema_node.get_name(), "".join(conditions))
    else:
        xpath_snippet = "%s:%s" % (prefix, child_schema_node.get_name())

    if not url_pieces:
        return "/" + xpath_snippet
    return "/" + xpath_snippet + _build_xpath(url_pieces, child_schema_node)
def _build_xpath(url_pieces, schema_node):
    """Build an XPath expression from URL path segments, recursively.

    url_pieces  -- list of remaining URL segments; consumed (popped) in place
    schema_node -- schema node that is the parent of the next segment

    Returns the XPath string, or "" when nothing is left to convert.
    """
    if not url_pieces:
        return ""
    segment = url_pieces.pop(0)
    if segment == "":
        return ""

    child = find_child_by_name(schema_node, segment)
    prefix = child.get_prefix()
    snippet = "%s:%s" % (prefix, child.get_name())

    if child.is_listy() and url_pieces and url_pieces[0] != "":
        key_values = url_pieces.pop(0).split(",")
        key_names = get_key_names(child)
        parts = ["["]
        for idx, (key_name, key_value) in enumerate(zip(key_names, key_values)):
            # Names can be doubly encoded,
            # ex: trafgen%252F2%252F1 gets unquoted into trafgen%2F2%2F1 which
            # finally gets unquoted into trafgen/2/1.
            while "%" in key_value:
                key_value = urllib.parse.unquote(key_value)
            if idx:
                parts.append("][")
            parts.append("%s = '%s'" % (
                key_name, tornado.escape.xhtml_escape(key_value)))
        parts.append("]")
        snippet = snippet + "".join(parts)

    if not url_pieces:
        return "/" + snippet
    return "/" + snippet + _build_xpath(url_pieces, child)
def __getitem__(self, keys):
    """Walk the schema tree to the list entry with the given keys.

    keys -- a single key value, or a tuple of key values for multi-key lists

    Raises ValueError if the wrong number of keys is used.

    Fix: the docstring promised a ValueError on a wrong key count, but the
    tuple path never checked — zip() silently truncated. The length is now
    validated for tuples as well (and the "ValueErro" typo is corrected).
    """
    actual_keys = get_key_names(self._current_descriptor)
    if isinstance(keys, tuple):
        if len(keys) != len(actual_keys):
            raise ValueError("expected %d keys, received %d keys"
                             % (len(actual_keys), len(keys)))
        kv_pairs = list(zip(actual_keys, keys))
    else:
        if len(actual_keys) > 1:
            raise ValueError("expected %d keys, received 1 key" % len(actual_keys))
        kv_pairs = [(actual_keys[0], keys)]
    value = self._getitem_hook(kv_pairs, keys)
    # _getitem_hook may resolve a child object; otherwise stay on self.
    if value is not None:
        return value
    else:
        return self
def walk_url(schema_root, xml_root, pieces, ns_map, depth=0):
    """Translate URL path segments into XML elements under xml_root.

    For each segment either a named child element is created, or — when the
    current schema node is a keyed (non-leafy) list and no child matches —
    the segment is treated as comma-separated key values.

    schema_root -- schema node whose children match the next segment
    xml_root    -- XML element receiving new sub-elements
    pieces      -- URL path segments still to be processed
    ns_map      -- prefix -> namespace mapping, extended as nodes are seen
    depth       -- nesting depth counter (kept for interface compatibility)
    """
    while pieces:
        segment = pieces[0]
        child = find_child_by_name(schema_root, segment)

        if child is None:
            if schema_root.is_listy() and not schema_root.is_leafy():
                # The segment is intended to be the list's key value(s).
                for key, val in zip(get_key_names(schema_root), segment.split(",")):
                    key_elem = et.SubElement(xml_root, key)
                    key_elem.text = val
                # Stay on the same schema/xml nodes; only the segment advances.
            else:
                # The segment isn't a key, and there is no node by that name.
                raise ValueError("(%s) has no child (%s)"
                                 % (schema_root.get_name(), segment))
        else:
            # Record the prefix -> namespace mapping before emitting the node.
            prefix = child.get_prefix()
            ns = child.get_ns()
            if prefix not in ns_map:
                ns_map[prefix] = ns
            xml_root = et.SubElement(
                xml_root, "{%s}%s" % (ns, child.get_name()), nsmap=ns_map)
            schema_root = child

        pieces = pieces[1:]
        depth += 1
def _convert_pieces(self, root_node, operation, body, pieces, queries, is_operation, depth=0):
    """Used to convert everything in the URL except the last element, and queries

    root_node -- the YangNode that is the parent of possible elements to translate
    pieces -- the remaining pieces of the URL to translate
    """
    name = pieces.pop(0)
    node = find_child_by_name(root_node, name)

    # PUTs and non-rpc POSTs contain the target element in the body, so it
    # need not be generated from the URL.
    if not pieces:
        # Top level GETs are pruned for only the keys.
        if operation == "GET":
            return self._convert_target(root_node, operation, body, queries,
                                        is_operation, name, depth < 1)
        return self._convert_target(root_node, operation, body, queries,
                                    is_operation, name, False)

    ns = node.get_ns()
    local_name = name[name.find(':') + 1:]
    out = ['<%s xmlns="%s">' % (local_name, ns)]

    if node.is_listy() and pieces:
        url_keys = pieces.pop(0).split(",")
        key_names = get_key_names(node)
        if len(url_keys) != len(key_names):
            raise ValueError("url keys don't match actual keys")
        if operation == "DELETE" and not pieces:
            # Mark the list entry itself for deletion.
            out[-1] = out[-1].replace('>', ' xc:operation="delete">', 1)
        for key_name, raw in zip(key_names, url_keys):
            # Names can be doubly encoded,
            # ex: trafgen%252F2%252F1 gets unquoted into trafgen%2F2%2F1 which
            # finally gets unquoted into trafgen/2/1.
            while "%" in raw:
                raw = urllib.parse.unquote(raw)
            out.append('<%s>%s</%s>' % (key_name, raw, key_name))
        if not pieces and operation in ["PUT", "POST"]:
            return self._convert_target(root_node, operation, body, queries,
                                        is_operation, root_node.get_name(), False)

    if pieces:
        out.append(self._convert_pieces(node, operation, body, pieces,
                                        queries, is_operation, depth + 1))
    out.append('</%s>' % (local_name))
    return ''.join(out)