def __init__(self, form):
    """Build a Form mapping from a parsed <form> element.

    Reads the form-level attributes (action, method, enctype,
    accept-charset, accept) and registers every named control found
    under the form as a Form.Item / Form.OptItem keyed by its name.

    Raises:
        TypeError: if *form* is not an etree element.
        ValueError: if the element is not a <form> tag.
    """
    if not etree.iselement(form):
        raise TypeError, "Form() requires a <form> element, not type '%s'" % type(form).__name__
    if form.tag != 'form':
        raise ValueError, "Form() requires a <form> element, not <%s>" % form.tag
    OrderedDict.__init__(self)
    # form-level submission attributes; method is normalized to lower case
    self.action = form.get('action')
    self.method = form.get('method', 'get').lower()
    self.enctype = form.get('enctype', MIME_URLFORM)
    # comma-separated attribute lists -> normalized token lists
    self.charset = [val.strip().lower() for val in form.get('accept-charset', '').split(',') if val]
    self.accept = [val.strip().lower() for val in form.get('accept', '').split(',') if val]
    # self._items = OrderedDict()
    # map control ids to their <label for=...> text for radio/checkbox labelling
    labels = dict((elem.get('for'), gettext(elem)) for elem in form.iterfind('.//label[@for]'))
    for elem in form.iterfind('.//*[@name]'):
        name = elem.get('name')
        # <input> controls are distinguished by their type attribute;
        # other tags (textarea, select) use the tag name itself
        type = elem.get('type', 'text').lower() if elem.tag == 'input' else elem.tag
        if type in ('text', 'password', 'hidden'):
            self[name] = Form.Item(type, elem.get('value', ''))
        elif type == 'textarea':
            self[name] = Form.Item(type, gettext(elem))
        elif type in ('radio', 'checkbox'):
            value = elem.get('value', 'on')
            # label lookup order: explicit <label for=...>, element text,
            # trailing text, finally the value itself
            label = labels.get(elem.get('id')) or gettext(elem) or elem.tail or value
            item = self.setdefault(name, Form.OptItem(type))
            item.addopt(value, label, hasflag(elem, 'checked'))
        elif type == 'submit':
            value = elem.get('value', 'Submit Query')
            item = self.setdefault(name, Form.OptItem(type))
            item.addopt(value, value)
        elif type == 'select':
            item = self[name] = Form.OptItem(type, hasflag(elem, 'multiple'))
            for opt in elem.iterfind('.//option'):
                text = gettext(opt)
                item.addopt(opt.get('value', text), text, hasflag(opt, 'selected'))
def _to_datacenter(self, datacenter):
    """Convert a <dataCenter> response element into a Datacenter object."""
    def text_of(tag):
        # Optional child element: its text when present, otherwise None.
        child = datacenter.find(tag)
        return child.text if ET.iselement(child) else None

    datacenter_id = datacenter.find('dataCenterId').text
    version = datacenter.find('dataCenterVersion').text
    datacenter_name = text_of('dataCenterName')
    location = text_of('location')
    # map the raw provisioning state onto a NodeState, defaulting to UNKNOWN
    provisioning_state = self.PROVISIONING_STATE.get(
        text_of('provisioningState'), NodeState.UNKNOWN)
    return Datacenter(id=datacenter_id,
                      name=datacenter_name,
                      version=version,
                      driver=self.connection.driver,
                      extra={'provisioning_state': provisioning_state,
                             'location': location})
def delete_labels(concepts, lab_elem):
    """Accepts a dictionary of concepts, and a label linkbase element.

    The dictionary of concepts contains concepts as keys, and a list of
    label types to remove as their values. The label types of the concepts
    are removed from the label linkbase element.
    """
    # XBRL namespaces used to build fully-qualified tag/attribute names
    xlink = "{http://www.w3.org/1999/xlink}"
    linkbase = "{http://www.xbrl.org/2003/linkbase}"
    role_attr_xpath = "{0}role".format(xlink)
    to_attr_xpath = "{0}to".format(xlink)
    label_attr_xpath = "{0}label".format(xlink)
    lab_link = lab_elem.find(".//{0}labelLink".format(linkbase))
    # path templates; %s is filled in per concept/label below
    label_xpath = ".//{0}label[@{1}label='%s']".format(linkbase, xlink)
    loc_href_xpath = ".//{0}loc[@{1}href='%s']".format(linkbase, xlink)
    label_arc_xpath = ".//{0}labelArc[@{1}from='%s']".format(linkbase, xlink)
    removed_labels = {}
    for concept, label_types in concepts.items():
        # every locator pointing at this concept...
        for loc_to_delete in lab_link.iterfind(loc_href_xpath % concept):
            label_ref = label_arc_xpath % loc_to_delete.get(label_attr_xpath)
            # ...every arc starting at that locator...
            for label_arc_to_delete in lab_link.iterfind(label_ref):
                to_label = label_arc_to_delete.get(to_attr_xpath)
                # ...every label that arc points to
                for lab_to_delete in lab_link.iterfind(label_xpath % to_label):
                    lab_role = lab_to_delete.get(role_attr_xpath)
                    if label_types == "All" or lab_role in label_types:
                        # remember removed label text keyed by concept/role
                        removed_labels.setdefault(
                            concept, dict()
                        )[lab_role] = lab_to_delete.text
                        lab_link.remove(lab_to_delete)
                # drop the arc once it no longer targets any label
                if not etree.iselement(lab_link.find(label_xpath % to_label)):
                    lab_link.remove(label_arc_to_delete)
            # drop the locator once it has no remaining arcs
            if not etree.iselement(lab_link.find(label_ref)):
                lab_link.remove(loc_to_delete)
    return (removed_labels, lab_elem)
def GET(self):
    """web.py GET handler: stream a chooser <select> widget as a series of
    JavaScript document.write() lines."""
    # collect query string parameters
    jurisdiction = web.input().get('jurisdiction', None)
    exclude = web.input().get('exclude', [])
    locale = web.input().get('locale', 'en')
    select = web.input().get('select', None)
    # ensure exclude is a list
    if type(exclude) != list:
        exclude = [exclude]
    html = chooser_dropdown(jurisdiction, exclude, locale, select)
    # is there a root <select> tag included
    if ET.iselement(html):
        yield "document.write('<select name=\"%s\">');\n" % \
            html.attrib['name']
    # emit each child node as its own document.write call
    for node in html:
        yield "document.write('%s');\n" % ET.tostring(node)
    if ET.iselement(html):
        yield "document.write('</select>');"
    return
def DeserializeXML(cls, element):
    """Deserialize a <Project> XML element into a Project instance."""
    assert element.tag == 'Project'
    proj = Project()
    # required project attributes
    for attr in ('frameworkVersion', 'name', 'type', 'path'):
        proj.getAttr(element, attr, required=True)
    proj.getAttr(element, 'rootNamespace')
    for child in element:
        # skip comments and anything that is not a real element
        if not etree.iselement(child) or child.tag is etree.Comment:
            continue
        tag = child.tag
        if tag == 'Configuration':
            proj.configurations.append(Configuration.DeserializeXML(child))
        elif tag == 'ReferencePath':
            proj.referencePaths.append(child.text)
        elif tag == 'Files':
            for filedata in child:
                if etree.iselement(filedata) and filedata.tag is not etree.Comment:
                    proj.files.append(File.DeserializeXML(filedata))
        elif tag == 'Reference':
            proj.references.append(Reference.DeserializeXML(child))
        else:
            print('!!! Unknown project tag child {}'.format(tag))
    return proj
def _get_src_element(self):
    """
    overloaded getter method for the src_element property

    Parses the source XML file and returns its root element. If parsing
    fails for any reason, a generic clip XML element is generated and
    returned instead.

    Note: also resets the dest_xml_path if it fails so the source
    XML file is not overwritten unintentionally.
    """
    try:
        try:
            # parse the document; must yield a root <clip> element
            src_element = ET.parse(self.src_xml_path,
                                   parser=OpenClip.parser).getroot()
        except ET.XMLSyntaxError as e:
            # not an XML file...
            raise InvalidOpenClipFile(e)
        except IOError as e:
            # not a file!
            raise InvalidOpenClipFile(e)
        # BUGFIX: this condition previously used `and`, so a well-formed
        # XML file whose root was not <clip> slipped through; per the
        # comment below the intent is to reject anything that is not a
        # <clip> root element.
        if not ET.iselement(src_element) or src_element.tag != "clip":
            # XML but NOT a clip file
            raise InvalidOpenClipFile("Cannot retrieve root <clip> Element.")
    except InvalidOpenClipFile as e:
        # ok so we tried but we'll have to init a new etree
        self.dest_xml_path = None
        LOG.debug(e)
        src_element = self.create_etree().getroot()
    except Exception:
        # deliberately broad best-effort fallback, but no longer a bare
        # `except:` (which also swallowed KeyboardInterrupt/SystemExit)
        self.dest_xml_path = None
        LOG.debug("Unknown Error parsing file... Does it exist?")
        LOG.warning("could not parse file. Generating generic clip XML without track or version.")
        src_element = self.create_etree().getroot()
    return src_element
def parse(self):
    """Sanitize the parsed HTML fragment and return it serialized as XML bytes.

    Elements outside ``self.permitted_elements`` are unwrapped (children
    kept), ``javascript:`` anchors are unwrapped, and attributes outside
    ``self.permitted_attrs`` are stripped.

    Returns an empty byte string when there is nothing to parse.
    """
    if self.lxml is None:
        # BUGFIX: previously `return content` referenced an undefined
        # name and raised NameError; return an empty result instead.
        return b''
    if self.lxml.tag not in self.permitted_elements:
        self.lxml.tag = 'div'
    for elem in self.lxml.xpath('//*'):
        if not etree.iselement(elem):
            elem.drop_tree()
        elif elem.tag not in self.permitted_elements:
            elem.drop_tag()
        elif (elem.tag == 'a' and 'href' in elem.attrib
              and elem.attrib['href'].startswith('javascript:')):
            elem.drop_tag()
        # BUGFIX: iterate a snapshot of the keys — removing attributes
        # while iterating the live attrib mapping is unsafe
        for key in list(elem.attrib):
            if key not in self.permitted_attrs:
                elem.attrib.pop(key)
    return etree.tostring(
        self.lxml,
        method='xml',
        encoding='utf-8',
        xml_declaration=None,
        pretty_print=False,
        with_tail=True,
        standalone=None
    )
def __init__(self, version=current_version):
    """
    Instantiates a CompatibilityHelper object, which contains various
    version-specific information that helps maintain backwards-compatibility
    with older versions of settings files.

    @param version: Pass the root element of the file you're going to read
    and its version will be read out. Alternatively, pass a float, as long
    as that float is a valid version. Default for the parameter is the
    current version.

    @raise CompatibilityException: Raised if an invalid version is passed.
    """
    if etree.iselement(version):
        # Pull the version number out of the root element's attributes.
        if self.versionTag not in version.attrib:
            raise CompatibilityException(CompatibilityException.ERR_INVALID_VERSION)
        self.version = float(version.get(self.versionTag))
        version = self.version
    else:
        self.version = version

    # When more versions are out, if the tags have changed, we'll
    # go through an if/elif control here. For now, just check for
    # invalid versions.
    if version > self.max_version:
        raise CompatibilityException(CompatibilityException.ERR_FUTURE)
    if version < self.first_version:
        raise CompatibilityException(CompatibilityException.ERR_INVALID_VERSION)
def test_dict_to_etree_list_of_nodes(self):
    """dict values that are lists should expand into repeated/nested children."""
    list_of_nodes = {
        'node1': [
            {'subnode11': None},
        ],
        'node2': [
            {'subnode21': None},
            {'subnode22': None},
        ],
        'node3': [
            {'subnode31': None},
        ],
        'node4': [
            'simple text',
            None,
            None
        ]
    }
    helpers.dict_to_etree(list_of_nodes, self.root)
    mapped_nodes = {node.tag for node in self.root}
    assert etree.iselement(self.root)
    # six top-level children in total
    assert len(self.root) == 6
    assert set(list_of_nodes.keys()) == mapped_nodes
    # per-node child counts
    assert len(self.root[0]) == 1
    assert len(self.root[1]) == 1
    assert len(self.root[2]) == 2
def summarizeSingleSegment(eRte):
    """
    Traverses all points in a route segment and returns the route name,
    point count, total distance, climb, and descend.

    Raises commandError("NOELE") for a non-element argument,
    commandError("NONAME") when the route has no <name> child, and
    commandError("NOPTS") when the route contains no route points.
    """
    if not etree.iselement(eRte):
        raise commandError("NOELE")
    NS = getNS(eRte)
    eRteName = eRte.find(NS % 'name')
    if eRteName is None:
        raise commandError("NONAME")
    rteName = eRteName.text
    eRtePts = eRte.findall(NS % 'rtept')
    # BUGFIX: findall() returns a (possibly empty) list, never None, so
    # the previous `is None` check could never fire; test for emptiness
    # so "NOPTS" is actually raised for point-less segments.
    if not eRtePts:
        raise commandError("NOPTS")
    lLatLons = [getLatLon(eRtePt) for eRtePt in eRtePts]
    # per-point elevations are optional; points without <ele> are skipped
    # (removed a dead, never-consumed per-point name generator and its
    # always-false None check)
    lEles = [float(eEle.text)
             for eEle in (eRtePt.find(NS % 'ele') for eRtePt in eRtePts)
             if eEle is not None]
    srcLength = lengthOf(lLatLons)
    srcClimb, srcDescend = eleProfileOf(lEles)
    return rteName, len(lLatLons), srcLength, srcClimb, srcDescend
def get_key_chain_value(self, event, value):
    """Store *value* at the position named by ``self.key`` (a separator-joined
    key chain) inside *event*, creating intermediate XML elements as needed.

    The first key addresses the event field; the remaining keys walk — and
    extend — the XML tree stored in that field.  Logs and re-raises when the
    stored value cannot be traversed as XML.
    """
    keys = self.key.split(self.separator)
    event_key = keys.pop(0)
    if len(keys) == 0:
        # no chain: plain event-field assignment
        event.set(event_key, value)
    else:
        root_element = event.get(event_key, None)
        current_element = root_element
        try:
            item = keys.pop(0)
            # the first chained key must match the stored root's tag;
            # the bare `raise` deliberately trips the except below
            if not item == current_element.tag:
                raise
            while True:
                if len(keys) > 0:
                    item = keys.pop(0)
                    next_step = current_element.find(item)
                    if not etree.iselement(next_step):
                        # missing link in the chain: create it
                        next_step = etree.Element(item)
                        current_element.append(next_step)
                    current_element = next_step
                else:
                    # end of the chain: store the value as element text
                    current_element.text = str(value)
                    break
        except Exception as err:
            self.logger.error("Unable to follow key chain. Ran into non-xml value of '{value}'".format(value=current_element), event=event)
            raise
    return event
def __call__(self, tag, *children, **attrib):
    """ElementMaker-style factory: build an element named *tag*, apply
    *attrib* and append/convert each of *children* via the typemap."""
    get = self._typemap.get
    # qualify the tag with the default namespace unless already qualified
    if self._namespace is not None and tag[0] != '{':
        tag = self._namespace + tag
    elem = self._makeelement(tag, nsmap=self._nsmap)
    if attrib:
        # the dict handler in the typemap applies the attributes
        get(dict)(elem, attrib)
    for item in children:
        if callable(item):
            # lazily-produced children are invoked first
            item = item()
        t = get(type(item))
        if t is None:
            # no direct handler: raw elements are appended as-is
            if ET.iselement(item):
                elem.append(item)
                continue
            for basetype in type(item).__mro__:
                # See if the typemap knows of any of this type's bases.
                t = get(basetype)
                if t is not None:
                    break
            else:
                raise TypeError("bad argument type: %s(%r)" % (type(item).__name__, item))
        v = t(elem, item)
        if v:
            # the converter returned a value needing further processing
            get(type(v))(elem, v)
    return elem
def test_dict_to_etree_nodes_with_attr_and_text(self):
    """'@'-prefixed keys should become attributes and '#text' the node text."""
    node1_text = 'Hai, I R node1!'
    node2_text = 'I do not talk to strangers'
    nodes_with_attr_and_text = {
        'node1': {
            '@personality': 'brave',
            '#text': node1_text,
        },
        'node2': {
            '@personality': 'shy',
            '#text': node2_text,
            '@age': '21',
        }
    }
    helpers.dict_to_etree(nodes_with_attr_and_text, self.root)
    mapped_nodes = {node.tag for node in self.root}
    assert etree.iselement(self.root)
    assert len(self.root) == 2
    assert set(nodes_with_attr_and_text.keys()) == mapped_nodes
    assert self.root[0].text == node1_text
    assert self.root[1].text == node2_text
    assert self.root[0].get('personality') == 'brave'
    assert self.root[1].get('personality') == 'shy'
    assert self.root[1].get('age') == '21'
    # attributes must not leak between sibling nodes
    assert 'age' not in self.root[0].attrib
def __init__(self, xml):
    u"""Creates the mappet object from either lxml object, a string or a dict.

    If you pass a dict without root element, one will be created for you with
    'root' as tag name.

    >>> Mappet({'a': {'#text': 'list_elem_1', '@attr1': 'val1'}}).to_str()
    '<a attr1="val1">list_elem_1</a>'
    >>> Mappet({'#text': 'list_elem_1', '@attr1': 'val1'}).to_str()
    '<root attr1="val1">list_elem_1</root>'
    """
    if etree.iselement(xml):
        # already a parsed element: adopt it directly
        self._xml = xml
    elif isinstance(xml, basestring):
        self._xml = etree.fromstring(xml)
    elif isinstance(xml, dict):
        if len(xml) == 1:
            # a single top-level key becomes the root tag
            # (Python 2: dict.keys() returns an indexable list)
            root_name = xml.keys()[0]
            body = xml[root_name]
        else:
            # multiple keys: wrap everything under a synthetic <root>
            root_name = 'root'
            body = xml
        self._xml = helpers.dict_to_etree(body, etree.Element(root_name))
    else:
        raise AttributeError('Specified data cannot be used to construct a Mappet object.')
def __call__(self, oeb, opts):
    """Rewrite intra-book links in every manifest item (markup and CSS)
    through self.url_replacer, then update guide references and the TOC
    from self.rename_map."""
    import cssutils
    self.log = oeb.logger
    self.opts = opts
    self.oeb = oeb
    for item in oeb.manifest.items:
        self.current_item = item
        if etree.iselement(item.data):
            # XML/XHTML content: rewrite element link attributes
            rewrite_links(self.current_item.data, self.url_replacer)
        elif hasattr(item.data, 'cssText'):
            # stylesheet object: rewrite url(...) references
            cssutils.replaceUrls(item.data, self.url_replacer)
    if self.oeb.guide:
        for ref in self.oeb.guide.values():
            href = urlnormalize(ref.href)
            href, frag = urldefrag(href)
            replacement = self.rename_map.get(href, None)
            if replacement is not None:
                nhref = replacement
                if frag:
                    # keep the fragment on the renamed target
                    nhref += '#' + frag
                ref.href = nhref
    if self.oeb.toc:
        self.fix_toc_entry(self.oeb.toc)
def replaceContents(el, newElements):
    """Empty *el*, then copy the text and children of *newElements* into it.

    Returns *el* for chaining.
    """
    clearContents(el)
    if etree.iselement(newElements) and newElements.text is not None:
        appendChild(el, newElements.text)
    for child in newElements:
        appendChild(el, child)
    return el
def __call__(self, etree):
    """Validate doc using Schematron.

    Returns true if document is valid, false if not.
    """
    self._clear_error_log()
    result = self._validator(etree)
    if self._store_report:
        self._validation_report = result
    errors = self._validation_errors(result)
    if errors:
        # resolve a file name for log entries; the argument may be an
        # element or an ElementTree
        if _etree.iselement(etree):
            fname = etree.getroottree().docinfo.URL or "<file>"
        else:
            fname = etree.docinfo.URL or "<file>"
        for error in errors:
            # Does svrl report the line number, anywhere? Don't think so.
            self._append_log_message(
                domain=self._domain,
                type=self._error_type,
                level=self._level,
                line=0,
                message=_etree.tostring(error, encoding="unicode"),
                filename=fname,
            )
        return False
    return True
def print_result(result, pretty_print, encoding=None, _is_py3=sys.version_info[0] >= 3):
    """Serialize *result* (element, string, or any other object) and write
    it to stdout, honouring the requested encoding and pretty-printing.
    Works on both Python 2 and 3."""
    stdout = sys.stdout
    # default to utf8 when piping to a non-terminal without explicit encoding
    if not stdout.isatty() and not encoding:
        encoding = "utf8"
    if et.iselement(result):
        result = et.tostring(
            result, xml_declaration=False,
            with_tail=False, pretty_print=pretty_print, encoding=encoding
        )
        if not pretty_print:
            # pretty printing appends newline, otherwise we do it
            if isinstance(result, unicode):
                result += "\n"
            else:
                result += "\n".encode("ascii")
    elif isinstance(result, basestring):
        result += "\n"
    else:
        result = "%r\n" % result  # '%r' for better number formatting
    if encoding and encoding != "unicode" and isinstance(result, unicode):
        result = result.encode(encoding)
    if _is_py3 and not isinstance(result, unicode):
        # Python 3: bytes must go through the buffer layer
        stdout.buffer.write(result)
    else:
        stdout.write(result)
def request(self, rpc_command, source=None, filter=None):
    """
    *rpc_command* specifies rpc command to be dispatched either in plain text
    or in xml element format (depending on command)

    *source* name of the configuration datastore being queried

    *filter* specifies the portion of the configuration to retrieve
    (by default entire configuration is retrieved)

    :seealso: :ref:`filter_params`

    Examples of usage::

        dispatch('clear-arp-table')

    or dispatch element like ::

        xsd_fetch = new_ele('get-xnm-information')
        sub_ele(xsd_fetch, 'type').text="xml-schema"
        sub_ele(xsd_fetch, 'namespace').text="junos-configuration"
        dispatch(xsd_fetch)
    """
    # accept either a ready-made element or a bare command name
    node = rpc_command if etree.iselement(rpc_command) else new_ele(rpc_command)
    if source is not None:
        node.append(util.datastore_or_url("source", source, self._assert))
    if filter is not None:
        node.append(util.build_filter(filter))
    return self._request(node)
def __init__(self, name=None, element=None, ns=None, nsmap=dict(), idattribute='id', path=None):
    """Root XML node wrapper.

    Either adopts *element* as the backing etree object or, when *path*
    is given without an element, creates a fresh element named *name*.
    Name and namespace fall back to what is read from the element, and
    the child node tree is then built.

    Raises ValueError when the adopted element's fully-qualified tag does
    not match the expected '{ns}name'.

    NOTE(review): ``nsmap=dict()`` is a shared mutable default argument;
    appears only read here, but verify no caller mutates ``namespaces``.
    """
    self.format = 0
    self.outpath = path
    self.element = element
    if self.element is None and path is not None:
        # no element given but a target path: start a new document root
        self.element = etree.Element(name, nsmap=nsmap)
    alsoProvides(self, IRoot)
    self.namespaces = nsmap
    self.idattribute = idattribute
    _ns, _name = self._extractname(self.element)
    # fall back to the name/namespace extracted from the element
    if not name:
        name = _name
    if not ns:
        ns = _ns
    if ns:
        self.prefix = '{%s}' % ns
    else:
        self.prefix = ''
    self.ns = ns
    OrderedNode.__init__(self, name=name)
    if element is None:
        return
    if etree.iselement(element):
        # sanity check: the adopted element must carry the expected tag
        fq = '%s%s' % (self.prefix, name)
        if element.tag != fq:
            raise ValueError, \
                'Fq of given element does not match (%s != %s)' \
                % (element.tag, fq)
        children = self.element.getchildren()
    else:
        # not an element: treat it as a tree and descend from its root
        children = [element.getroot()]
    self._buildchildren(children)
def __call__(self, tag, *children, **attrib):
    """ElementMaker-style factory: create element *tag*, apply *attrib*
    via the typemap, and append converted *children*."""
    get = self._typemap.get
    # qualify the tag with the default namespace unless already qualified
    if self._namespace is not None and tag[0] != '{':
        tag = self._namespace + tag
    #I HAVE ADDED THIS TO BE MORE COMPATIBLE WITH THE PYTHON ETREE
    if self._nsmap is not None:
        elem = self._makeelement(tag, nsmap=self._nsmap)
    else:
        elem = self._makeelement(tag)
    if attrib:
        # the dict handler in the typemap applies the attributes
        get(dict)(elem, attrib)
    for item in children:
        if callable(item):
            # lazily-produced children are invoked first
            item = item()
        t = get(type(item))
        if t is None:
            # unregistered type: only raw elements are accepted as-is
            if etree.iselement(item):
                elem.append(item)
                continue
            raise TypeError("bad argument type: %r" % item)
        else:
            v = t(elem, item)
            if v:
                # converter returned a value needing further processing
                get(type(v))(elem, v)
    return elem
def writeGpxFile(eGpx, lLatLon, sOutFile):
    """Write the <gpx> element to *sOutFile*, refreshing its <bounds>
    from *lLatLon* (when given), its <time> stamp, and the creator
    attribute.

    Raises commandError("NOELE") if *eGpx* is not an element.
    """
    if not etree.iselement(eGpx):
        raise commandError("NOELE")
    NS = getNS(eGpx)
    if not lLatLon is None:
        eBounds = etree.ETXPath(NS % 'bounds')(eGpx)
        if eBounds:
            # recompute the bounding box over all supplied points
            minlat, minlon, maxlat, maxlon = \
                minmaxOf(lLatLon, NULL_BOUNDS)
            eBounds[0].set('minlat', str(minlat))
            eBounds[0].set('minlon', str(minlon))
            eBounds[0].set('maxlat', str(maxlat))
            eBounds[0].set('maxlon', str(maxlon))
    eTime = etree.ETXPath(NS % 'time')(eGpx)
    if eTime:
        # refresh the document timestamp
        eTime[0].text = getNowUtc()
    eGpx.set('creator', 'gpxrte - http://www.josef-heid.de')
    etree.ElementTree(eGpx).write(sOutFile, encoding='utf-8',
                                  xml_declaration=True, pretty_print=True)
def f(x):
    """Normalize an XPath result: elements become stripped serialized
    bytes, string results become plain str, anything else passes through."""
    if etree.iselement(x):
        return etree.tostring(x).strip()
    if isinstance(x, etree._ElementStringResult):
        return str(x)
    return x
def print_result(result, pretty_print):
    """Print *result* to stdout, serializing elements to XML first (Python 2)."""
    if et.iselement(result):
        result = et.tostring(result, xml_declaration=False,
                             pretty_print=pretty_print)
        if pretty_print:
            result = result[:-1]  # strip newline at the end
    print result
def _add_varlist_item(varlist, key, brief=None, descr=None): list_entry = ET.Element('varlistentry') varlist.append(list_entry) term = ET.Element('term') if ET.iselement(key): term.append(key) else: term.text = key.strip() list_entry.append(term) list_item = ET.Element('listitem') if brief is not None: if len(brief) > 0: list_item.text = "- %s" % brief else: list_item.text = ":" if descr is not None: if descr.__class__.__name__ == 'list': list_item.extend(descr) else: list_item.append(descr) list_entry.append(list_item) return list_item
def preRunMacro(self, obj, parameters):
    """Normalize *obj* — an XML string, a macro name plus *parameters*, a
    multi-line macro listing, or an etree element — into a macro XML tree
    and register every contained macro as running.

    Returns the XML root of the (possibly synthesized) macro sequence.
    Raises TypeError for unsupported *obj* types.
    """
    self._clearRunMacro()
    xml_root = None
    if isinstance(obj, (str, unicode)):
        if obj.startswith('<') and not parameters:
            # raw XML document passed directly
            xml_root = etree.fromstring(obj)
        else:
            macros = []
            if len(parameters) == 0:
                # multi-line form: each line is "<name> <arg> <arg> ..."
                macros_strs = obj.split('\n')
                for m in macros_strs:
                    pars = m.split()
                    macros.append((pars[0], pars[1:]))
            else:
                # single macro name with an explicit parameter list
                parameters = map(str, parameters)
                macros.append((obj, parameters))
            xml_root = xml_seq = etree.Element('sequence')
            for m in macros:
                macro_name = m[0]
                macro_params = m[1]
                xml_macro = self._createMacroXml(macro_name, macro_params)
                # each macro instance gets a unique id for tracking
                xml_macro.set('id', str(uuid.uuid1()))
                xml_seq.append(xml_macro)
    elif etree.iselement(obj):
        xml_root = obj
    else:
        raise TypeError('obj must be a string or a etree.Element')
    self._running_macros = {}
    for macro_xml in xml_root.xpath('//macro'):
        id, name = macro_xml.get('id'), macro_xml.get('name')
        self._running_macros[id] = Macro(self, name, id, macro_xml)
    return xml_root
def simpleDiagnostics(rootElement):
    """Print basic structural facts about *rootElement* (Python 2)."""
    print "Is root an element? ", etree.iselement(rootElement)  # test if it's some kind of Element
    if len(rootElement):
        isParent = rootElement is rootElement[0].getparent()  # lxml.etree only! test if it has children
        print "Does root element have children? ", isParent
        print "Childreen are"
        print([ c.tag for c in rootElement ])
def getNS(e):
    """Return a '{namespace}%s' tag-name template built from *e*'s default
    namespace, falling back to its 'gpx' prefix mapping.

    Raises commandError("NOELE") when *e* is not an element.
    """
    if not etree.iselement(e):
        raise commandError("NOELE")
    try:
        uri = e.nsmap[None]
    except KeyError:
        uri = e.nsmap['gpx']
    return '{' + uri + '}%s'
def clean_concepts(linkbases):
    """Searches through the provided dictionary of linkbases using the xsd
    to build a list of extension concepts. Then finds any that aren't
    referenced by the presentation, definition, calculation, or label
    linkbases and removes them.
    """
    concepts_removed = []
    xlink = "{http://www.w3.org/1999/xlink}"
    # locators reference concepts as "<schema-file>#<concept-id>"
    schema = linkbases["xsd"]["filename"].split("/")[-1]
    href_xpath = ".//*[@{0}href='{1}#%s']".format(xlink, schema)
    concepts_xpath = ".//{http://www.w3.org/2001/XMLSchema}element"
    for concept in linkbases["xsd"]["root"].iterfind(concepts_xpath):
        identifier = concept.get("id")
        used = False
        # a concept is "used" if any non-xsd linkbase references it
        for key, val in linkbases.items():
            exists = val["root"].find(href_xpath % identifier)
            if key != "xsd" and etree.iselement(exists):
                used = True
                break
        if not used:
            linkbases["xsd"]["root"].remove(concept)
            concepts_removed.append(identifier)
    return concepts_removed
def _to_volume(self, volume, node=None):
    """Convert a storage XML response element into a StorageVolume.

    *node* is accepted for interface compatibility but is not used here.
    """
    # response child tag -> extra-dict key
    ATTRIBUTE_NAME_MAP = {
        'dataCenterId': 'datacenter_id',
        'storageId': 'storage_id',
        'storageName': 'storage_name',
        'serverIds': 'server_id',
        'creationTime': 'creation_time',
        'lastModificationTime': 'last_modification_time',
        'provisioningState': 'provisioning_state',
        'size': 'size',
    }
    extra = {}
    for attribute_name, extra_name in ATTRIBUTE_NAME_MAP.items():
        # optional children: missing elements map to None
        elem = volume.find(attribute_name)
        if ET.iselement(elem):
            value = elem.text
        else:
            value = None
        extra[extra_name] = value

    # a mounted image, when present, carries (id, name) as its first two children
    if ET.iselement(volume.find('mountImage')):
        image_id = volume.find('mountImage')[0].text
        image_name = volume.find('mountImage')[1].text
    else:
        image_id = None
        image_name = None
    extra['image_id'] = image_id
    extra['image_name'] = image_name
    # normalize types: size to int, provisioning state to a NodeState
    extra['size'] = int(extra['size']) if extra['size'] else 0
    extra['provisioning_state'] = \
        self.PROVISIONING_STATE.get(extra['provisioning_state'],
                                    NodeState.UNKNOWN)
    storage_id = extra['storage_id']
    storage_name = extra['storage_name']
    size = extra['size']
    return StorageVolume(
        id=storage_id,
        name=storage_name,
        size=size,
        driver=self.connection.driver,
        extra=extra)
def __init__(self, etree=None, file=None, include=True, expand=True,
             include_params={}, expand_params={}, compile_params={},
             store_schematron=False, store_xslt=False, store_report=False,
             phase=None, error_finder=ASSERTS_ONLY):
    """Compile a schematron schema (or a document with embedded schematron
    rules) into the validating XSLT used by __call__.

    *etree* may be an element or an ElementTree; alternatively *file*
    names a document to parse.  The iso-schematron include/expand steps
    can be toggled and parameterized; the intermediate schematron/XSLT
    documents and the validation report can optionally be stored on the
    instance.

    Raises SchematronParseError or ValueError for unusable input.
    """
    super(Schematron, self).__init__()
    self._store_report = store_report
    self._schematron = None
    self._validator_xslt = None
    self._validation_report = None
    if error_finder is not self.ASSERTS_ONLY:
        self._validation_errors = error_finder

    # parse schema document, may be a schematron schema or an XML Schema or
    # a RelaxNG schema with embedded schematron rules
    root = None
    try:
        if etree is not None:
            if _etree.iselement(etree):
                root = etree
            else:
                root = etree.getroot()
        elif file is not None:
            root = _etree.parse(file).getroot()
    except Exception:
        raise _etree.SchematronParseError("No tree or file given: %s" %
                                          sys.exc_info()[1])
    if root is None:
        raise ValueError("Empty tree")
    if root.tag == _schematron_root:
        schematron = root
    else:
        # not a plain schematron document: try to extract embedded rules
        schematron = self._extract(root)
    if schematron is None:
        raise _etree.SchematronParseError(
            "Document is not a schematron schema or schematron-extractable")

    # perform the iso-schematron skeleton implementation steps to get a
    # validating xslt
    if include:
        schematron = self._include(schematron, **include_params)
    if expand:
        schematron = self._expand(schematron, **expand_params)
    if not schematron_schema_valid(schematron):
        raise _etree.SchematronParseError(
            "invalid schematron schema: %s" %
            schematron_schema_valid.error_log)
    if store_schematron:
        self._schematron = schematron

    # add new compile keyword args here if exposing them
    compile_kwargs = {'phase': phase}
    compile_params = _stylesheet_param_dict(compile_params, compile_kwargs)
    validator_xslt = self._compile(schematron, **compile_params)
    if store_xslt:
        self._validator_xslt = validator_xslt
    self._validator = _etree.XSLT(validator_xslt)
def to_ele(x):
    "Convert and return the :class:`~xml.etree.ElementTree.Element` for the XML document *x*. If *x* is already an :class:`~xml.etree.ElementTree.Element` simply returns that."
    # pass elements straight through; only parse strings
    if etree.iselement(x):
        return x
    return etree.fromstring(x)
# Demonstration of lxml.etree child-list semantics
# (`childs` and `root` are defined earlier in the script)
for child in childs:
    print(child.tag)
root.insert(0, etree.Element("child0"))
childs = list(root)
for child in childs:
    print(child.tag)
# slicing a parent element yields lists of child elements
start = root[:1]
end = root[-1:]
print(start[0].tag)
print(end[0].tag)
print(end[0].tag)
print(etree.iselement(root))
if len(root):  # this no longer works!
    print("The root element has children")
for child in root:
    print(child.tag)
root[0] = root[-1]  # this moves the element in lxml.etree!
for child in root:
    print(child.tag)
# contrast with a plain Python list, where assignment copies the reference
l = [0, 1, 2, 3]
l[0] = l[-1]
print(l)
# and store the result in a variable called `xml_doc` xml_doc = etree.parse(xml_source_file) # there is a lot of malformed xml out there! in order to make sure that # what looks like good xml actually *is*, we'll start by getting # the current xml document's "root" element document_root = xml_doc.getroot() # let's print it out to see what it looks like. Because it's currently # stored as byte data, we need to use the etree.tostring() method in order # to see anything useful print(etree.tostring(document_root)) # if the document_root is a well-formed XML element, continue with our # wrangling efforts if etree.iselement(document_root): # create our output file, naming it "xml_"+filename output_file = open("xml_" + filename + ".csv", "w") # there is a "writer" recipe that lets us easily write `.csv`-formatted rows # so, just as we did when "reading", now that we've opened our `output_file` # we'll use this recipe to easily write rows, instead of reading them output_writer = csv.writer(output_file) # thanks to lxml, each xml element (or "node") has a property called "attrib" # whose data type is a Python dictionary (dict). # a `dict` type has several methods of accessing its contents # including the `.keys()`, `.values()`, and `.items()` methods, which return # lists (see: https://docs.python.org/3/library/stdtypes.html#typesmapping) # in this case, the list returned by the `.keys()` method will be useful as column headers
def parse_opendrive(rootNode):
    """ Tries to parse XML tree, return OpenDRIVE object """

    # Only accept xml element
    if not etree.iselement(rootNode):
        raise TypeError("Argument rootNode is not a xml element")

    newOpenDrive = OpenDrive()

    # Header
    header = rootNode.find("header")

    if header is not None:
        # Reference
        if header.find("geoReference") is not None:
            # placeholder: geo reference data is currently ignored
            pass

    # Junctions
    for junction in rootNode.findall("junction"):
        newJunction = Junction()
        newJunction.id = int(junction.get("id"))
        newJunction.name = str(junction.get("name"))

        for connection in junction.findall("connection"):
            newConnection = JunctionConnection()
            newConnection.id = connection.get("id")
            newConnection.incomingRoad = connection.get("incomingRoad")
            newConnection.connectingRoad = connection.get("connectingRoad")
            newConnection.contactPoint = connection.get("contactPoint")

            for laneLink in connection.findall("laneLink"):
                newLaneLink = JunctionConnectionLaneLink()
                newLaneLink.fromId = laneLink.get("from")
                newLaneLink.toId = laneLink.get("to")
                newConnection.addLaneLink(newLaneLink)

            newJunction.addConnection(newConnection)

        newOpenDrive.junctions.append(newJunction)

    # Load roads
    for road in rootNode.findall("road"):
        newRoad = Road()
        newRoad.id = int(road.get("id"))
        newRoad.name = road.get("name")
        # "-1" marks a road that does not belong to a junction
        newRoad.junction = int(road.get("junction")) if road.get("junction") != "-1" else None

        # TODO: Problems!!!!
        newRoad.length = float(road.get("length"))

        # Links
        if road.find("link") is not None:

            predecessor = road.find("link").find("predecessor")

            if predecessor is not None:
                newPredecessor = RoadLinkPredecessor()
                newPredecessor.elementType = predecessor.get("elementType")
                newPredecessor.elementId = predecessor.get("elementId")
                newPredecessor.contactPoint = predecessor.get("contactPoint")
                newRoad.link.predecessor = newPredecessor

            successor = road.find("link").find("successor")

            if successor is not None:
                newSuccessor = RoadLinkSuccessor()
                newSuccessor.elementType = successor.get("elementType")
                newSuccessor.elementId = successor.get("elementId")
                newSuccessor.contactPoint = successor.get("contactPoint")
                newRoad.link.successor = newSuccessor

            for neighbor in road.find("link").findall("neighbor"):
                newNeighbor = RoadLinkNeighbor()
                newNeighbor.side = neighbor.get("side")
                newNeighbor.elementId = neighbor.get("elementId")
                newNeighbor.direction = neighbor.get("direction")
                newRoad.link.neighbors.append(newNeighbor)

        # Type
        for roadType in road.findall("type"):
            newType = RoadType()
            newType.sPos = roadType.get("s")
            newType.type = roadType.get("type")

            # NOTE(review): element truthiness tests "has children", not
            # "exists" (and is deprecated) — confirm this is intended
            if roadType.find("speed"):
                newSpeed = RoadTypeSpeed()
                newSpeed.max = roadType.find("speed").get("max")
                newSpeed.unit = roadType.find("speed").get("unit")
                newType.speed = newSpeed

            newRoad.types.append(newType)

        # Plan view
        for geometry in road.find("planView").findall("geometry"):
            startCoord = [float(geometry.get("x")), float(geometry.get("y"))]

            if geometry.find("line") is not None:
                newRoad.planView.addLine(startCoord, float(geometry.get("hdg")), float(geometry.get("length")))
            elif geometry.find("spiral") is not None:
                newRoad.planView.addSpiral(startCoord, float(geometry.get("hdg")), float(geometry.get("length")), float(geometry.find("spiral").get("curvStart")), float(geometry.find("spiral").get("curvEnd")))
            elif geometry.find("arc") is not None:
                newRoad.planView.addArc(startCoord, float(geometry.get("hdg")), float(geometry.get("length")), float(geometry.find("arc").get("curvature")))
            elif geometry.find("poly3") is not None:
                raise NotImplementedError()
            elif geometry.find("paramPoly3") is not None:
                # pMax is only known when the parameter range is the arc length
                if geometry.find("paramPoly3").get("pRange"):
                    if geometry.find("paramPoly3").get("pRange") == "arcLength":
                        pMax = float(geometry.get("length"))
                    else:
                        pMax = None
                else:
                    pMax = None

                newRoad.planView.addParamPoly3(
                    startCoord,
                    float(geometry.get("hdg")),
                    float(geometry.get("length")),
                    float(geometry.find("paramPoly3").get("aU")),
                    float(geometry.find("paramPoly3").get("bU")),
                    float(geometry.find("paramPoly3").get("cU")),
                    float(geometry.find("paramPoly3").get("dU")),
                    float(geometry.find("paramPoly3").get("aV")),
                    float(geometry.find("paramPoly3").get("bV")),
                    float(geometry.find("paramPoly3").get("cV")),
                    float(geometry.find("paramPoly3").get("dV")),
                    pMax
                )
            else:
                raise Exception("invalid xml")

        # Elevation profile
        if road.find("elevationProfile") is not None:
            for elevation in road.find("elevationProfile").findall("elevation"):
                newElevation = RoadElevationProfileElevation()
                newElevation.sPos = elevation.get("s")
                newElevation.a = elevation.get("a")
                newElevation.b = elevation.get("b")
                newElevation.c = elevation.get("c")
                newElevation.d = elevation.get("d")
                newRoad.elevationProfile.elevations.append(newElevation)

        # Lateral profile
        if road.find("lateralProfile") is not None:
            for superelevation in road.find("lateralProfile").findall("superelevation"):
                newSuperelevation = RoadLateralProfileSuperelevation()
                newSuperelevation.sPos = superelevation.get("s")
                newSuperelevation.a = superelevation.get("a")
                newSuperelevation.b = superelevation.get("b")
                newSuperelevation.c = superelevation.get("c")
                newSuperelevation.d = superelevation.get("d")
                newRoad.lateralProfile.superelevations.append(newSuperelevation)

            for crossfall in road.find("lateralProfile").findall("crossfall"):
                newCrossfall = RoadLateralProfileCrossfall()
                newCrossfall.side = crossfall.get("side")
                newCrossfall.sPos = crossfall.get("s")
                newCrossfall.a = crossfall.get("a")
                newCrossfall.b = crossfall.get("b")
                newCrossfall.c = crossfall.get("c")
                newCrossfall.d = crossfall.get("d")
                newRoad.lateralProfile.crossfalls.append(newCrossfall)

            for shape in road.find("lateralProfile").findall("shape"):
                newShape = RoadLateralProfileShape()
                newShape.sPos = shape.get("s")
                newShape.t = shape.get("t")
                newShape.a = shape.get("a")
                newShape.b = shape.get("b")
                newShape.c = shape.get("c")
                newShape.d = shape.get("d")
                newRoad.lateralProfile.shapes.append(newShape)

        # Lanes
        lanes = road.find("lanes")

        if lanes is None:
            raise Exception("Road must have lanes element")

        # Lane offset
        for laneOffset in lanes.findall("laneOffset"):
            newLaneOffset = RoadLanesLaneOffset()
            newLaneOffset.sPos = laneOffset.get("s")
            newLaneOffset.a = laneOffset.get("a")
            newLaneOffset.b = laneOffset.get("b")
            newLaneOffset.c = laneOffset.get("c")
            newLaneOffset.d = laneOffset.get("d")
            newRoad.lanes.laneOffsets.append(newLaneOffset)

        # Lane sections
        for laneSectionIdx, laneSection in enumerate(road.find("lanes").findall("laneSection")):
            newLaneSection = RoadLanesSection()

            # Manually enumerate lane sections for referencing purposes
            newLaneSection.idx = laneSectionIdx

            newLaneSection.sPos = laneSection.get("s")
            newLaneSection.singleSide = laneSection.get("singleSide")

            sides = dict(
                left=newLaneSection.leftLanes,
                center=newLaneSection.centerLanes,
                right=newLaneSection.rightLanes
            )

            for sideTag, newSideLanes in sides.items():

                side = laneSection.find(sideTag)

                # It is possible one side is not present
                if side is None:
                    continue

                for lane in side.findall("lane"):

                    newLane = RoadLaneSectionLane()
                    newLane.id = lane.get("id")
                    newLane.type = lane.get("type")
                    newLane.level = lane.get("level")

                    # Lane Links
                    if lane.find("link") is not None:

                        if lane.find("link").find("predecessor") is not None:
                            newLane.link.predecessorId = lane.find("link").find("predecessor").get("id")

                        if lane.find("link").find("successor") is not None:
                            newLane.link.successorId = lane.find("link").find("successor").get("id")

                    # Width
                    for widthIdx, width in enumerate(lane.findall("width")):
                        newWidth = RoadLaneSectionLaneWidth()
                        newWidth.idx = widthIdx
                        newWidth.sOffset = width.get("sOffset")
                        newWidth.a = width.get("a")
                        newWidth.b = width.get("b")
                        newWidth.c = width.get("c")
                        newWidth.d = width.get("d")
                        newLane.widths.append(newWidth)

                    # Border
                    for borderIdx, border in enumerate(lane.findall("border")):
                        newBorder = RoadLaneSectionLaneBorder()
                        newBorder.idx = borderIdx
                        newBorder.sPos = border.get("sOffset")
                        newBorder.a = border.get("a")
                        newBorder.b = border.get("b")
                        newBorder.c = border.get("c")
                        newBorder.d = border.get("d")
                        newLane.borders.append(newBorder)

                    # Road Marks
                    # TODO

                    # Material
                    # TODO

                    # Visiblility
                    # TODO

                    # Speed
                    # TODO

                    # Access
                    # TODO

                    # Lane Height
                    # TODO

                    # Rules
                    # TODO

                    newSideLanes.append(newLane)

            newRoad.lanes.laneSections.append(newLaneSection)

        # OpenDrive does not provide lane section lengths by itself, calculate them by ourselves
        for laneSection in newRoad.lanes.laneSections:

            # Last lane section in road
            if laneSection.idx + 1 >= len(newRoad.lanes.laneSections):
                laneSection.length = newRoad.planView.getLength() - laneSection.sPos

            # All but the last lane section end at the succeeding one
            else:
                laneSection.length = newRoad.lanes.laneSections[laneSection.idx + 1].sPos - laneSection.sPos

        # OpenDrive does not provide lane width lengths by itself, calculate them by ourselves
        for laneSection in newRoad.lanes.laneSections:
            for lane in laneSection.allLanes:
                widthsPoses = np.array([x.sOffset for x in lane.widths] + [laneSection.length])
                widthsLengths = widthsPoses[1:] - widthsPoses[:-1]

                for widthIdx, width in enumerate(lane.widths):
                    width.length = widthsLengths[widthIdx]

        # Objects
        # TODO

        # Signals
        # TODO

        newOpenDrive.roads.append(newRoad)

    return newOpenDrive
def addRoutine(self, Routine):
    """Append *Routine*'s XML element under this object's <Routines> container.

    Creates the <Routines> child element on first use.
    """
    routine_root = Routine.getLocalRoot()
    # Only a proper <Routine> element may be attached.
    assert etree.iselement(routine_root) and routine_root.tag == "Routine"
    if not self.checkIfChild("Routines"):
        self.Routines = etree.SubElement(self.root, "Routines")
    self.Routines.append(routine_root)
def parse(self, rpt_item):
    """
    PluginID data is built as the report is processed however we want
    to also be certain to not duplicate existing t_vulndata so a
    lookup is performed with both the pluginID and fname. If none
    found the record is entered into the database and populates the
    local dict

    Args:
        rpt_item: A ReportItem field (etree._Element or CSV line)

    Returns:
        t_vulndata.id: integer field of db.t_vulndata[id]
        vulndata: A dictionary of fields for t_vulndata
        extradata: A dictionary of extra data fields such as references
    """
    # TODO: Check validity of XML or CSV
    # if not etree.iselement(rpt_item):
    #     log("Invalid plugin data received: %s" % type(rpt_item), logging.ERROR)
    #     return (None, {}, {})

    # extract specific parts of ReportItem
    extradata = {}
    # FIX: raw string and escaped dot -- the previous pattern's bare '.'
    # matched any character before 'nasl'
    SF_RE = re.compile(r'Source File: (\w+)\.nasl')

    if etree.iselement(rpt_item):
        # XML element, parse it as such
        is_xml = True
        extradata['proto'] = rpt_item.get('protocol', 'info')
        extradata['port'] = rpt_item.get('port', 0)
        extradata['svcname'] = rpt_item.findtext('svc_name', '')
        extradata['plugin_output'] = rpt_item.findtext('plugin_output', '')
        extradata['exploit_available'] = rpt_item.findtext(
            'exploit_available', 'false')
        fname = rpt_item.findtext('fname', '')
        pluginID = rpt_item.get('pluginID')
        f_title = rpt_item.findtext('plugin_name', '')
        f_riskscore = rpt_item.get('risk_factor', '')
        f_cvss_score = float(rpt_item.findtext('cvss_base_score', 0.0))
        f_cvss_i_score = float(
            rpt_item.findtext('cvss_temporal_score', 0.0))
        f_description = rpt_item.findtext('description')
        f_solution = rpt_item.findtext('solution')
        f_dt_published = rpt_item.findtext('plugin_publication_date')
        f_dt_added = rpt_item.findtext('plugin_publication_date')
        f_dt_modified = rpt_item.findtext('plugin_modification_date')
        severity = int(rpt_item.get('severity', 0))
        cvss_vectors = rpt_item.findtext(
            'cvss_vector')  # CVSS2#AV:N/AC:M/Au:N/C:P/I:P/A:P
    else:
        # CSV data, parse it as such
        is_xml = False
        extradata['proto'] = rpt_item.get('Protocol', 'info')
        extradata['port'] = rpt_item.get('Port', 0)
        extradata['svcname'] = ''  # TODO: Look this up in etc/services
        extradata['plugin_output'] = rpt_item.get(
            'Plugin Text', rpt_item.get('Plugin Output', ''))
        extradata['exploit_available'] = rpt_item.get('Exploit?', 'false')
        pluginID = rpt_item.get('Plugin', rpt_item.get('Plugin ID'))
        f_title = rpt_item.get('Plugin Name', rpt_item.get('Name', ''))
        f_riskscore = rpt_item.get('Risk Factor', '')
        f_cvss_score = rpt_item.get('CVSS Base Score',
                                    rpt_item.get('CVSS', 0.0))
        f_cvss_i_score = rpt_item.get('CVSS Temporal Score', 0.0)
        f_description = rpt_item.get('Description')
        f_solution = rpt_item.get('Solution')
        f_dt_published = rpt_item.get('Plugin Publication Date')
        f_dt_added = rpt_item.get('Plugin Publication Date')
        f_dt_modified = rpt_item.get('Plugin Modification Date')
        severity = rpt_item.get('Severity', 0)
        cvss_vectors = rpt_item.get(
            'CVSS Vector')  # AV:N/AC:L/Au:N/C:P/I:P/A:N

        # CSV rows carry the source filename only inside the plugin output
        sf_re = SF_RE.search(extradata['plugin_output'])
        if sf_re:
            fname = sf_re.groups()[0]
        else:
            fname = None

    # CSV DictReader sets fields to '' so force float/int if nothing set
    if not f_cvss_score:
        f_cvss_score = 0.0
    if not f_cvss_i_score:
        f_cvss_i_score = 0.0

    # Severity may be not set, set it to zero then
    if not severity:
        severity = 0

    # Severity may also be a word, lets map them to numbers
    severity_map = {
        'Critical': 4,
        'High': 3,
        'Medium': 2,
        'Low': 1,
        'Info': 0,
    }
    if isinstance(severity, str):
        # FIX: .get() with a default instead of [] -- an unexpected word
        # (e.g. 'None') previously raised KeyError and aborted the import
        severity = severity_map.get(severity, 0)

    if not extradata['port']:
        extradata['port'] = 0

    # CSV puts N/A for date fields but we need them to be None or real datetimes...
    if f_dt_published == "N/A":
        f_dt_published = None
    if f_dt_added == "N/A":
        f_dt_added = None
    if f_dt_modified == "N/A":
        f_dt_modified = None

    # set t_vulndata.f_vulnid based on pluginID if no filename is found
    extradata['pluginID'] = pluginID
    if fname:
        # FIX: rstrip('.nasl') strips any trailing run of the characters
        # '.', 'n', 'a', 's', 'l' (mangling names like 'logins.nasl' ->
        # 'log'); remove the literal suffix instead
        if fname.endswith('.nasl'):
            fname = fname[:-len('.nasl')]
        f_vulnid = IS_SLUG()("%s-%s" % (fname, pluginID))[0]  # slugify it
    else:
        f_vulnid = pluginID

    # references with multiple values
    for refdata in self.ref_types:
        extradata[refdata] = []
        if is_xml:
            for i in rpt_item.findall(refdata):
                extradata[refdata].append(i.text)
        else:
            if rpt_item.get(refdata):
                extradata[refdata].append(rpt_item.get(refdata))

    # single value references
    for refdata in self.single_refs:
        if is_xml:
            extradata[refdata] = [rpt_item.findtext(refdata)]
        else:
            if rpt_item.get(refdata):
                extradata[refdata] = rpt_item.get(refdata)

    # check local dict, else check t_vulndata
    if pluginID in self.vulns:
        return self.vulns[pluginID][0], self.vulns[pluginID][1], extradata
    else:
        vuln_row = self.db(self.db.t_vulndata.f_vulnid == f_vulnid).select(
            cache=(self.cache.ram, 180)).first()
        if vuln_row:
            # exists in t_vulndata, return it
            vuln_id = vuln_row.id
            vulndata = vuln_row.as_dict()
            return vuln_id, vulndata, extradata

    # vulnerability-specific data
    vulndata = {
        'f_vulnid': f_vulnid,
        'f_title': f_title,
        'f_riskscore': f_riskscore,
        'f_cvss_score': f_cvss_score,
        'f_cvss_i_score': f_cvss_i_score,
        'f_description': f_description,
        'f_solution': f_solution,
        'f_dt_published': f_dt_published,
        'f_dt_added': f_dt_added,
        'f_dt_modified': f_dt_modified,
        'f_source': 'Nessus',
    }

    # Nessus only has 5 severity levels: 0, 1, 2, 3 and 4 .. We go to 11.
    # Assign 0:0, 1:3, 2:5, 3:8, 4:10
    sevmap = {'0': 0, '1': 3, '2': 5, '3': 8, '4': 10}
    vulndata['f_severity'] = sevmap[str(severity)]

    if cvss_vectors:
        # fixed-position parse of 'AV:N/AC:L/Au:N/C:P/I:P/A:N'
        if cvss_vectors.startswith("CVSS2"):
            cvss_vectors = cvss_vectors[6:]
        vulndata['f_cvss_av'] = cvss_vectors[3]
        vulndata['f_cvss_ac'] = cvss_vectors[8]
        vulndata['f_cvss_au'] = cvss_vectors[13]
        vulndata['f_cvss_c'] = cvss_vectors[17]
        vulndata['f_cvss_i'] = cvss_vectors[21]
        vulndata['f_cvss_a'] = cvss_vectors[25]
    else:
        vulndata['f_cvss_av'] = ''
        vulndata['f_cvss_ac'] = ''
        vulndata['f_cvss_au'] = ''
        vulndata['f_cvss_c'] = ''
        vulndata['f_cvss_i'] = ''
        vulndata['f_cvss_a'] = ''

    vuln_id = self.db.t_vulndata.update_or_insert(**vulndata)
    if not vuln_id:
        vuln_id = self.db(self.db.t_vulndata.f_vulnid == f_vulnid).select(
            cache=(self.cache.ram, 180)).first().id

    if vuln_id:
        self.stats['processed'] += 1
        self.vulns[pluginID] = [vuln_id, vulndata]
        self.db.commit()
        log(" [-] Adding vulnerability to vuln database: %s" % f_vulnid)
        # add/update vulnerability references
        self.db_vuln_refs(vuln_id, vulndata, extradata)
    else:
        log(
            " [!] Error inserting/finding vulnerability in database: %s" %
            f_vulnid, logging.ERROR)

    return vuln_id, vulndata, extradata
def transform_html(input_html_file, template_html_file, sitting_date_iso, output_folder=''):
    """Merge InDesign-exported HTML into the call-list template and write the result.

    Args:
        input_html_file: path to the HTML exported from InDesign
        template_html_file: path to the output template HTML
        sitting_date_iso: sitting date as an ISO date string (used in titles and filename)
        output_folder: optional folder for the output file (defaults to the input's folder)

    Returns:
        Path to the written output file.
    """
    # template tree
    output_tree = html.parse(template_html_file)
    output_root = output_tree.getroot()

    # input html (from InDesign)
    input_tree = html.parse(input_html_file)
    # InDesign outputs xhtml5 we can convert to html5
    # html.xhtml_to_html(input_tree)
    input_root = input_tree.getroot()

    # expecting the date to be marked up like the below
    # <div id="_idContainer000">
    #   <h1 class="Title" lang="en-US"><strong class="Bold">Issued on:</strong> 21 April at 7.00pm</h1>
    # </div>
    issued_date = input_root.xpath('//div[@id="_idContainer000"]/h1')
    if len(issued_date) > 0:
        issued_date = issued_date[0]
    if iselement(issued_date):
        issued_date_text = issued_date.text_content()
        # remove from the input so it is not injected into the body
        issued_date.getparent().remove(issued_date)
    else:
        issued_date_text = ''
        warning(
            'Expected to find issued date in the input HTML file. The issued at time will be missing from the bottom of the output HMTML. Check that the InDesign template has not been tampored with.'
        )

    # should return a formatted string in the form 26th April 2017
    # prepared_date = format_date(prepared_date_iso, strftime='%d %b %Y')
    # also get the date in the form YYMMDD for the output file name

    # clean up the HTML
    # convert bullet to square bullet
    for ele in input_root.xpath('.//span[@class="pythonFindBullet"]'):
        if ele.text == '\u2022':
            ele.text = '\u25A0 '

    # remove filename for internal hyperlinks
    # e.g. href="cmsilist2.html#_idTextAnchor000" -> href="#_idTextAnchor000"
    # usualy filename before #
    iD_file_name = os.path.basename(input_html_file)
    # but sometimes people change the filename but the original filename is in the <title>
    iD_file_title = input_root.findtext('head/title', default='')
    if iD_file_title != '':
        iD_file_title = '{}.html'.format(iD_file_title)
    all_links = input_root.xpath('//a')
    for link in all_links:
        link_href = link.get('href', default=None)
        if link_href:
            link.set(
                'href',
                link_href.replace(iD_file_name, '').replace(iD_file_title, ''))

    # put all the html from the input file into the proper place in the output file
    # get the location in the output_root we want to append to
    append_point = output_root.xpath('//div[@id="content-goes-here"]')
    if len(append_point) < 1:
        print(
            'ERROR: Script can\'t find <div id="content-goes-here"> in the template.'
            ' This is needed as this is where we are going inject html from the input html'
        )
        exit()
    else:
        append_point = append_point[0]

    # change the title to be like
    # <h1 class="mainTitle" id="MainTitleWithDate">
    h1s = input_root.xpath('//h1')
    if len(h1s) < 1:
        print('WARNING: at least one element with a h1 tag was expected. '
              'The title may not appear in the output')
    else:
        h1 = h1s[0]
        h1.set('class', 'mainTitle')
        h1.set('id', 'MainTitleWithDate')
        br = h1.find('br')
        if iselement(br):
            if br.tail:
                br.tail = ' ' + br.tail  # add space
            br.drop_tag()

    # clean up ToC by removing page numbers
    # for element in input_root.xpath('//p[@class="TableOfContents_Toc2"]/a'):
    #     # remove any <br> as uterwise cant remove page numbers from the end
    #     brs = element.findall('.//br')
    #     for br in brs:
    #         br.drop_tag()
    #     span = element.find('span')
    #     if iselement(span):
    #         if span.text:
    #             span.text += ' '
    #         if span.tail:
    #             span.tail = span.tail.rstrip('1234567890')
    #     elif element.text:
    #         element.text = element.text.rstrip('1234567890')

    # remove Indesign toc
    for element in input_root.xpath('//div[contains(@class,"ToC-Box")]'):
        element.drop_tree()

    # sort the numbers (hanging indents etc)
    numbers = input_root.xpath('//p[@class="paraQuestion"]/span[1]')
    for number in numbers:
        # cosider changing this in InDesign
        number.set('class', 'charBallotNumber')
        new_span = html.fromstring(
            '<span style="display : block; float : left; width : 2.1em; height : 1em;"></span>'
        )
        number_parent = number.getparent()
        new_span.append(number)
        number_parent.insert(0, new_span)

    # get the container divs in the input root that contain the html
    container_divs = input_root.xpath('//div[contains(@id,"_idContainer")]')
    for div in container_divs:
        # this line will put all the child elements of the container div in to the output html
        append_point.append(div)

    # change what is in the <title> element in the output
    title_element = output_root.xpath('//title')
    if len(title_element) and iselement(title_element[0]):
        title_element = title_element[0]
        date_formatted = format_date(sitting_date_iso, strftime="%A %d %B %Y")
        if title_element.text:
            title_element.text += f' for {date_formatted}'
        else:
            # BUG FIX: was `title_element.text += ...`, which raised
            # TypeError here because .text is None in this branch
            title_element.text = f'Call List for {date_formatted}'

    # sort tables out
    xpath = '//table[contains(@class, "Call-Sheet-Table")]' \
            '|//table[contains(@class, "Basic-Table")]'
    for table in output_root.xpath(xpath):
        table.classes.update(
            ['table', 'table-bordered', 'table-responsive-md'])
        thead = table.find('thead')
        if iselement(thead):
            thead.classes.add('thead-light')

    # Add IDs and perminant ancors to the html
    # Added at the request of IDMS
    # need to get all the heading elements
    xpath = '//h3[@class="paraBusinessSub-SectionHeading"]'
    headings = output_root.xpath(xpath)
    for i, heading in enumerate(headings):
        # generate id text
        id_text = f'anchor-{i}'
        if heading.get('id', default=None):
            heading.set('name', heading.get('id'))
        heading.set('id', id_text)
        anchor = SubElement(heading, 'a')
        permalink_for = 'Permalink for ' + heading.text_content()
        anchor.set('href', '#' + id_text)
        anchor.set('aria-label', 'Anchor')
        anchor.set('title', permalink_for)
        anchor.set('data-anchor-icon', '§')
        anchor.set('class', 'anchor-link')
        # I also feel like removing paraBusinessSub-SectionHeading
        heading.classes.remove('paraBusinessSub-SectionHeading')

    # find where to put the Toc
    nav_xpath_results = output_root.xpath('//nav[@id="toc"]')
    # create new toc
    h3s = output_root.xpath('//*[contains(@class, "js-toc-content")]//h3')
    if len(nav_xpath_results):
        toc_injection_point = nav_xpath_results[0]
        ol = SubElement(toc_injection_point, 'ol')
        ol.set('class', 'toc-list')
        for h3 in h3s:
            li = SubElement(ol, 'li')
            li.set('class', 'toc-list-item')
            a = SubElement(li, 'a')
            a.set('href', '#' + h3.get('id', ''))
            a.set('class', 'toc-link')
            a.text = h3.text_content()

    # finally change the prepared date at the bottom of the page
    # footerblock = output_root.xpath('//div[@id="footerBlockDate"]/p')
    # if len(footerblock) < 1:
    #     warning('Can\'t find the footer block to append the prepared date to.')
    # elif footerblock[0].text is None:
    #     footerblock[0].text = issued_date_text
    # else:
    #     footerblock[0].text = '{}{}'.format(footerblock[0].text, issued_date_text)

    # get the output file path
    input_file_base_path = os.path.dirname(input_html_file)
    # date in PPU forn
    date_ppu = format_date(sitting_date_iso, strftime='%y%m%d')
    output_file_name = '{}{}{}'.format(OUTPUTFILE_BASENAME, date_ppu,
                                       FILE_EXTENSION)
    if output_folder:
        output_file_path = os.path.join(output_folder, output_file_name)
    else:
        output_file_path = os.path.join(input_file_base_path, output_file_name)

    # output the file for writing bytes
    output_tree.write(output_file_path,
                      encoding='UTF-8',
                      method="html",
                      xml_declaration=False)

    # output_file = open(output_file_path, 'wb')
    # output_file.write(html.tostring(output_tree))
    # output_file.close()
    print('Output file path: ', output_file_path, '\n')
    return Path(output_file_path)
def __init__(self, attribute, **kwargs):
    """Initialise a license record.

    Three init paths, chosen from the type of *attribute*:
      1. XML element with source="server": data fetched via lmutil lmstat.
      2. Any other XML element: data loaded from a stored license file/config.
      3. Plain string: a fresh record built purely from keyword arguments.

    Args:
        attribute: an etree element describing the license, or the attribute
            name as a string.
        **kwargs: site, license_type, license_server, match_str, eval_str,
            server_info -- which of these are read depends on the path taken.
    """
    # default values
    self.__lic_servers = []  # list of (port, address) tuples
    # just for bookkeeping
    self.expires = None
    self.site = kwargs.get("site", "unknown")
    self.used = 0
    self.sge_used_requested = 0
    self.sge_used_issued = 0
    if etree.iselement(attribute):
        # init from xml
        _xml = attribute
        # print etree.tostring(_xml, pretty_print=True)
        if _xml.get("source", "file") == "server":
            # source from license fetch via lmutil lmstat
            _version = _xml.find("version")
            # xml from license check
            self.name = _xml.get("name")
            # NOTE: attribute deliberately mirrors name in the server path
            self.attribute = _xml.get("name")
            self.license_type = "simple"
            # do not set license servers
            self.total = int(_xml.get("issued", "0"))
            self.reserved = int(_xml.get("reserved", "0"))
            # show in frontend / command tools
            self.show = True
            # is used: set for SGE
            self.is_used = False
            # limit usage (reserve this number of licenses for external usage)
            self.limit = 0
            self.added = "unknown"
            if _version is not None and _version.get("expiry", ""):
                # EXPIRY_DT is a module-level strptime format -- defined elsewhere
                self.expires = datetime.datetime.strptime(
                    _version.get("expiry"), EXPIRY_DT)
            self.__match_str = ""
            # add server info: entries look like "<port>@<address>"
            for server in kwargs["server_info"].findall(".//server"):
                _port, _addr = server.attrib["info"].split("@")
                self.__lic_servers.append((int(_port), _addr))
        else:
            # init from stored XML (license file / configuration)
            self.name = _xml.get("name")
            self.attribute = _xml.get("attribute")
            self.license_type = _xml.get("type")
            if self.license_type == "simple":
                # simple licenses carry explicit server entries and a match string
                for _lic_srv in _xml.findall(
                        "license_servers/license_server"):
                    self.__lic_servers.append((int(_lic_srv.get("port")),
                                               _lic_srv.get("address")))
                self.__match_str = _xml.get("match_str", "")
            else:
                # non-simple licenses are driven by an eval expression instead
                self.__eval_str = _xml.get("eval_str")
            self.total = int(_xml.get("total"))
            self.reserved = int(_xml.get("reserved"))
            self.limit = int(_xml.get("limit", "0"))
            self.is_used = True if int(_xml.get("in_use", "0")) else False
            self.show = True if int(_xml.get("show", "1")) else False
            self.added = _xml.get("added", "unknown")
            self.expires = _xml.get("expires", "")
            if self.expires:
                # replace the raw string with a parsed datetime
                self.expires = datetime.datetime.strptime(
                    self.expires, EXPIRY_DT)
        self.used = 0
    else:
        # init from keyword arguments only (attribute is a plain name string)
        self.is_used = False
        self.total = 0
        self.reserved = 0
        self.limit = 0
        self.show = True
        self.name = attribute.lower()
        self.attribute = attribute
        self.license_type = kwargs.get("license_type", "simple")
        if self.license_type == "simple":
            # license_server is a comma-separated list of "<port>@<host>"
            lsp_parts = kwargs["license_server"].split(",")
            for lsp_part in lsp_parts:
                port, host = lsp_part.split("@")
                self.__lic_servers.append((int(port), host))
            self.__match_str = kwargs.get("match_str", "")
        else:
            self.__eval_str = kwargs.get("eval_str", "1")
        self.added = "unknown"
def isElement(node):
    # LXML HAS THE DUMBEST XML TREE DATA MODEL IN THE WORLD
    # Comments/PIs pass iselement() but have a callable .tag, so require a
    # string tag to identify a real element node.
    if not etree.iselement(node):
        return False
    return isinstance(node.tag, basestring)
def can_document_member(cls, member, membername, isattr, parent):
    """Return True only for <memberdef kind="variable"> XML elements."""
    is_memberdef = ET.iselement(member) and member.tag == 'memberdef'
    return bool(is_memberdef and member.get('kind') == 'variable')
def setParent(self, parent):
    """Attach this object's XML root under *parent*, a <Programs> element."""
    is_programs_element = etree.iselement(parent) and parent.tag == "Programs"
    assert is_programs_element
    parent.append(self.root)
def step_impl(context):
    """
    :type context: behave.runner.Context
    """
    # The parsed result must have a real element at its root ...
    result_root = context.res.getroot()
    expect(etree.iselement(result_root)).to.be.true
    # ... and serialise to exactly the same bytes as the expected XML.
    serialized_result = etree.tostring(context.res)
    serialized_expected = etree.tostring(context.xml)
    expect(serialized_result).to.equal(serialized_expected)
def _to_node(self, node):
    """
    Convert the request into a node Node
    """
    ATTRIBUTE_NAME_MAP = {
        'dataCenterId': 'datacenter_id',
        'dataCenterVersion': 'datacenter_version',
        'serverId': 'node_id',
        'serverName': 'node_name',
        'cores': 'cores',
        'ram': 'ram',
        'internetAccess': 'internet_access',
        'provisioningState': 'provisioning_state',
        'virtualMachineState': 'virtual_machine_state',
        'creationTime': 'creation_time',
        'lastModificationTime': 'last_modification_time',
        'osType': 'os_type',
        'availabilityZone': 'availability_zone',
        'cpuHotPlug': 'cpu_hotpluggable',
        'ramHotPlug': 'memory_hotpluggable',
        'nicHotPlug': 'nic_hotpluggable',
        'discVirtioHotPlug': 'disc_virtio_hotplug',
        'discVirtioHotUnPlug': 'disc_virtio_hotunplug'
    }

    # Copy each mapped child element's text into extra (None when absent).
    extra = {}
    for xml_tag, extra_key in ATTRIBUTE_NAME_MAP.items():
        child = node.find(xml_tag)
        extra[extra_key] = child.text if ET.iselement(child) else None

    # Take the first IP of each NIC, bucketed into private/public lists.
    public_ips = []
    private_ips = []
    if ET.iselement(node.find('nics')):
        for nic in node.findall('.//nics'):
            ip_elements = list(nic.findall('.//ips'))
            if ip_elements:
                address = ip_elements[0].text
                if is_private_subnet(address):
                    private_ips.append(address)
                else:
                    public_ips.append(address)

    extra['provisioning_state'] = self.PROVISIONING_STATE.get(
        extra['provisioning_state'], NodeState.UNKNOWN)
    state = self.NODE_STATE_MAP.get(extra['virtual_machine_state'],
                                    NodeState.UNKNOWN)

    return Node(id=extra['node_id'],
                name=extra['node_name'],
                state=state,
                public_ips=public_ips,
                private_ips=private_ips,
                driver=self.connection.driver,
                extra=extra)
def compareElements(e1, e2, xpath, position):
    """Recursively compare two XML elements, printing every difference found.

    Compares qualified names, attributes, text and children (in order).

    Args:
        e1, e2: the elements to compare.
        xpath: path string used only in the diagnostic output.
        position: unused; kept for interface compatibility with callers.

    Returns:
        True if the subtrees match, False otherwise.
    """
    #print "xpath: "+xpath
    #print e1.tag + " : " +e2.tag
    e1Qname = etree.QName(e1.tag)
    e2Qname = etree.QName(e2.tag)
    if e1Qname.localname != e2Qname.localname:
        print(bcolors.FAIL + "Element names differ at path: " + xpath + bcolors.ENDC)
        return False
    if e1Qname.namespace != e2Qname.namespace:
        print(bcolors.FAIL + "Element namespaces differ at path: " + xpath + bcolors.ENDC)
        return False
    retval = True
    # check attributes
    if len(e1.attrib) != len(e2.attrib):
        # print("%d != %d" % (len(e1.attrib), len(e2.attrib)))
        print(bcolors.FAIL + "Different number of attributes at path: " + xpath + bcolors.ENDC)
        retval = False
    else:
        for attrKey in e1.attrib.keys():
            # BUG FIX: both values were previously read from e2, so differing
            # attribute values were never detected -- read one from each side.
            attrValue1 = e1.get(attrKey)
            attrValue2 = e2.get(attrKey)
            # print attrKey + ": " + attrValue1 + " : " + attrValue2
            if attrValue1 != attrValue2:
                print(bcolors.FAIL + "Different attribute values at path: " + xpath + "/@" + attrKey + bcolors.ENDC)
                retval = False
    # check text
    #xstr = lambda s: s or ""
    #pattern = "\s+"
    #text1 = re.sub(pattern, xstr(e1.text), "")
    #text2 = re.sub(pattern, xstr(e2.text), "")
    text1 = e1.text
    text2 = e2.text
    if text1 != text2:
        print(bcolors.FAIL + "Different text at path: " + xpath + "/text()" + bcolors.ENDC)
        retval = False
    # check child elements
    # list() instead of getchildren(): the latter was removed in Python 3.9
    children1 = list(e1)
    children2 = list(e2)
    if len(children1) != len(children2):
        # print("%d != %d" % (len(children1), len(children2)))
        print(bcolors.FAIL + "Different number of children at path: " + xpath + bcolors.ENDC)
        retval = False
    else:
        # Check children pairwise, in document order
        pos = 0
        for child1 in children1:
            pos += 1
            if etree.iselement(child1):
                child2 = children2[pos - 1]
                if not compareElements(child1, child2, xpath + "/*[" + str(pos) + "]", 0):
                    retval = False
    return retval
def addTag(self, Tag):
    """Append *Tag*'s XML element under a <Tags> container, creating it if needed."""
    tag_root = Tag.getLocalRoot()
    # Only a proper <Tag> element may be attached.
    assert etree.iselement(tag_root) and tag_root.tag == "Tag"
    if not self.checkIfChild("Tags"):
        self.Tags = etree.SubElement(self.root, "Tags")
    self.Tags.append(tag_root)
def __init__(self, metadata_record):
    # Accept either an already-parsed element or a raw XML string.
    if ET.iselement(metadata_record):
        self.tree = metadata_record
        return
    self.tree = ET.fromstring(metadata_record)
def xml_create_tag(xmltree, xpath, element, place_index=None, tag_order=None, occurrences=None, correct_order=True, several=True):
    """
    This method evaluates an xpath expression and creates a tag in a xmltree under the
    returned nodes.
    If there are no nodes under the specified xpath an error is raised.

    The tag is appended by default, but can be inserted at a certain index (`place_index`)
    or can be inserted according to a given order of tags

    :param xmltree: an xmltree that represents inp.xml
    :param xpath: a path where to place a new tag
    :param element: a tag name or etree Element to be created
    :param place_index: defines the place where to put a created tag
    :param tag_order: defines a tag order
    :param occurrences: int or list of int. Which occurence of the parent nodes to create a tag.
                        By default all nodes are used.
    :param correct_order: bool, if True (default) and a tag_order is given, that does not correspond to the given order
                          in the xmltree (only order wrong no unknown tags) it will be corrected and a warning is given
                          This is necessary for some edge cases of the xml schemas of fleur
    :param several: bool, if True multiple tags od the given name are allowed

    :raises ValueError: If the insertion failed in any way (tag_order does not match, failed to insert, ...)

    :returns: xmltree with created tags
    """
    import copy
    from more_itertools import unique_justseen
    from masci_tools.io.common_functions import is_sequence

    # Accept either a tag name (string) or a ready-made etree Element.
    if not etree.iselement(element):
        element_name = element
        try:
            element = etree.Element(element)
        except ValueError as exc:
            raise ValueError(f"Failed to construct etree Element from '{element_name}'") from exc
    else:
        element_name = element.tag

    parent_nodes = eval_xpath(xmltree, xpath, list_return=True)

    if len(parent_nodes) == 0:
        raise ValueError(f"Could not create tag '{element_name}' because atleast one subtag is missing. "
                         'Use create=True to create the subtags')

    # Optionally narrow down to specific occurrences of the parent node.
    if occurrences is not None:
        if not is_sequence(occurrences):
            occurrences = [occurrences]
        try:
            parent_nodes = [parent_nodes[occ] for occ in occurrences]
        except IndexError as exc:
            raise ValueError('Wrong value for occurrences') from exc

    for parent in parent_nodes:
        # deepcopy so each parent gets its own independent element
        element_to_write = copy.deepcopy(element)
        if tag_order is not None:
            try:
                tag_index = tag_order.index(element_name)
            except ValueError as exc:
                raise ValueError(f"The tag '{element_name}' was not found in the order list. "
                                 f'Allowed tags are: {tag_order}') from exc

            # tags that must come before the new element
            behind_tags = tag_order[:tag_index]

            child_tags = [child.tag for child in parent.iterchildren()]

            #This ignores serial duplicates. With this out of order tags will be obvious e.g ['ldaU', 'lo','lo', 'ldaU']
            #will result in ['ldaU', 'lo', 'ldaU']
            existing_order = list(unique_justseen(child_tags))

            #Does the input file have unknown tags
            extra_tags = set(existing_order).difference(set(tag_order))
            if extra_tags:
                raise ValueError(f'Did not find existing elements in the tag_order list: {extra_tags}')

            if element_name in existing_order and not several:
                raise ValueError(f'The given tag {element_name} is not allowed to appear multiple times')

            #Is the existing order in line with the given tag_order
            if sorted(existing_order, key=tag_order.index) != existing_order:
                if not correct_order:
                    raise ValueError('Existing order does not correspond to tag_order list\n'
                                     f'Expected order: {tag_order}\n'
                                     f'Actual order: {existing_order}')
                else:
                    #Here we know that there are no unexpected tags in the order, so we can 'repair' the order
                    warnings.warn('Existing order does not correspond to tag_order list. Correcting it\n'
                                  f'Expected order: {tag_order}\n'
                                  f'Actual order: {existing_order}')

                    new_tag = copy.deepcopy(parent)

                    #Remove all child nodes from new_tag (deepcopied so they are still on parent)
                    for node in new_tag.iterchildren():
                        new_tag.remove(node)

                    for tag in tag_order:
                        #Iterate over all children with the given tag on the parent and append to the new_tag
                        for node in parent.iterchildren(tag=tag):
                            new_tag.append(node)

                    #Now replace the parent node with the reordered node
                    parent_of_parent = parent.getparent()
                    index = parent_of_parent.index(parent)
                    parent_of_parent.remove(parent)
                    parent_of_parent.insert(index, new_tag)
                    parent = new_tag

            # Walk backwards through the tags that must precede the new one and
            # insert directly after the last existing such tag.
            for tag in reversed(behind_tags):
                existing_tags = list(parent.iterchildren(tag=tag))

                if len(existing_tags) != 0:
                    insert_index = parent.index(existing_tags[-1]) + 1
                    try:
                        parent.insert(insert_index, element_to_write)
                    except ValueError as exc:
                        raise ValueError(f"Failed to insert element '{element_name}' behind '{tag}' tag") from exc
                    break

            else:  #This is the construct for reaching the end of the loop without breaking
                try:
                    parent.insert(0, element_to_write)
                except ValueError as exc:
                    raise ValueError(
                        f"Failed to insert element '{element_name}' at the beginning of the order") from exc

        elif place_index is not None:
            #We just try to insert the new element at the index
            try:
                parent.insert(place_index, element_to_write)
            except ValueError as exc:
                raise ValueError(f"Failed to create element '{element_name}' at the place index '{place_index}' "
                                 f"to the parent '{parent.tag}'") from exc
        else:
            #We append the node and hope nothing breaks
            try:
                parent.append(element_to_write)
            except ValueError as exc:
                raise ValueError(f"Failed to append element '{element_name}' to the parent '{parent.tag}'") from exc

    # reformat indentation of the modified tree (lxml >= 4.5)
    etree.indent(xmltree)

    return xmltree
def parseDrilldown(drilldownNode):
    """Parse a <drilldown> element into a list of Condition objects.

    Children are either implicit conditions (<link>, <set>, <unset>) or
    explicit <condition> elements; mixing the two styles raises
    AttributeError. Returns an empty list when drilldownNode is None.
    """
    defaultTarget = drilldownNode.get('target') if (drilldownNode is not None) else None
    results = list()
    fieldMap = dict()
    # tracks whether we have committed to implicit or explicit conditions
    conditionsType = None
    # shared field='*' condition that collects all top-level actions
    implicitCondition = None
    if drilldownNode is not None:
        # only real elements (skip comments/PIs whose .tag is not a str)
        for drilldownNode in [node for node in drilldownNode if et.iselement(node) and type(node.tag) is str]:
            nodeName = drilldownNode.tag.lower()
            if nodeName == 'link':
                if conditionsType is None:
                    conditionsType = IMPLICIT_CONDITION
                elif conditionsType is EXPLICT_CONDITION:
                    raise AttributeError('Cannot mix <%s> with explicit <condition>s (line %d)' %
                                         (nodeName, drilldownNode.sourceline))
                field = drilldownNode.attrib.get('field')
                series = drilldownNode.attrib.get('series')
                # fall back to series attribute, then to the wildcard field
                if not field and series:
                    field = series
                if not field:
                    field = '*'
                # NOTE(review): this condition can never be true -- a truthy
                # string cannot have length 0. Possibly meant to skip
                # empty-text links; confirm intent before changing.
                if drilldownNode.text and len(drilldownNode.text) == 0:
                    continue
                action = parseDrilldownAction(drilldownNode, defaultLinkTarget=defaultTarget)
                if action is not None:
                    checkField(field, fieldMap, drilldownNode)
                    if field == '*':
                        # wildcard links accumulate on the shared condition
                        if implicitCondition is None:
                            implicitCondition = Condition(field='*')
                            results.append(implicitCondition)
                        implicitCondition.add(action)
                    else:
                        results.append(Condition(field=field, action=action))
            elif nodeName in ('set', 'unset'):
                if conditionsType is None:
                    conditionsType = IMPLICIT_CONDITION
                elif conditionsType is EXPLICT_CONDITION:
                    raise AttributeError('Cannot mix <%s> with explicit <condition>s' % nodeName)
                if 'field' in drilldownNode.attrib:
                    # top-level set/unset always applies to the wildcard field
                    logger.warn('Ignoring field attribute for top-level <%s> action, assuming field="*" (line %d)',
                                nodeName, drilldownNode.sourceline)
                action = parseDrilldownAction(drilldownNode, defaultLinkTarget=defaultTarget)
                if action is not None:
                    checkField('*', fieldMap, drilldownNode)
                    if implicitCondition is None:
                        implicitCondition = Condition(field='*')
                        results.append(implicitCondition)
                    implicitCondition.add(action)
            elif nodeName == 'condition':
                if conditionsType is None:
                    conditionsType = EXPLICT_CONDITION
                elif conditionsType is IMPLICIT_CONDITION:
                    raise AttributeError('Cannot mix <%s> with implicit conditions (line %d)' %
                                         (nodeName, drilldownNode.sourceline))
                field = drilldownNode.attrib.get('field', '*')
                checkField(field, fieldMap, drilldownNode)
                condition = Condition(field=field)
                # parse each child action of the explicit condition
                for node in [node for node in drilldownNode if et.iselement(node) and type(node.tag) is str]:
                    action = parseDrilldownAction(node, defaultLinkTarget=defaultTarget)
                    if action is not None:
                        condition.add(action)
                results.append(condition)
            else:
                logger.warn('Ignoring unrecognized drilldown node "%s" (line %d)', nodeName, drilldownNode.sourceline)
    return results
def to_ele(x):
    """Return *x* as an XML element, parsing it from UTF-8 text if necessary."""
    if etree.iselement(x):
        return x
    return etree.fromstring(x.encode("UTF-8"))
def parse(self, host_properties):
    """
    Parse out the <HostProperties> xml content or CSV line.

    There can be a number of <tag> entries that are either useful to us in
    t_hosts or other areas. These are processed and returned as dictionary
    entries in 'hostdata'

    Args:
        host_properties: A <HostProperties> section from .nessus or a CSV line

    Returns:
        t_hosts.id, { hostdata }
    """
    from gluon.validators import IS_IPADDRESS
    hostdata = {}
    if etree.iselement(host_properties):
        for tag in host_properties.findall('tag'):
            hostdata[tag.get('name')] = tag.text
        ipaddr = hostdata.get('host-ip')
    else:
        # with CSV each line has all the hostdata fields so we set them here for use later
        ipaddr = host_properties.get('IP Address')
        if not ipaddr:
            # Scanner CSV, use Host
            ipaddr = host_properties.get('Host')
        hostdata['mac-address'] = host_properties.get('MAC Address', '')
        hostdata['host-fqdn'] = host_properties.get('DNS Name', '')
        hostdata['netbios-name'] = host_properties.get('NetBIOS Name', '')

    if (ipaddr not in self.ip_include and self.ip_include) or (ipaddr in self.ip_exclude):
        log("Host in exclude or not in include list, skipping")
        self.stats['skipped'] += 1
        return None, {}

    host_id = get_host_record(ipaddr)
    if host_id and not self.update_hosts:
        return host_id, hostdata

    # new host found, pull what we need for t_hosts
    hostfields = {}
    hostfields['f_engineer'] = self.engineer
    hostfields['f_asset_group'] = self.asset_group
    hostfields['f_confirmed'] = False

    # check ipv4/ipv6 and set hostfields accordingly
    if IS_IPADDRESS(is_ipv4=True)(ipaddr)[1] is None:
        hostfields['f_ipv4'] = ipaddr
    elif IS_IPADDRESS(is_ipv6=True)(ipaddr)[1] is None:
        hostfields['f_ipv6'] = ipaddr
    else:
        log("Invalid IP Address in HostProperties: %s" % ipaddr,
            logging.ERROR)
        return None, {}

    # pull out relevant hostfields
    for (k, v) in hostdata.iteritems():
        if k == 'mac-address':
            # multiple mac addrs may appear wildly, just pull the first
            # FIX: v[:v.find('\n')] dropped the final character whenever no
            # newline was present (find() returns -1); split keeps it whole
            hostfields['f_macaddr'] = v.split('\n', 1)[0]
        elif k == 'host-fqdn':
            hostfields['f_hostname'] = v
        elif k == 'netbios-name':
            hostfields['f_netbios_name'] = v

    if not self.update_hosts and not host_id:
        result = self.db.t_hosts.validate_and_insert(**hostfields)
        if not result.id:
            log("Error adding host to DB: %s" % result.errors,
                logging.ERROR)
            return None, {}
        self.stats['added'] += 1
        host_id = result.id
        log(" [-] Adding host: %s" % ipaddr)
    elif self.update_hosts:
        # FIX: use .get() -- only one of f_ipv4/f_ipv6 is ever set above and
        # the previous hostfields['f_ipv4'] raised KeyError for IPv6 hosts
        if hostfields.get('f_ipv4'):
            self.db(self.db.t_hosts.f_ipv4 == hostfields['f_ipv4']).update(
                **hostfields)
            self.db.commit()
            host_id = get_host_record(hostfields['f_ipv4'])
            if host_id:
                host_id = host_id.id
            log(" [-] Updating IP: %s" % (hostfields['f_ipv4']))
        else:
            self.db(self.db.t_hosts.f_ipv6 == hostfields['f_ipv6']).update(
                **hostfields)
            self.db.commit()
            host_id = get_host_record(hostfields['f_ipv6'])
            # FIX: guard against a missing record like the IPv4 branch does
            if host_id:
                host_id = host_id.id
            log(" [-] Updating IP: %s" % (hostfields['f_ipv6']))
        self.stats['updated'] += 1

    return host_id, hostfields
def to_ele(x):
    "Convert and return the :class:`~xml.etree.ElementTree.Element` for the XML document *x*. If *x* is already an :class:`~xml.etree.ElementTree.Element` simply returns that."
    if etree.iselement(x):
        return x
    # Python 2 parses str directly; Python 3 must encode to bytes first.
    if sys.version < '3':
        return etree.fromstring(x, parser=parser)
    return etree.fromstring(x.encode('UTF-8'), parser=parser)
def as_etree(doc, root_tag='root', item_tag='item'): if etree.iselement(doc): return doc tree = etree.Element(root_tag) return append_to_element(tree, doc, item_tag)
def run_netconf(operation, device, steps, datastore, rpc_data, returns, **kwargs):
    """Form NETCONF message and send to testbed.

    Dispatches on *operation*: 'capabilities' compares server capabilities
    against *returns*; 'rpc' sends rpc_data['rpc'] verbatim; everything else
    is converted by gen_ncclient_rpc() and sent.  edit-config replies are
    auto-validated with a follow-up get-config, get/get-config replies are
    compared to *returns*.  Returns a boolean verdict (or None for the
    'edit-data' branch, which only logs).
    """
    log.debug('NETCONF MESSAGE')
    # RaiseMode / et are only importable when ncclient and lxml are
    # installed; probe them so a missing dependency fails loudly here.
    try:
        device.raise_mode = RaiseMode.NONE
    except NameError:
        log.error('Make sure you have ncclient installed in your virtual env')
        return False
    try:
        et.iselement('<test>')
    except NameError:
        log.error('The "lxml" library is required for NETCONF testing')
        return False
    if operation == 'capabilities':
        # Capability check needs no rpc_data -- just compare lists.
        if not returns:
            log.error(banner('No NETCONF data to compare capability.'))
            return False
        return in_capabilities(list(device.server_capabilities), returns)
    rpc_verify = RpcVerify(log=log,
                           capabilities=list(device.server_capabilities))
    if not rpc_data:
        log.error('NETCONF message data not present')
        return False
    if not datastore:
        # Fall back to a sane default datastore configuration.
        log.warning('"datastore" variables not set so choosing:\n'
                    'datastore:\n type: running\n lock: True\n retry: 10\n')
        datastore = {}
    ds = datastore.get('type', '')
    lock = datastore.get('lock', True)
    retry = datastore.get('retry', 10)
    format = kwargs.get('format', {})
    auto_validate = format.get('auto-validate', True)
    negative_test = format.get('negative-test', False)
    timeout = format.get('timeout', None)
    pause = format.get('pause', 0)
    if pause:
        # "pause" may arrive as str, int, or float; coerce strings first.
        # NOTE(review): on the str path "pause" is rebound to sleep()'s
        # return value (None) -- harmless here since pause is not reused.
        if isinstance(pause, string_types):
            try:
                sleep(int(pause))
            except ValueError:
                try:
                    pause = sleep(float(pause))
                except ValueError:
                    log.error('Invalid "pause" type {0}'.format(type(pause)))
        else:
            sleep(pause)
    if timeout:
        # Same str/int/float coercion for the device RPC timeout.
        if isinstance(timeout, string_types):
            try:
                device.timeout = int(timeout)
            except ValueError:
                try:
                    device.timeout = float(timeout)
                except ValueError:
                    log.error('Invalid "timeout" type {0}'.format(
                        type(timeout)))
        else:
            device.timeout = timeout
    # get_datastore_state() resolves the effective datastore and lock state;
    # an explicitly configured "type" overrides the device's choice.
    actual_ds, ds_state = get_datastore_state(ds, rpc_verify)
    if not ds:
        log.info('USING DEVICE DATASTORE: {0}'.format(actual_ds))
        ds = actual_ds
    else:
        log.info('USING TEST DATASTORE: {0}'.format(ds))
    rpc_data['datastore'] = ds
    rpc_data['operation'] = operation
    if operation == 'rpc':
        # Custom RPC represented in raw string form
        result = netconf_send(device,
                              [('rpc', {'rpc': rpc_data['rpc']})],
                              ds_state,
                              lock=lock,
                              lock_retry=retry)
    else:
        prt_op, kwargs = gen_ncclient_rpc(rpc_data)
        result = netconf_send(device,
                              [(prt_op, kwargs)],
                              ds_state,
                              lock=lock,
                              lock_retry=retry)
    # Best-effort pretty print of every reply; failures are informational
    # only (recover=True tolerates malformed XML).
    try:
        for op, reply in result:
            log.info(
                et.tostring(et.fromstring(reply.encode('utf-8'),
                                          parser=et.XMLParser(
                                              recover=True,
                                              encoding='utf-8')),
                            pretty_print=True).decode('utf-8'))
    except Exception as exc:
        log.info('Pretty print failed: {0}'.format(str(exc)))
    # rpc-reply should show up in NETCONF log
    if not result:
        log.error(banner('NETCONF rpc-reply NOT RECIEVED'))
        return False
    # Collect rpc-error replies and transport tracebacks.
    errors = []
    for op, res in result:
        if '<rpc-error>' in res:
            errors.append(res)
        elif op == 'traceback':
            errors.append(res)
    if errors:
        log.error(banner('NETCONF MESSAGE ERRORED'))
        # In negative-test mode an error is the expected outcome, so only
        # fail when negative_test is off.
        if not negative_test:
            return False
    if rpc_data['operation'] == 'edit-config' and auto_validate:
        # Verify the get-config TODO: what do we do with custom rpc's?
        rpc_clone = deepcopy(rpc_data)
        rpc_clone['operation'] = 'get-config'
        rpc_clone['datastore'] = 'running'
        # Strip edit-only keys so the clone is a pure read request.
        for node in rpc_clone.get('nodes'):
            node.pop('value', '')
            node.pop('edit-op', '')
        prt_op, kwargs = gen_ncclient_rpc(rpc_clone)
        resp_xml = netconf_send(device,
                                [(prt_op, kwargs)],
                                ds_state,
                                lock=False)
        resp_elements = rpc_verify.process_rpc_reply(resp_xml)
        return rpc_verify.verify_rpc_data_reply(resp_elements, rpc_data)
    elif rpc_data['operation'] in ['get', 'get-config']:
        if not returns:
            log.error(banner('No NETCONF data to compare rpc-reply to.'))
            return False
        # should be just one result
        if len(result) >= 1:
            op, resp_xml = result[0]
            resp_elements = rpc_verify.process_rpc_reply(resp_xml)
            return rpc_verify.process_operational_state(resp_elements,
                                                        returns)
        else:
            log.error(banner('NO XML RESPONSE'))
            return False
    elif rpc_data['operation'] == 'edit-data':
        # TODO: get-data return may not be relevent depending on datastore
        log.debug('Use "get-data" yang action to verify this "edit-data".')
    elif rpc_data['operation'] == 'subscribe':
        log.info(banner('Subscribed to {0}'.format('TODO: device name')))
        return True
def run_netconf(operation, device, steps, datastore, rpc_data, returns, **kwargs):
    """Form NETCONF message and send to testbed.

    Newer variant of the runner: every verdict is returned as
    ``negative_test != result`` so that format['negative-test'] (or
    'negative_test') inverts pass/fail.  Also validates raw custom RPC XML
    up front, honors RFC 6243 "report-all" by skipping redundant CREATEs,
    and wires up subscription callbacks for the 'subscribe' operation.
    """
    log.debug('NETCONF MESSAGE')
    result = True
    # RaiseMode / et only exist when ncclient and lxml are installed;
    # probe them so a missing dependency fails loudly here.
    try:
        device.raise_mode = RaiseMode.NONE
    except NameError:
        log.error('Make sure you have ncclient installed in your virtual env')
        return False
    try:
        et.iselement('<test>')
    except NameError:
        log.error('The "lxml" library is required for NETCONF testing')
        return False
    format = kwargs.get('format', {})
    # Accept both snake_case and hyphenated spellings of the format knobs.
    if 'auto_validate' in format:
        auto_validate = format.get('auto_validate')
    else:
        auto_validate = format.get('auto-validate', True)
    if 'negative_test' in format:
        negative_test = format.get('negative_test')
    else:
        negative_test = format.get('negative-test', False)
    timeout = format.get('timeout', None)
    # _validate_pause() normalizes the configured pause; 0/None means none.
    pause = _validate_pause(format.get('pause', 0))
    if pause:
        sleep(pause)
    if operation == 'capabilities':
        # Capability check needs no rpc_data -- just compare lists.
        if not returns:
            log.error(banner('No NETCONF data to compare capability.'))
            return False
        else:
            result = in_capabilities(
                list(device.server_capabilities),
                returns
            )
        return negative_test != result
    rpc_verify = RpcVerify(
        log=log,
        capabilities=list(device.server_capabilities)
    )
    if not rpc_data:
        log.error('NETCONF message data not present')
        return False
    # RFC 6243: with "report-all" (and no "explicit") defaults are already
    # reported, so creating a node that only carries its default is a no-op.
    if 'explicit' not in rpc_verify.with_defaults and \
            'report-all' in rpc_verify.with_defaults:
        for node in rpc_data.get('nodes', []):
            if node.get('edit-op', '') == 'create' and node.get('default', ''):
                log.info(
                    'Skipping CREATE; RFC 6243 "report-all" and default exists'
                )
                return True
    if not datastore:
        # Fall back to a sane default datastore configuration.
        log.warning('"datastore" variables not set so choosing:\n'
                    'datastore:\n type: running\n lock: True\n retry: 10\n')
        datastore = {}
    ds = datastore.get('type', '')
    lock = datastore.get('lock', True)
    retry = datastore.get('retry', 10)
    if timeout:
        # "timeout" may arrive as str, int, or float; coerce strings first.
        if isinstance(timeout, string_types):
            try:
                device.timeout = int(timeout)
            except ValueError:
                try:
                    device.timeout = float(timeout)
                except ValueError:
                    log.error('Invalid "timeout" type {0}'.format(
                        type(timeout)))
        else:
            device.timeout = timeout
    # get_datastore_state() resolves the effective datastore and lock state;
    # an explicitly configured "type" overrides the device's choice.
    actual_ds, ds_state = get_datastore_state(ds, rpc_verify)
    if not ds:
        log.info('USING DEVICE DATASTORE: {0}'.format(actual_ds))
        ds = actual_ds
    else:
        log.info('USING TEST DATASTORE: {0}'.format(ds))
    rpc_data['datastore'] = ds
    rpc_data['operation'] = operation
    # operation may be raw rpc or well-formed lxml object
    if 'rpc' in rpc_data and operation in ['rpc', 'subscribe']:
        # Custom RPC represented in raw string form so check syntax
        try:
            et.fromstring(rpc_data['rpc'])
        except et.XMLSyntaxError as exc:
            log.error('Custom RPC has invalid XML:\n {0}:'.format(str(exc)))
            log.error('{0}'.format(str(rpc_data['rpc'])))
            log.error(banner('NETCONF FAILED'))
            return False
        result = netconf_send(
            device,
            [('rpc', {'rpc': rpc_data['rpc']})],
            ds_state,
            lock=lock,
            lock_retry=retry
        )
    else:
        prt_op, kwargs = gen_ncclient_rpc(rpc_data)
        result = netconf_send(
            device,
            [(prt_op, kwargs)],
            ds_state,
            lock=lock,
            lock_retry=retry
        )
    # A 'traceback' entry means the transport itself failed.
    for op, reply in result:
        if op == 'traceback':
            log.error('Failed to send using NETCONF')
            break
    # rpc-reply should show up in NETCONF log
    if not result:
        log.error(banner('NETCONF rpc-reply NOT RECIEVED'))
        return False
    # Collect rpc-error replies (pretty-printed for the log) and tracebacks.
    errors = []
    for op, res in result:
        if '<rpc-error>' in res:
            log.error(
                et.tostring(
                    et.fromstring(
                        res.encode('utf-8'),
                        parser=et.XMLParser(
                            recover=True,
                            encoding='utf-8')
                    ),
                    pretty_print=True
                ).decode('utf-8')
            )
            errors.append(res)
        elif op == 'traceback':
            log.error('TRACEBACK: {0}'.format(str(res)))
            errors.append(res)
    if errors:
        # Errors pass only in negative-test mode (negative_test != False).
        return negative_test != False
    if rpc_data['operation'] == 'edit-config' and auto_validate:
        # Verify custom rpc's with a follow-up action.
        if pause:
            sleep(pause)
        rpc_clone = deepcopy(rpc_data)
        rpc_clone['operation'] = 'get-config'
        rpc_clone['datastore'] = 'running'
        # Strip edit-only keys so the clone is a pure read request.
        for node in rpc_clone.get('nodes'):
            node.pop('value', '')
            node.pop('edit-op', '')
        prt_op, kwargs = gen_ncclient_rpc(rpc_clone)
        resp_xml = netconf_send(
            device,
            [(prt_op, kwargs)],
            ds_state,
            lock=False
        )
        resp_elements = rpc_verify.process_rpc_reply(resp_xml)
        result = rpc_verify.verify_rpc_data_reply(resp_elements, rpc_data)
        return negative_test != result
    elif rpc_data['operation'] in ['get', 'get-config']:
        if not returns:
            log.error(banner('No NETCONF data to compare rpc-reply to.'))
            return False
        # should be just one result
        if len(result) >= 1:
            op, resp_xml = result[0]
            resp_elements = rpc_verify.process_rpc_reply(resp_xml)
            verify_result = rpc_verify.process_operational_state(
                resp_elements, returns
            )
            return negative_test != verify_result
        else:
            log.error(banner('NO XML RESPONSE'))
            return False
    elif rpc_data['operation'] == 'edit-data':
        # TODO: get-data return may not be relevent depending on datastore
        log.debug('Use "get-data" yang action to verify this "edit-data".')
    elif rpc_data['operation'] == 'subscribe':
        # check if subscription id exists in rpc reply, subscribe to device
        # if subscription id is found (for/else: the else fires when no
        # reply matched, i.e. the subscription was not established)
        for op, res in result:
            if '</subscription-id>' in res or 'rpc-reply' in res and '<ok/>' in res:
                rpc_data['decode'] = rpc_verify.process_rpc_reply
                rpc_data['verifier'] = rpc_verify.process_operational_state
                rpc_data['format'] = format
                rpc_data['returns'] = returns
                device.subscribe(rpc_data)
                break
        else:
            log.error(banner('SUBSCRIPTION FAILED'))
            return negative_test != False
    return negative_test != True