def testRecordField(self, lfieldName, lfieldInRef, srefData, srunData, bVisual):
    """Compare RECORD fields between a reference XML and a run XML.

    Records are grouped per page (keyed by PAGE/@pagenum), then matched
    page-by-page with the Hungarian algorithm (scipy
    linear_sum_assignment) over a (100 - similarity) cost matrix, where
    the similarity comes from self.testCompareRecordField (presumably a
    0..100 score -- TODO confirm).

    Parameters:
    * lfieldName  -- attribute names read from run RECORD elements
    * lfieldInRef -- attribute names read from reference RECORD elements;
      a None entry defaults to the corresponding lfieldName entry
    * srefData / srunData -- reference and run documents as XML strings
    * bVisual     -- not used inside this method (kept for the caller)

    Returns (cntOk, cntErr, cntMissed, ltisRefsRunbErrbMiss); the last
    item is a list of (pagekey, pagenum, run_values, ref_values, bErr,
    bMiss) tuples sorted by page key.
    """
    assert len(lfieldName) == len((lfieldInRef))
    # Default each reference field name to the run field name.
    for i, f in enumerate(lfieldName):
        if lfieldInRef[i] is None:
            lfieldInRef[i] = f
    cntOk = cntErr = cntMissed = 0
    RefData = etree.XML(srefData.strip("\n").encode('utf-8'))
    RunData = etree.XML(srunData.strip("\n").encode('utf-8'))

    # Collect reference records per page key: {pagenum-key: [(number, key, values)]}
    lPages = RefData.xpath('//%s' % ('PAGE[@number]'))
    lRefKeys = {}
    for page in lPages:
        pnum = page.get('number')
        key = page.get('pagenum')
        xpath = "./%s" % ("RECORD")
        lrecord = page.xpath(xpath)
        if len(lrecord) == 0:
            pass
        else:
            for record in lrecord:
                lf = []
                for fieldInRef in lfieldInRef:
                    xpath = "./%s" % ("./@%s" % fieldInRef)
                    ln = record.xpath(xpath)
                    # keep only non-empty attribute values
                    if ln and len(ln[0]) > 0:
                        lf.append(ln[0])
                if lf != []:
                    try:
                        lRefKeys[key].append((pnum, key, lf))
                    except KeyError:
                        lRefKeys[key] = [(pnum, key, lf)]

    # Collect run records, but only for pages that also exist in the reference.
    lRunPerPage = {}
    lPageMapping = {}  # pagenum-key -> @number (used for missed entries below)
    if RunData:
        lpages = RunData.xpath('//%s' % ('PAGE[@number]'))
        for page in lpages:
            pnum = page.get('number')
            key = page.get('pagenum')
            lPageMapping[key] = pnum
            if key in lRefKeys:
                # record level!
                xpath = "./%s" % ("RECORD")
                lrecord = page.xpath(xpath)
                if len(lrecord) == 0:
                    pass
                else:
                    for record in lrecord:
                        lf = []
                        for fieldName in lfieldName:
                            xpath = "./%s" % ("./@%s" % fieldName)
                            ln = record.xpath(xpath)
                            if len(ln) > 0 and len(ln[0]) > 0:
                                lf.append(ln[0])
                        # run records must have ALL fields populated
                        # (reference records only need at least one)
                        if len(lf) == len(lfieldName):
                            try:
                                lRunPerPage[key].append((pnum, key, lf))
                            except KeyError:
                                lRunPerPage[key] = [(pnum, key, lf)]

    ltisRefsRunbErrbMiss = list()
    for key in lRunPerPage:
        lRun = lRunPerPage[key]
        lRef = lRefKeys[key]
        runLen = len(lRunPerPage[key])
        refLen = len(lRefKeys[key])
        # Orient the cost matrix so rows is the shorter side; bT records
        # whether rows/cols were swapped (run in rows instead of ref).
        bT = False
        if refLen <= runLen:
            rows = lRef
            cols = lRun
        else:
            rows = lRun
            cols = lRef
            bT = True
        cost_matrix = np.zeros((len(rows), len(cols)), dtype=float)
        for a, i in enumerate(rows):
            curRef = i
            for b, j in enumerate(cols):
                runElt = j
                ret, val = self.testCompareRecordField(curRef, runElt)
                # lower cost = more similar
                dist = 100 - val
                cost_matrix[a, b] = dist
        m = linear_sum_assignment(cost_matrix)
        r1, r2 = m
        if False:  # debug output (disabled)
            print(len(lRef), lRef)
            print(len(lRun), lRun)
            print(bT, r1, r2)
        lcsTH = self.lcsTH  # similarity threshold for accepting a match
        lCovered = []
        lMatched = []
        for a, i in enumerate(r2):
            # NOTE: r1[a, ] is numpy tuple indexing, equivalent to r1[a].
            if 100 - cost_matrix[r1[a, ], i] > lcsTH:
                cntOk += 1
                if bT:
                    ltisRefsRunbErrbMiss.append(
                        (rows[r1[a]][1], int(rows[r1[a]][0]), cols[i][2], rows[r1[a]][2], False, False))
                    lMatched.append(i)
                else:
                    ltisRefsRunbErrbMiss.append(
                        (cols[i][1], int(cols[i][0]), rows[r1[a]][2], cols[i][2], False, False))
                    lMatched.append(r1[a])
            else:
                # too distant: false
                if bT:
                    lCovered.append(i)
                    ltisRefsRunbErrbMiss.append(
                        (rows[r1[a]][1], int(rows[r1[a]][0]), "", rows[r1[a]][2], True, False))
                else:
                    lCovered.append(r1[a])
                    ltisRefsRunbErrbMiss.append(
                        (cols[i][1], int(cols[i][0]), "", cols[i][2], True, False))
                cntErr += 1
        # Reference records that got no assignment above are "missed".
        for i, iref in enumerate(lRef):
            if i not in lMatched:
                ltisRefsRunbErrbMiss.append(
                    (lRef[i][1], int(lPageMapping[lRef[i][1]]), lRef[i][2], '', False, True))
                cntMissed += 1
    ltisRefsRunbErrbMiss.sort(key=lambda x: x[0])
    return (cntOk, cntErr, cntMissed, ltisRefsRunbErrbMiss)
from ckan.plugins.toolkit import url_for from ckan.logic.auth.get import package_show import ckan.model as model import ckan.lib.create_test_data as ctd import ckanext.datastore.backend.postgres as db from ckanext.datastore.tests.helpers import set_url_type from ckan.tests import helpers, factories import ckanext.sitemap.tests.testdata as testdata import ckanext.sitemap.plugin as plugin log = logging.getLogger(__file__) siteschema = etree.XMLSchema(etree.XML(str.encode(testdata.sitemap))) class TestSiteMap: sysadmin_user = None normal_user = None @pytest.fixture(autouse=True) def initial_data(self, clean_db, clean_index, test_request_context, app): ctd.CreateTestData.create() self.sysadmin_user = model.User.get("testsysadmin") self.normal_user = model.User.get("annafan") engine = db.get_write_engine() self.Session = orm.scoped_session(orm.sessionmaker(bind=engine)) with test_request_context(): set_url_type(
<state>online</state> <size-total>0</size-total> <size-used>0</size-used> <size-available>0</size-available> <is-inconsistent>false</is-inconsistent> <is-invalid>false</is-invalid> </volume-info>""" FAKE_XML1 = b"""<options>\ <test1>abc</test1>\ <test2>abc</test2>\ </options>""" FAKE_XML2 = b"""<root><options>somecontent</options></root>""" FAKE_NA_ELEMENT = netapp_api.NaElement(etree.XML(FAKE_VOL_XML)) FAKE_INVOKE_DATA = 'somecontent' FAKE_XML_STR = 'abc' FAKE_API_NAME = 'volume-get-iter' FAKE_API_NAME_ELEMENT = netapp_api.NaElement(FAKE_API_NAME) FAKE_NA_SERVER_STR = '127.0.0.1' FAKE_NA_SERVER = netapp_api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_5 = netapp_api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_5.set_vfiler('filer')
def get_capabilities(request, layerid=None, user=None, mapid=None, category=None, tolerant=False):
    """
    Compile a GetCapabilities document containing public layers
    filtered by layer, user, map, or category.

    Exactly one of layerid / user / category / mapid is expected
    (checked in that priority order); layers the requesting user may
    not view are silently skipped.  Returns an HttpResponse with the
    combined capabilities XML, or an empty 200 when nothing matched.
    """
    rootdoc = None
    layers = None
    cap_name = ' Capabilities - '
    # Select the dataset queryset and build the capability title.
    if layerid is not None:
        dataset_obj = Dataset.objects.get(id=layerid)
        cap_name += dataset_obj.title
        layers = Dataset.objects.filter(id=layerid)
    elif user is not None:
        layers = Dataset.objects.filter(owner__username=user)
        cap_name += user
    elif category is not None:
        layers = Dataset.objects.filter(category__identifier=category)
        cap_name += category
    elif mapid is not None:
        map_obj = Map.objects.get(id=mapid)
        cap_name += map_obj.title
        alternates = []
        # only local map layers contribute to the capabilities doc
        for layer in map_obj.maplayers.iterator():
            if layer.local:
                alternates.append(layer.name)
        layers = Dataset.objects.filter(alternate__in=alternates)
    for layer in layers:
        if request.user.has_perm('view_resourcebase', layer.get_self_resource()):
            # Use the user's OAuth token (if still valid) for the upstream
            # GetCapabilities request.
            access_token = get_or_create_token(request.user)
            if access_token and not access_token.is_expired():
                access_token = access_token.token
            else:
                access_token = None
            try:
                workspace, layername = layer.alternate.split(":") if ":" in layer.alternate else (None, layer.alternate)
                layercap = get_dataset_capabilities(layer, access_token=access_token, tolerant=tolerant)
                if layercap is not None:
                    # 1st one, seed with real GetCapabilities doc
                    try:
                        namespaces = {'wms': 'http://www.opengis.net/wms',
                                      'xlink': 'http://www.w3.org/1999/xlink',
                                      'xsi': 'http://www.w3.org/2001/XMLSchema-instance'}
                        layercap = dlxml.fromstring(layercap)
                        rootdoc = etree.ElementTree(layercap)
                        format_online_resource(workspace, layername, rootdoc, namespaces)
                        service_name = rootdoc.find('.//wms:Service/wms:Name', namespaces)
                        if service_name is not None:
                            service_name.text = cap_name
                        # keep only the nested layer element of interest
                        rootdoc = rootdoc.find('.//wms:Capability/wms:Layer/wms:Layer', namespaces)
                    except Exception as e:
                        import traceback
                        traceback.print_exc()
                        logger.error(
                            f"Error occurred creating GetCapabilities for {layer.typename}: {str(e)}")
                        rootdoc = None
                if layercap is None or not len(layercap) or rootdoc is None or not len(rootdoc):
                    # Fallback: render the capabilities fragment from the
                    # layer model via a template.
                    # TODO: store time dimension on DB also
                    tpl = get_template("geoserver/layer.xml")
                    ctx = {
                        'layer': layer,
                        'geoserver_public_url': ogc_server_settings.public_url,
                        'catalogue_url': settings.CATALOGUE['default']['URL'],
                    }
                    gc_str = tpl.render(ctx)
                    gc_str = gc_str.encode("utf-8", "replace")
                    layerelem = etree.XML(gc_str)
                    rootdoc = etree.ElementTree(layerelem)
            except Exception as e:
                import traceback
                traceback.print_exc()
                logger.error(
                    f"Error occurred creating GetCapabilities for {layer.typename}:{str(e)}")
                rootdoc = None
    if rootdoc is not None:
        capabilities = etree.tostring(
            rootdoc,
            xml_declaration=True,
            encoding='UTF-8',
            pretty_print=True)
        return HttpResponse(capabilities, content_type="text/xml")
    return HttpResponse(status=200)
def response(resp):
    """Parse a Wolfram|Alpha API XML response into searx result dicts.

    Builds an infobox from the response's <pod> elements (text or image
    per subpod) plus a plain link result pointing back at the original
    query (the request's Referer header).  Returns [] when the engine
    reported failure or produced no usable pods.
    """
    results = []
    search_results = etree.XML(resp.content)

    # return empty array if there are no results
    if search_results.xpath(failure_xpath):
        return []

    try:
        infobox_title = search_results.xpath(input_xpath)[0].text
    # BUG FIX: was a bare "except:" which also swallowed unrelated errors
    # (even KeyboardInterrupt); only an empty xpath result is expected here.
    except IndexError:
        infobox_title = ""

    pods = search_results.xpath(pods_xpath)
    result_chunks = []
    result_content = ""
    for pod in pods:
        pod_id = pod.xpath(pod_id_xpath)[0]
        pod_title = pod.xpath(pod_title_xpath)[0]
        pod_is_result = pod.xpath(pod_primary_xpath)

        subpods = pod.xpath(subpods_xpath)
        if not subpods:
            continue

        # Appends either a text or an image, depending on which one is more suitable
        for subpod in subpods:
            content = subpod.xpath(plaintext_xpath)[0].text
            image = subpod.xpath(image_xpath)

            if content and pod_id not in image_pods:
                if pod_is_result or not result_content:
                    if pod_id != "Input":
                        result_content = "%s: %s" % (pod_title, content)

                # if no input pod was found, title is first plaintext pod
                if not infobox_title:
                    infobox_title = content

                content = replace_pua_chars(content)
                result_chunks.append({'label': pod_title, 'value': content})
            elif image:
                result_chunks.append({'label': pod_title,
                                      'image': {'src': image[0].xpath(img_src_xpath)[0],
                                                'alt': image[0].xpath(img_alt_xpath)[0]}})

    if not result_chunks:
        return []

    title = "Wolfram|Alpha (%s)" % infobox_title

    # append infobox
    results.append({'infobox': infobox_title,
                    'attributes': result_chunks,
                    'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}]})

    # append link to site
    results.append({'url': resp.request.headers['Referer'],
                    'title': title,
                    'content': result_content})

    return results
class XMLIngestor(Ingestor, EncodingSupport, XMLSupport, HTMLSupport):
    "XML file ingestor class. Generates a tabular HTML representation."
    MIME_TYPES = ['text/xml']
    EXTENSIONS = ['xml']
    SCORE = 1
    MAX_SIZE = 4 * 1024 * 1024

    # XSLT that flattens an arbitrary XML document into nested HTML tables:
    # leaf elements become a name/value row, elements with children become a
    # row holding a nested table.
    XSLT = etree.XML(b"""<?xml version="1.0" encoding="UTF-8"?>
    <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
        <xsl:output omit-xml-declaration="yes" indent="yes"/>
        <xsl:strip-space elements="*"/>
        <xsl:template match="/">
            <table>
                <xsl:apply-templates/>
            </table>
        </xsl:template>
        <xsl:template match="*">
            <tr>
                <td>
                    <p><xsl:value-of select="name()"/></p>
                </td>
                <td>
                    <p><xsl:value-of select="."/></p>
                </td>
            </tr>
        </xsl:template>
        <xsl:template match="*[*]">
            <tr>
                <td>
                    <p><xsl:value-of select="name()"/></p>
                </td>
                <td>
                    <table>
                        <xsl:apply-templates/>
                    </table>
                </td>
            </tr>
        </xsl:template>
    </xsl:stylesheet>""")

    def ingest(self, file_path, entity):
        """Ingestor implementation."""
        entity.schema = model.get('HyperText')
        # Reject oversized documents before doing any parsing work.
        if any(int(size) > self.MAX_SIZE for size in entity.get('fileSize')):
            raise ProcessingException("XML file is too large.")
        doc = self.parse_xml_path(file_path)
        entity.set('bodyText', self.extract_html_text(doc.getroot()))
        try:
            rendered = etree.XSLT(self.XSLT)(doc)
            entity.set('bodyHtml',
                       html.tostring(rendered, encoding=str, pretty_print=True))
        except ValueError as ve:
            raise ProcessingException("Error converting XML file: %s" % ve) from ve
def test_get_igroup_by_initiators_multiple_pages(self):
    """Verify get_igroup_by_initiators follows the <next-tag> pagination
    marker and aggregates the igroups returned across both API pages."""
    initiator = '11:22:33:44:55:66:77:88'
    expected_igroup1 = {
        'initiator-group-os-type': 'default',
        'initiator-group-type': 'fcp',
        'initiator-group-name': 'openstack-igroup1',
    }
    expected_igroup2 = {
        'initiator-group-os-type': 'default',
        'initiator-group-type': 'fcp',
        'initiator-group-name': 'openstack-igroup2',
    }
    # First page: one record plus a <next-tag>, signalling more data.
    response_1 = netapp_api.NaElement(
        etree.XML("""<results status="passed">
    <attributes-list>
      <initiator-group-info>
        <initiator-group-alua-enabled>true</initiator-group-alua-enabled>
        <initiator-group-name>%(initiator-group-name)s</initiator-group-name>
        <initiator-group-os-type>default</initiator-group-os-type>
        <initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
        <initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
        <initiator-group-type>%(initiator-group-type)s</initiator-group-type>
        <initiator-group-use-partner>true</initiator-group-use-partner>
        <initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
        </initiator-group-uuid>
        <initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
        <initiators>
          <initiator-info>
            <initiator-name>11:22:33:44:55:66:77:88</initiator-name>
          </initiator-info>
        </initiators>
        <vserver>cinder-iscsi</vserver>
      </initiator-group-info>
    </attributes-list>
    <next-tag>12345</next-tag>
    <num-records>1</num-records>
  </results>""" % expected_igroup1))
    # Second page: final record, no <next-tag>.
    response_2 = netapp_api.NaElement(
        etree.XML("""<results status="passed">
    <attributes-list>
      <initiator-group-info>
        <initiator-group-alua-enabled>true</initiator-group-alua-enabled>
        <initiator-group-name>%(initiator-group-name)s</initiator-group-name>
        <initiator-group-os-type>default</initiator-group-os-type>
        <initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
        <initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
        <initiator-group-type>%(initiator-group-type)s</initiator-group-type>
        <initiator-group-use-partner>true</initiator-group-use-partner>
        <initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
        </initiator-group-uuid>
        <initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
        <initiators>
          <initiator-info>
            <initiator-name>11:22:33:44:55:66:77:88</initiator-name>
          </initiator-info>
        </initiators>
        <vserver>cinder-iscsi</vserver>
      </initiator-group-info>
    </attributes-list>
    <num-records>1</num-records>
  </results>""" % expected_igroup2))
    self.connection.invoke_successfully.side_effect = [
        response_1, response_2
    ]

    igroups = self.client.get_igroup_by_initiators([initiator])

    # make these lists of dicts comparable using hashable dictionaries
    igroups = set(
        [netapp_utils.hashabledict(igroup) for igroup in igroups])
    expected = set([
        netapp_utils.hashabledict(expected_igroup1),
        netapp_utils.hashabledict(expected_igroup2)
    ])
    self.assertSetEqual(igroups, expected)
xsltTransform = etree.XSLT( etree.XML('''\ <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:strip-space elements="*"/> <xsl:template match="charlist"> <root><xsl:apply-templates select="character"/></root> </xsl:template> <xsl:template match="character"> <xsl:if test="surrogate"> <entry> <xsl:attribute name="mathvariant"> <xsl:value-of select="surrogate/@mathvariant"/> </xsl:attribute> <xsl:attribute name="baseChar"> <xsl:value-of select="surrogate/@ref"/> </xsl:attribute> <xsl:attribute name="transformedChar"> <xsl:choose> <xsl:when test="bmp"> <xsl:value-of select="bmp/@ref"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="@id"/> </xsl:otherwise> </xsl:choose> </xsl:attribute> </entry> </xsl:if> </xsl:template> </xsl:stylesheet>'''))
def xml_doc_to_dict(xmlstring_doc):
    """Parse an XML string, run it through the module-level XSLT
    transformer, and convert the transformed tree into a dict."""
    transformed_tree = xslt_transformer(etree.XML(xmlstring_doc))
    return doc_tree_to_dict(transformed_tree)
def csv2tbx(lines, lang, subjectField, id_prefix, ontology_name=None, ontology_link=None):
    """Build a TBX (TermBase eXchange) document from a 9-column CSV.

    Parameters:
    * lines -- input accepted by pandas.read_csv; ';'-separated, columns:
      term, POS, multi-word POS, inflection, variants, synonyms,
      definition, hypernyms, ontology class
    * lang -- xml:lang value for each <langSet>
    * subjectField -- value of <descrip type="subjectField">
    * id_prefix -- prefix for <termEntry id="..."> values
    * ontology_name / ontology_link -- optional ontology cross-references

    Returns the serialized TBX document as UTF-8 bytes.
    Raises CsvFormatError for a row with the wrong field count or an
    invalid 4-character inflection code.
    """
    parser = etree.XMLParser(remove_blank_text=True)
    root_xml = '''\
<martif type="TBX-Default" xml:lang="en">
 <martifHeader>
  <fileDesc>
   <sourceDesc>
    <p>This is a TBX file generated via .... Address any enquiries to ....</p>
   </sourceDesc>
  </fileDesc>
  <encodingDesc>
   <p type="XCSURI">TBXXCS.xcs</p>
  </encodingDesc>
 </martifHeader>
</martif>
'''  # -> to verify xcs
    root = etree.XML(root_xml, parser)
    text_struct = etree.SubElement(root, "text")
    body_struct = etree.SubElement(text_struct, "body")
    csv_reader = pd.read_csv(lines, delimiter=';')
    for n, row in csv_reader.iterrows():
        if len(row) != 9:
            error_msg = "Error in input CSV: line {} has {} fields instead of 9".format(
                n, len(row))
            raise CsvFormatError(error_msg)
        # pandas parses empty CSV cells as float NaN, hence the float checks.
        field_1_term = row[0]
        field_2_pos_upper = row[1].upper()
        field_3_mw_pos_upper = row[2].upper() if type(row[2]) != float else ''
        field_4_flex_lower = row[3].lower() if type(row[3]) != float else ''
        if type(row[4]) != float:
            field_5_variants = [
                x.strip() for x in row[4].split(',') if type(x) == str
            ]
        else:
            field_5_variants = ''
        if type(row[5]) != float:
            field_6_synonyms = [
                x.strip() for x in row[5].split(',') if type(x) == str
            ]
        else:
            field_6_synonyms = ''
        # BUG FIX: the empty-cell branch previously assigned 'field_7_descr'
        # (typo), so an empty definition leaked the previous row's value
        # (or raised NameError on the first row).
        field_7_desc = row[6] if type(row[6]) != float else ''
        if type(row[7]) != float:
            field_8_hyper = [
                x.strip() for x in row[7].split(',') if type(x) == str
            ]
        else:
            field_8_hyper = ''
        field_9_class = row[8] if type(row[8]) != float else ''
        pos_full = POS_MAPPING.get(field_2_pos_upper, POS_MAPPING_OTHER)
        gender_full, number_full = None, None
        if field_4_flex_lower:
            # Inflection code is exactly 4 chars, e.g. "ms+-":
            # gender (m/f), number (s/p), then two further positions.
            if len(field_4_flex_lower) != 4:
                error_msg = ("Error in input CSV: line {} has error in field 4 ({}): "
                             "inflection should be 4 characters long (e.g., ms+-)".format(
                                 n, field_4_flex_lower))
                raise CsvFormatError(error_msg)
            if field_4_flex_lower[0] not in ['m', 'f']:
                error_msg = ("Error in input CSV: line {} has error in field 4 ({}): "
                             "first character should be m (masculine) or f (feminine)".format(
                                 n, field_4_flex_lower))
                raise CsvFormatError(error_msg)
            if field_4_flex_lower[1] not in ['s', 'p']:
                error_msg = ("Error in input CSV: line {} has error in field 4 ({}): "
                             "second character should be s (singular) or p (plural)".format(
                                 n, field_4_flex_lower))
                raise CsvFormatError(error_msg)
            gender_full = 'masculine' if field_4_flex_lower[0] == 'm' else 'feminine'
            number_full = 'singular' if field_4_flex_lower[1] == 's' else 'plural'
        # term is multi word if first field has spaces and third field
        # (internal structure) is present; multi-word terms use <ntig>.
        # (A dead earlier assignment of use_ntig was removed.)
        use_ntig = ' ' in field_1_term and field_3_mw_pos_upper
        id_str = "{}_{}".format(id_prefix, n + 1)
        # <termEntry id="RA_286">
        termEntry_struct = etree.SubElement(body_struct, "termEntry", id=id_str)
        descripGrp_struct = etree.SubElement(termEntry_struct, "descripGrp")
        # <descrip type="subjectField">Archaeology</descrip>
        etree.SubElement(descripGrp_struct, "descrip",
                         type="subjectField").text = subjectField
        if field_7_desc:
            # <descrip type="definition">Definition Text</descrip>
            etree.SubElement(descripGrp_struct, "descrip",
                             type="definition").text = field_7_desc
        if ontology_name:
            # <xref type="URI" target="http://...">CIDOC CRM Ontology</xref>
            etree.SubElement(descripGrp_struct, "xref", type="URI",
                             target=ontology_link).text = '{} {}'.format(
                                 ontology_name, 'Ontology')
        # <langSet xml:lang="it">
        langSet_struct = etree.SubElement(termEntry_struct, "langSet")
        attr = langSet_struct.attrib
        attr['{http://www.w3.org/XML/1998/namespace}lang'] = lang
        ntig_tig_struct_tag = "ntig" if use_ntig else "tig"
        ntig_tig_struct = etree.SubElement(langSet_struct, ntig_tig_struct_tag)
        if use_ntig:
            field_1_words = row[0].split()  # list of words
            # NOTE(review): this maps each *character* of field 3 through
            # POS_MAPPING -- assumes a compact one-char-per-word code string;
            # confirm against the CSV specification.
            pos_internal = [
                POS_MAPPING.get(x, POS_MAPPING_OTHER)
                for x in field_3_mw_pos_upper
            ]
            termGrp_struct = etree.SubElement(ntig_tig_struct, "termGrp")
            parent_node = termGrp_struct
        else:
            parent_node = ntig_tig_struct
        # <term>dinos con anse ad anello</term>
        etree.SubElement(parent_node, 'term').text = row[0]
        # <termNote type="termType">fullForm</termNote>
        etree.SubElement(parent_node, 'termNote',
                         type="termType").text = 'fullForm'
        # <termNote type="partOfSpeech">noun</termNote>
        etree.SubElement(parent_node, 'termNote',
                         type="partOfSpeech").text = pos_full
        if gender_full and number_full:
            # <termNote type="grammaticalGender">masculine</termNote>
            etree.SubElement(parent_node, 'termNote',
                             type="grammaticalGender").text = gender_full
            # <termNote type="grammaticalNumber">singular</termNote>
            etree.SubElement(parent_node, 'termNote',
                             type="grammaticalNumber").text = number_full
        if ontology_link and ontology_name:
            # <xref type="URI" target="http://.../E22">CIDOC CRM Class</xref>
            ontology_link_term = '{}/{}'.format(ontology_link, field_9_class)
            etree.SubElement(parent_node, "xref", type="URI",
                             target=ontology_link_term).text = '{} {}'.format(
                                 ontology_name, 'Class')
        if use_ntig:
            # <termCompList type="lemma"> -- one <termCompGrp> per word
            termCompList_struct = etree.SubElement(parent_node, "termCompList",
                                                   type="lemma")
            for i, word in enumerate(field_1_words):
                termCompGrp_struct = etree.SubElement(termCompList_struct,
                                                      "termCompGrp")
                # <termComp>dinos</termComp>
                etree.SubElement(termCompGrp_struct, "termComp").text = word
                # <termNote type="partOfSpeech">noun</termNote>
                etree.SubElement(termCompGrp_struct, "termNote",
                                 type="partOfSpeech").text = pos_internal[i]
        # BUG FIX: these enumerate counters used to rebind 'n' (the row
        # counter above); distinct names keep row numbering intact.
        for i_var, v in enumerate(field_5_variants, 1):
            # <termNote type="variant01">...</termNote>
            v_num = str(i_var).zfill(2)
            etree.SubElement(parent_node, "termNote",
                             type="variant{}".format(v_num)).text = v
        for i_syn, s in enumerate(field_6_synonyms, 1):
            # <termNote type="synonym01">...</termNote>
            s_num = str(i_syn).zfill(2)
            etree.SubElement(parent_node, "termNote",
                             type="synonym{}".format(s_num)).text = s
        for i_hyp, h in enumerate(field_8_hyper, 1):
            # <termNote type="hypernyms01">Contenitori e recipienti</termNote>
            h_num = str(i_hyp).zfill(2)
            etree.SubElement(parent_node, "termNote",
                             type="hypernyms{}".format(h_num)).text = h
    tbx_string = etree.tostring(
        root,
        pretty_print=True,
        xml_declaration=True,
        doctype='<!DOCTYPE martif SYSTEM "TBXcoreStructV02.dtd">',  # --> verify
        encoding='UTF-8')
    return tbx_string
def get_config(self, filter_xml=None, options=None, model=None,
               namespace=None, remove_ns=True, **kwargs):
    """
    retrieve configuration from the Junos device

    .. code-block:: python

       dev.rpc.get_config()
       dev.rpc.get_config(filter_xml='<system><services/></system>')
       dev.rpc.get_config(filter_xml='system/services')
       dev.rpc.get_config(
           filter_xml=etree.XML('<system><services/></system>'),
           options={'format': 'json'})
       # to fetch junos as well as yang model configs
       dev.rpc.get_config(model=True)
       # openconfig yang example
       dev.rpc.get_config(filter_xml='bgp', model='openconfig')
       dev.rpc.get_config(filter_xml='<bgp><neighbors></neighbors></bgp>',
                          model='openconfig')
       # custom yang example
       dev.rpc.get_config(filter_xml='l2vpn', model='custom',
                          namespace="http://yang.juniper.net/customyang/l2vpn")
       # ietf yang example
       dev.rpc.get_config(filter_xml='interfaces', model='ietf')

    :param filter_xml: fully XML formatted tag (string, 'a/b/c' path, or
        etree Element) which defines what to retrieve; when omitted the
        entire configuration is returned
    :param dict options: XML attributes to set within the
        <get-configuration> RPC (e.g. {'database': 'committed',
        'inherit': 'inherit'} or {'format': 'json'})
    :param str model: yang model openconfig/custom/ietf.  When model is
        True and filter_xml is None, xml is enclosed under <data> so
        that we get junos as well as other model configurations
    :param str namespace: user-defined namespace for custom yang models
    :param bool remove_ns: remove namespaces; if False, the returned xml
        keeps namespaces and can be loaded back to devices (handy for
        yang based configs)
    """
    # BUG FIX: 'options={}' was a shared mutable default argument; use a
    # None sentinel instead (call-compatible for every existing caller).
    if options is None:
        options = {}
    nmspaces = {
        'openconfig': "http://openconfig.net/yang/",
        'ietf': "urn:ietf:params:xml:ns:yang:ietf-"
    }
    rpc = E('get-configuration', options)
    if filter_xml is not None:
        if not isinstance(filter_xml, etree._Element):
            if re.search("^<.*>$", filter_xml):
                filter_xml = etree.XML(filter_xml)
            else:
                # 'a/b/c' path syntax: build nested elements right-to-left
                filter_data = None
                for tag in filter_xml.split('/')[::-1]:
                    filter_data = E(tag) if filter_data is None else E(
                        tag, filter_data)
                filter_xml = filter_data
        # wrap the provided filter with toplevel <configuration> if
        # it does not already have one (not in case of yang model config)
        if (filter_xml.tag != 'configuration' and
                model is None and namespace is None):
            etree.SubElement(rpc, 'configuration').append(filter_xml)
        else:
            if model is not None or namespace is not None:
                if model == 'custom' and namespace is None:
                    raise AttributeError('For "custom" model, '
                                         'explicitly provide "namespace"')
                ns = namespace or (nmspaces.get(model.lower()) +
                                   filter_xml.tag)
                filter_xml.attrib['xmlns'] = ns
            rpc.append(filter_xml)
    # Temporarily swap the response transform when namespaces must be kept.
    transform = self._junos.transform
    if remove_ns is False:
        self._junos.transform = lambda: JXML.strip_namespaces_prefix
    try:
        response = self._junos.execute(rpc, **kwargs)
    finally:
        # always restore the original transform
        self._junos.transform = transform
    # in case of model provided top level should be data
    # BUG FIX: was "options.get('format') is not 'json'" -- identity
    # comparison against a string literal (SyntaxWarning on CPython >= 3.8
    # and implementation-dependent); use inequality.
    if model and filter_xml is None and options.get('format') != 'json':
        response = response.getparent()
        response.tag = 'data'
    return response
def test_bad_date(self):
    """A feed whose <updated> value is not a parseable date raises ValueError."""
    mangled = re.sub(r'2018-05-15T14:02:08Z', 'Nevermore', self.feed_xml)
    element = etree.XML(mangled.encode('utf-8'))
    with self.assertRaisesRegex(ValueError, r'Unknown string format'):
        get_timestamp(element)
def test_no_node(self):
    """A feed missing its <updated> element raises a descriptive ValueError."""
    stripped = re.sub(r'<updated.*</updated>', '', self.feed_xml)
    element = etree.XML(stripped.encode('utf-8'))
    with self.assertRaisesRegex(ValueError,
                                r'^XPath "./atom:updated" not found$'):
        get_timestamp(element)
def test_success(self):
    """get_patient_uuid extracts the UUID from the feed's first <entry>."""
    feed = etree.XML(self.feed_xml.encode('utf-8'))
    entry = next(child for child in feed if child.tag.endswith('entry'))
    self.assertEqual(get_patient_uuid(entry),
                     'e8aa08f6-86cd-42f9-8924-1b3ea021aeb4')
def execute(self, rpc_cmd, **kvargs):
    """
    Executes an XML RPC and returns results as either XML or native python

    :param rpc_cmd:
        can either be an XML Element or xml-as-string. In either case
        the command starts with the specific command element, i.e., not the
        <rpc> element itself

    :param func to_py':
        Is a caller provided function that takes the response and
        will convert the results to native python types. all kvargs
        will be passed to this function as well in the form::

            to_py( self, rpc_rsp, **kvargs )

    :raises ValueError:
        When the **rpc_cmd** is of unknown origin

    :raises PermissionError:
        When the requested RPC command is not allowed due to
        user-auth class privilege controls on Junos

    :raises RpcError:
        When an ``rpc-error`` element is contained in the RPC-reply

    :returns:
        RPC-reply as XML object.  If **to_py** is provided, then
        that function is called, and return of that function is
        provided back to the caller; presumably to convert the XML to
        native python data-types (e.g. ``dict``).
    """
    if self.connected is not True:
        raise EzErrors.ConnectClosedError(self)

    # Normalize the command to an etree Element.
    if isinstance(rpc_cmd, str):
        rpc_cmd_e = etree.XML(rpc_cmd)
    elif isinstance(rpc_cmd, etree._Element):
        rpc_cmd_e = rpc_cmd
    else:
        raise ValueError("Dont know what to do with rpc of type %s" %
                         rpc_cmd.__class__.__name__)

    # invoking a bad RPC will cause a connection object exception
    # will will be raised directly to the caller ... for now ...
    # @@@ need to trap this and re-raise accordingly.
    try:
        # ncclient private attribute access pulls the parsed reply document.
        rpc_rsp_e = self._conn.rpc(rpc_cmd_e)._NCElement__doc
    except NcOpErrors.TimeoutExpiredError:
        # err is a TimeoutExpiredError from ncclient,
        # which has no such attribute as xml.
        raise EzErrors.RpcTimeoutError(self, rpc_cmd_e.tag, self.timeout)
    except NcErrors.TransportError:
        raise EzErrors.ConnectClosedError(self)
    except RPCError as err:
        # err is an NCError from ncclient
        rsp = JXML.remove_namespaces(err.xml)
        # see if this is a permission error
        e = EzErrors.PermissionError if rsp.findtext(
            'error-message') == 'permission denied' else EzErrors.RpcError
        raise e(cmd=rpc_cmd_e, rsp=rsp)
    # Something unexpected happened - raise it up
    except Exception as err:
        warnings.warn("An unknown exception occured - please report.",
                      RuntimeWarning)
        raise

    # From 14.2 onward, junos supports JSON, so now code can be written as
    # dev.rpc.get_route_engine_information({'format': 'json'})
    if rpc_cmd_e.attrib.get('format') in ['json', 'JSON']:
        if self._facts == {}:
            self.facts_refresh()
        ver_info = self._facts['version_info']
        if ver_info.major[0] >= 15 or \
                (ver_info.major[0] == 14 and ver_info.major[1] >= 2):
            return json.loads(rpc_rsp_e.text)
        else:
            warnings.warn("Native JSON support is only from 14.2 onwards",
                          RuntimeWarning)

    # This section is here for the possible use of something other than
    # ncclient for RPCs that have embedded rpc-errors, need to check for
    # those now
    # rpc_errs = rpc_rsp_e.xpath('.//rpc-error')
    # if len(rpc_errs):
    #     raise EzErrors.RpcError(cmd=rpc_cmd_e, rsp=rpc_errs[0])

    # skip the <rpc-reply> element and pass the caller first child element
    # generally speaking this is what they really want. If they want to
    # uplevel they can always call the getparent() method on it.
    try:
        ret_rpc_rsp = rpc_rsp_e[0]
    except IndexError:
        # no children, so assume it means we are OK
        return True

    # if the caller provided a "to Python" conversion function, then invoke
    # that now and return the results of that function. otherwise just
    # return the RPC results as XML
    if kvargs.get('to_py'):
        return kvargs['to_py'](self, ret_rpc_rsp, **kvargs)
    else:
        return ret_rpc_rsp
def test_empty_path(self):
    """convertPath yields no group for a <path> element without path data."""
    converter = svglib.Svg2RlgShapeConverter(None)
    tracker = svglib.NodeTracker(etree.XML('<path id="W"/>'))
    assert converter.convertPath(tracker) is None
def test_serialize_extensions(self):
    """Serialize two extensions to XML and verify every attribute,
    description and atom link round-trips, then validate the schema."""
    serializer = extensions.ExtensionsXMLSerializer()
    data = {
        "extensions": [{
            "name": "Public Image Extension",
            "namespace": "http://foo.com/api/ext/pie/v1.0",
            "alias": "RS-PIE",
            "updated": "2011-01-22T13:25:27-06:00",
            "description": "Adds the capability to share an image.",
            "links": [{
                "rel": "describedby",
                # BUG FIX: this dict previously listed "type" twice
                # ("application/pdf" then "application/vnd.sun.wadl+xml");
                # the first duplicate was silently discarded at dict
                # construction, so only the effective value is kept.
                "type": "application/vnd.sun.wadl+xml",
                "href": "http://foo.com/api/ext/cs-pie.pdf"
            }, {
                "rel": "describedby",
                "type": "application/vnd.sun.wadl+xml",
                "href": "http://foo.com/api/ext/cs-pie.wadl"
            }]
        }, {
            "name": "Cloud Block Storage",
            "namespace": "http://foo.com/api/ext/cbs/v1.0",
            "alias": "RS-CBS",
            "updated": "2011-01-12T11:22:33-06:00",
            "description": "Allows mounting cloud block storage.",
            "links": [{
                "rel": "describedby",
                "type": "application/pdf",
                "href": "http://foo.com/api/ext/cs-cbs.pdf"
            }, {
                "rel": "describedby",
                "type": "application/vnd.sun.wadl+xml",
                "href": "http://foo.com/api/ext/cs-cbs.wadl"
            }]
        }]
    }
    xml = serializer.serialize(data, 'index')
    # parenthesized print works on both Python 2 and 3 (was "print xml")
    print(xml)
    root = etree.XML(xml)
    ext_elems = root.findall('{0}extension'.format(NS))
    self.assertEqual(len(ext_elems), 2)
    for i, ext_elem in enumerate(ext_elems):
        ext_dict = data['extensions'][i]
        self.assertEqual(ext_elem.findtext('{0}description'.format(NS)),
                         ext_dict['description'])
        for key in ['name', 'namespace', 'alias', 'updated']:
            self.assertEqual(ext_elem.get(key), ext_dict[key])
        link_nodes = ext_elem.findall('{0}link'.format(ATOMNS))
        self.assertEqual(len(link_nodes), 2)
        # BUG FIX: the inner enumerate used to shadow the outer 'i'.
        for j, link in enumerate(ext_dict['links']):
            for key, value in link.items():
                self.assertEqual(link_nodes[j].get(key), value)
    xmlutil.validate_schema(root, 'extensions')
def test_fillrule(self):
    """applyStyleOnShape maps fill-rule="evenodd" onto the shape's _fillRule."""
    shape = Polygon()
    element = etree.XML('<polygon fill-rule="evenodd"/>')
    svglib.Svg2RlgShapeConverter(None).applyStyleOnShape(shape, element)
    assert shape._fillRule == FILL_EVEN_ODD
# Python 2 script: load the CIDOC-CRM RDF/XML dump and walk its
# rdf:Property declarations.
# NOTE(review): this chunk appears truncated -- 'stuff' is never filled
# here and the second loop's 'pid' is not yet used; confirm against the
# full script.
fh = file('cidoc.xml')
data = fh.read()
fh.close()
# Namespace prefixes used by the XPath queries below.
NS = {
    'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
    'xsd': "http://www.w3.org/2001/XMLSchema#",
    'rdfs': "http://www.w3.org/2000/01/rdf-schema#",
    'dcterms': "http://purl.org/dc/terms/",
    'owl': "http://www.w3.org/2002/07/owl#",
    'crm': "http://www.cidoc-crm.org/cidoc-crm/",
    'xml': "http://www.w3.org/XML/1998/namespace"
}
dom = etree.XML(data)
stuff = []
names = []
props = dom.xpath("//rdf:Property", namespaces=NS)
# First pass: collect every property's rdf:about identifier.
for p in props:
    name = p.xpath('@rdf:about', namespaces=NS)[0]
    names.append(name)
# Second pass: derive the bare property id (text before the first '_'),
# stripping a trailing 'i' -- presumably the inverse-property marker in
# CIDOC-CRM naming; confirm.
for p in props:
    name = p.xpath('@rdf:about', namespaces=NS)[0]
    fu = name.find('_')
    pid = name[:fu]
    if pid[-1] == "i":
        pid = pid[:-1]
def assertXMLEqual(self, s1, s2, entity=None):
    """Assert that two XML fragments are equal, tolerating:

    * whitespace outside of element content and attribute values,
    * order of attributes,
    * order of certain child elements (see `sort_elements` below).

    Parameters:

    * s1, s2: string representations of an XML fragment; may be Unicode
      strings or UTF-8 encoded byte strings, and may contain an encoding
      declaration even when they are Unicode strings.  (An encoding
      declaration is the `encoding` attribute in the XML declaration,
      e.g. <?xml version="1.0" encoding="utf-8" ?>.)
    * entity: label included in the failure message.

    Raises AssertionError when the fragments differ.
    """
    # Ensure Unicode strings and strip the encoding attribute from the
    # XML declaration, since the two inputs may declare it differently.
    encoding_pattern = re.compile(
        r'^<\?xml +(([a-zA-Z0-9_]+=".*")?) +' +
        r'encoding="utf-8" +(([a-zA-Z0-9_]+=".*")?) *\?>')
    encoding_repl = r'<?xml \1 \3 ?>'
    s1 = re.sub(encoding_pattern, encoding_repl, _ensure_unicode(s1))
    s2 = re.sub(encoding_pattern, encoding_repl, _ensure_unicode(s2))
    parser = etree.XMLParser(remove_blank_text=True)
    x1 = etree.XML(s1, parser=parser)
    x2 = etree.XML(s2, parser=parser)

    # Sort certain elements whose order is not significant.
    def sort_children(root, sort_elements):
        for tag, attr in sort_elements:
            # elems is a list of elements with this tag name
            elems = root.xpath("//*[local-name() = $tag]", tag=tag)
            if len(elems) > 0:
                parent = elems[0].getparent()
                # Locate the contiguous run of same-tag children inside
                # the parent so it can be replaced by the sorted run.
                first = None
                after = None
                for i in range(0, len(parent)):
                    if parent[i].tag == tag and first is None:
                        first = i
                    if parent[i].tag != tag and first is not None:
                        after = i
                # The following changes the original XML tree:
                # The following pylint warning can safely be disabled, see
                # http://stackoverflow.com/a/25314665
                # pylint: disable=cell-var-from-loop
                parent[first:after] = sorted(elems,
                                             key=lambda e: e.attrib[attr])

    sort_elements = [
        # Sort sibling elements with <first> tag by its <second> attribute
        ("IPARAMVALUE", "NAME"),
        ("PROPERTY", "NAME"),
        ("PARAMETER", "NAME"),
    ]
    sort_children(x1, sort_elements)
    sort_children(x2, sort_elements)
    ns1 = _ensure_unicode(etree.tostring(x1))
    ns2 = _ensure_unicode(etree.tostring(x2))
    checker = doctestcompare.LXMLOutputChecker()
    # This tolerates differences in whitespace and attribute order
    if not checker.check_output(ns1, ns2, 0):
        diff = checker.output_difference(doctest.Example("", ns1), ns2, 0)
        raise AssertionError("XML is not as expected in %s: %s"%\
                             (entity, diff))
def post_page(self):
    """Parse CDATA_XML without stripping CDATA sections and store it."""
    cdata_parser = etree.XMLParser(strip_cdata=False)
    tree = etree.XML(CDATA_XML, cdata_parser)
    self.doc.put(tree)
def toTree(self, xmlstring):
    """Parse an XML string back into an XML element tree.

    :param xmlstring: XML document as a string
    :return: the root element of the parsed tree
    """
    tree = etree.XML(xmlstring)
    return tree
def fields_view_get(self, view_id=None, view_type='form', toolbar=False,
                    submenu=False):
    """Customize the tree view when opened from an invoice.

    If the caller set 'force_line_edit' in the context and the requested
    view is a tree, rewrite the view architecture so that:

    * a read-only 'qty_to_invoice' field and an editable 'invoice_qty'
      field are inserted before the first field,
    * every other field is made read-only,
    * a button to add all "to invoice" quantities and a button to open
      the line form view are added,
    * the tree itself becomes editable (top) but not creatable.

    Returns the (possibly modified) result of the parent implementation.
    """
    res = super().fields_view_get(
        view_id=view_id, view_type=view_type, toolbar=toolbar,
        submenu=submenu)
    force_line_edit = self._context.get('force_line_edit')
    if force_line_edit and view_type == 'tree':
        doc = etree.XML(res['arch'])
        # Insert qty_to_invoice BEFORE setup_modifiers runs below;
        # otherwise it would remain editable.
        placeholder = doc.xpath("//field[1]")[0]
        placeholder.addprevious(
            etree.Element('field', {
                'name': 'qty_to_invoice',
                'readonly': '1',
            }))
        # Make all existing fields not editable.
        for node in doc.xpath("//field"):
            node.set('readonly', '1')
            setup_modifiers(node, res['fields'], in_tree_view=True)
        # Add the invoice_qty field; added after the readonly pass so it
        # stays editable regardless of user rights.
        placeholder.addprevious(
            etree.Element(
                'field', {
                    'name': 'invoice_qty',
                    # we force editable no matter user rights
                    'readonly': '0',
                }))
        res['fields'].update(
            self.fields_get(['invoice_qty', 'qty_to_invoice']))
        # Button that adds all "to invoice" quantities to the invoice.
        placeholder.addprevious(
            etree.Element(
                'button', {
                    'name': 'action_add_all_to_invoice',
                    'type': 'object',
                    'icon': 'fa-plus-square',
                    'string': _('Agregar las cantidades en '
                                '"Para Facturar" a la factura actual'),
                }))
        # Button to open the purchase line form view, appended at the
        # end of the tree.
        placeholder = doc.xpath("//tree")[0]
        placeholder.append(
            etree.Element(
                'button', {
                    'name': 'action_line_form',
                    'type': 'object',
                    'icon': 'fa-external-link',
                    'string': _('Open Purchase Line Form View'),
                }))
        # Make the tree view editable in place.
        for node in doc.xpath("/tree"):
            node.set('edit', 'true')
            node.set('create', 'false')
            node.set('editable', 'top')
        res['arch'] = etree.tostring(doc)
    return res
# NOTE(review): truncated fragment — the enclosing function/loop headers
# above and the final except body below fall outside this view, so the
# indentation here is a best-effort reconstruction.
# Skip readings that could not be mapped to a known sensor.
if sensor_id is None:
    continue
samples.append(
    StaticSample(time=timestamp, data=float(data.text),
                 sensor_id=sensor_id))
return samples


if __name__ == "__main__":
    s = load_session()
    # Disable autoflush so a failed bulk insert can be rolled back and
    # retried row-by-row below.
    s.autoflush = False
    while True:
        try:
            # Poll the sensor feed and parse the returned XML.
            f = urllib.urlopen("http://sensors.media.mit.edu/rb/alldata.fcgi")
            xml = ET.XML(f.read())
            f.close()
            samples = parse_data(s, xml)
            logger.info("Inserting %d new rows", len(samples))
            try:
                # Fast path: insert the whole batch in one transaction.
                s.add_all(samples)
                s.commit()
            except IntegrityError:
                # A duplicate timestamp poisons the whole batch; retry
                # each sample individually so the rest still land.
                logger.warning(
                    "Insert failed due to duplicate time, retrying one-by-one")
                s.rollback()
                for sample in samples:
                    try:
                        s.add(sample)
                        s.commit()
                    except IntegrityError:
                        # NOTE(review): handler body truncated in this view.
def test_clone_file_when_clone_fails(self):
    """Ensure clone is cleaned up on failure.

    Drives clone_file() with canned API responses in which the clone
    status comes back 'failed', then verifies the three API calls made:
    clone start, clone status poll, and the clean-up clone-clear.
    """
    expected_src_path = "fake_src_path"
    expected_dest_path = "fake_dest_path"
    fake_volume_id = '0309c748-0d94-41f0-af46-4fbbd76686cf'
    fake_clone_op_id = 'c22ad299-ecec-4ec0-8de4-352b887bfce2'
    # Response to the clone-start call: hands back the clone op id.
    fake_clone_id_response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                       <clone-id>
                         <clone-id-info>
                           <volume-uuid>%(volume)s</volume-uuid>
                           <clone-op-id>%(clone_id)s</clone-op-id>
                         </clone-id-info>
                       </clone-id>
                     </results>""" % {
            'volume': fake_volume_id,
            'clone_id': fake_clone_op_id
        }))
    # Response to the status poll: clone-state is 'failed', which should
    # make clone_file raise and trigger the clean-up path.
    fake_clone_list_response = netapp_api.NaElement(
        etree.XML("""<results>
                       <clone-list-status>
                         <clone-id-info>
                           <volume-uuid>%(volume)s</volume-uuid>
                           <clone-op-id>%(clone_id)s</clone-op-id>
                         </clone-id-info>
                         <clone-op-id>%(clone_id)s</clone-op-id>
                       </clone-list-status>
                       <status>
                         <ops-info>
                           <clone-state>failed</clone-state>
                         </ops-info>
                       </status>
                     </results>""" % {
            'volume': fake_volume_id,
            'clone_id': fake_clone_op_id
        }))
    fake_clone_clear_response = mock.Mock()
    # The three canned responses are consumed in call order.
    self.connection.invoke_successfully.side_effect = [
        fake_clone_id_response, fake_clone_list_response,
        fake_clone_clear_response
    ]
    self.assertRaises(netapp_api.NaApiError, self.client.clone_file,
                      expected_src_path, expected_dest_path)
    # Call 0: the clone-start request carries src/dest paths and no
    # destination-exists flag, with tunneling enabled.
    __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[0]
    actual_request = _args[0]
    enable_tunneling = _kwargs['enable_tunneling']
    actual_src_path = actual_request \
        .get_child_by_name('source-path').get_content()
    actual_dest_path = actual_request.get_child_by_name(
        'destination-path').get_content()
    self.assertEqual(expected_src_path, actual_src_path)
    self.assertEqual(expected_dest_path, actual_dest_path)
    self.assertEqual(
        actual_request.get_child_by_name('destination-exists'), None)
    self.assertTrue(enable_tunneling)
    # Call 1: the status poll references the clone id returned above.
    __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[1]
    actual_request = _args[0]
    enable_tunneling = _kwargs['enable_tunneling']
    actual_clone_id = actual_request.get_child_by_name('clone-id')
    actual_clone_id_info = actual_clone_id.get_child_by_name(
        'clone-id-info')
    actual_clone_op_id = actual_clone_id_info.get_child_by_name(
        'clone-op-id').get_content()
    actual_volume_uuid = actual_clone_id_info.get_child_by_name(
        'volume-uuid').get_content()
    self.assertEqual(fake_clone_op_id, actual_clone_op_id)
    self.assertEqual(fake_volume_id, actual_volume_uuid)
    self.assertTrue(enable_tunneling)
    # Ensure that the clone-clear call is made upon error
    __, _args, _kwargs = self.connection.invoke_successfully.mock_calls[2]
    actual_request = _args[0]
    enable_tunneling = _kwargs['enable_tunneling']
    actual_clone_id = actual_request \
        .get_child_by_name('clone-id').get_content()
    self.assertEqual(fake_clone_op_id, actual_clone_id)
    self.assertTrue(enable_tunneling)
def _parse_response(self, response):
    """Parse a raw API response into a NaElement.

    :param response: raw XML response body
    :raises NaApiError: if the response is empty/None
    :return: NaElement wrapping the parsed XML root
    """
    if not response:
        raise NaApiError('No response received')
    return NaElement(etree.XML(response))
def test_validity(self):
    """The rendered body must validate against the site schema."""
    body_bytes = str.encode(self.cont.body)
    siteschema.assertValid(etree.XML(body_bytes).getroottree())
    assert siteschema.validate(etree.XML(body_bytes))
def __init__(self, xml):
    """Parse a PubMed article-set document and collect its articles.

    :param xml: the raw PubmedArticleSet XML
    """
    super(PubMedArticleSet, self).__init__(xml=xml)
    root = le.XML(xml)
    self._xmlroot = root
    # One entry per <PubmedArticle> child of the set.
    self.articles = root.findall('PubmedArticle')
def test_render_response_xml(self):
    """Rendering a string-response problem yields the expected HTML tree.

    Builds a minimal stringresponse problem, stubs out the template
    renderer, renders the problem, and checks both the produced element
    structure and the exact template calls made.
    """
    # Generate some XML for a string response
    kwargs = {
        'question_text': "Test question",
        'explanation_text': "Test explanation",
        'answer': 'Test answer',
        'hints': [('test prompt', 'test_hint', 'test hint text')]
    }
    xml_str = StringResponseXMLFactory().build_xml(**kwargs)
    # Mock out the template renderer
    the_system = test_capa_system()
    the_system.render_template = mock.Mock()
    the_system.render_template.return_value = "<div class='input-template-render'>Input Template Render</div>"
    # Create the problem and render the HTML
    problem = new_loncapa_problem(xml_str, capa_system=the_system)
    rendered_html = etree.XML(problem.get_html())
    # Expect problem has been turned into a <div>
    self.assertEqual(rendered_html.tag, "div")
    # Expect that the response has been turned into a <div> with correct
    # attributes
    response_element = rendered_html.find('div')
    self.assertEqual(response_element.tag, "div")
    self.assertEqual(response_element.attrib["aria-label"], "Question 1")
    # Expect that the response div.wrapper-problem-response contains a
    # <div> for the textline
    textline_element = response_element.find('div')
    self.assertEqual(textline_element.text, 'Input Template Render')
    # Expect a child <div> for the solution with the rendered template
    solution_element = rendered_html.xpath(
        '//div[@class="input-template-render"]')[0]
    self.assertEqual(solution_element.text, 'Input Template Render')
    # Expect that the template renderer was called with the correct
    # arguments, once for the textline input and once for the solution
    expected_textline_context = {
        'STATIC_URL': '/dummy-static/',
        'status': the_system.STATUS_CLASS('unsubmitted'),
        'value': '',
        'preprocessor': None,
        'msg': '',
        'inline': False,
        'hidden': False,
        'do_math': False,
        'id': '1_2_1',
        'trailing_text': '',
        'size': None,
        'response_data': {'label': 'Test question', 'descriptions': {}},
        'describedby_html': HTML('aria-describedby="status_1_2_1"')
    }
    expected_solution_context = {'id': '1_solution_1'}
    # Each template appears twice in the expected call list —
    # NOTE(review): presumably the problem is rendered twice (once at
    # construction and once by get_html()); confirm before relying on
    # the call count.
    expected_calls = [
        mock.call('textline.html', expected_textline_context),
        mock.call('solutionspan.html', expected_solution_context),
        mock.call('textline.html', expected_textline_context),
        mock.call('solutionspan.html', expected_solution_context)
    ]
    self.assertEqual(
        the_system.render_template.call_args_list,
        expected_calls
    )
def testFirstNameLastNameRecord(self, srefData, srunData, bVisual):
    """Compare (firstname, lastname) record pairs between a reference
    document and a run document, grouped by page.

    Returns a tuple (cntOk, cntErr, cntMissed, ltisRefsRunbErrbMiss),
    the last element being a sorted list of per-record comparison
    tuples (key, page-number, ref, run, bErr, bMiss).

    NOTE(review): this method looks like an incomplete adaptation of
    testRecordField — see the inline notes; as written the matching
    loop raises NameError on `lRunPerPage`.
    """
    cntOk = cntErr = cntMissed = 0
    # Both inputs are XML strings; parse them as UTF-8 documents.
    RefData = etree.XML(srefData.strip("\n").encode('utf-8'))
    RunData = etree.XML(srunData.strip("\n").encode('utf-8'))
    lRef = []
    lPages = RefData.xpath('//%s' % ('PAGE[@number]'))
    # NOTE(review): values are the constant 1, but the matching loop
    # below evaluates len(lRefKeys[key]) — that would be a TypeError.
    # Presumably per-page record lists were intended, as built in
    # testRecordField; confirm.
    lRefKeys = {}
    for page in lPages:
        pnum = page.get('number')
        key = page.get('pagenum')
        lRefKeys[key] = 1
        xpath = "./%s" % ("RECORD")
        lrecord = page.xpath(xpath)
        if len(lrecord) == 0:
            lRef.append([])
        else:
            # Collect (page-number, page-key, firstname, lastname) per
            # record that carries a firstname attribute.
            for record in lrecord:
                xpath = "./%s" % ("./@firstname")
                lf = record.xpath(xpath)
                xpath = "./%s" % ("./@lastname")
                ln = record.xpath(xpath)
                if len(lf) > 0:
                    lRef.append((pnum, key, lf[0], ln[0]))
    lPageMapping = {}
    lRun = []
    if RunData is not None:
        lpages = RunData.xpath('//%s' % ('PAGE[@number]'))
        for page in lpages:
            pnum = page.get('number')
            key = page.get('pagenum')
            # Only pages also present in the reference are considered.
            if key in lRefKeys.keys():
                lPageMapping[key] = pnum
                # record level!
                xpath = "./%s" % ("RECORD[@firstname and @lastname]")
                lrecord = page.xpath(xpath)
                if len(lrecord) == 0:
                    pass
                else:
                    for record in lrecord:
                        xpath = "./%s" % ("./@firstname")
                        lf = record.xpath(xpath)
                        xpath = "./%s" % ("./@lastname")
                        ln = record.xpath(xpath)
                        if len(lf) > 0:  # and lf[0].getContent() != ln[0].getContent():
                            lRun.append((pnum, key, lf[0], ln[0]))
    ltisRefsRunbErrbMiss = list()
    # NOTE(review): `lRunPerPage` is never assigned in this method (the
    # records were collected into the flat lists lRef/lRun above) — this
    # loop raises NameError unless a module-level lRunPerPage exists.
    # Compare with testRecordField, which builds lRunPerPage per page.
    for key in lRunPerPage:
        lRun = lRunPerPage[key]
        lRef = lRefKeys[key]
        runLen = len(lRunPerPage[key])
        refLen = len(lRefKeys[key])
        # Hungarian assignment wants rows <= cols; swap and remember the
        # transposition in bT so result tuples keep ref/run order.
        bT = False
        if refLen <= runLen:
            rows = lRef
            cols = lRun
        else:
            rows = lRun
            cols = lRef
            bT = True
        cost_matrix = np.zeros((len(rows), len(cols)), dtype=float)
        for a, i in enumerate(rows):
            curRef = i
            for b, j in enumerate(cols):
                runElt = j
                # testCompareRecordField yields a 0-100 similarity score.
                ret, val = self.testCompareRecordField(curRef, runElt)
                val /= 100
                # Cost is inverse similarity; a zero similarity gets a
                # large constant cost instead of dividing by zero.
                if val == 0:
                    dist = 10
                else:
                    dist = 1 / val
                cost_matrix[a, b] = dist
        m = linear_sum_assignment(cost_matrix)
        r1, r2 = m
        # Similarity threshold, rescaled to match the 0-1 scores above.
        lcsTH = self.lcsTH / 100
        lCovered = []
        for a, i in enumerate(r2):
            # NOTE(review): `runElt` below is whatever the last
            # cost-matrix iteration left behind — rows[r1[a]]/cols[i]
            # were probably intended, as in testRecordField; confirm.
            if 1 / cost_matrix[r1[a, ], i] > lcsTH:
                cntOk += 1
                if bT:
                    ltisRefsRunbErrbMiss.append(
                        (runElt[1], int(runElt[0]), cols[i], rows[r1[a]],
                         False, False))
                else:
                    ltisRefsRunbErrbMiss.append(
                        (runElt[1], int(runElt[0]), rows[r1[a]], cols[i],
                         False, False))
            else:
                # too distant: count the assigned pair as an error.
                if bT:
                    lCovered.append(i)
                    ltisRefsRunbErrbMiss.append(
                        (runElt[1], int(runElt[0]), "", rows[r1[a]],
                         True, False))
                else:
                    lCovered.append(r1[a])
                    ltisRefsRunbErrbMiss.append(
                        (runElt[1], int(runElt[0]), "", cols[i],
                         True, False))
                cntErr += 1
        # Reference entries never matched by the assignment are misses.
        for iref in r1:
            if iref not in r2:
                ltisRefsRunbErrbMiss.append(
                    (runElt[1], int(runElt[0]), lRef[iref], '',
                     False, True))
                cntMissed += 1
        for iref in lCovered:
            ltisRefsRunbErrbMiss.append(
                (runElt[1], int(runElt[0]), lRef[iref], '', False, True))
            cntMissed += 1
    # Sort the per-record results by page key.
    ltisRefsRunbErrbMiss.sort(key=lambda x: x[0])
    # (A legacy greedy-matching implementation was kept here as a large
    # commented-out block; removed for readability — see VCS history.)
    return (cntOk, cntErr, cntMissed, ltisRefsRunbErrbMiss)