def persistTripData(sim_exec_id):
    db = DBUtil().getDatabase()
    tree = ElementTree()
    tree.parse(TRIP_OUT_FILE)
    trips = tree.findall('tripinfo')
    route_tree = ElementTree()
    route_tree.parse(ROUTE_OUT_FILE)
    routes = route_tree.findall('vehicle')
    for trip in trips:
        vehicle_route = []
        for route in routes:
            if route.attrib['id'] == trip.attrib['id']:
                edge_string = route.find('route').attrib['edges']
                vehicle_route = edge_string.split()
                break
        db.simulation_trip_data.insert({
            "sim_exec_id": sim_exec_id,
            "vehicle_id": trip.attrib['id'],
            "departstep": float(trip.attrib['depart']),
            "duration": float(trip.attrib['duration']),
            "routelength": convertMeterToFeet(float(trip.attrib['routeLength'])),
            "waittime": float(trip.attrib['waitSteps']),
            "speed": convertMperSecToMPH(float(trip.attrib['routeLength']) / float(trip.attrib['duration'])),
            "type": trip.attrib['vType'],
            "route": vehicle_route
        })
def getAverageResults(*files):
    distortionResults = []
    intrinsicsResults = []
    for filename in files:
        tree = ElementTree()
        try:
            tree.parse(filename)
        except (OSError, IOError):
            print("unable to read file %s" % filename)
            continue
        for distortion in tree.findall('Distortion'):
            # rtslam only takes first 3 distortion coefficients
            distortionResult = tuple(float(x) for x in distortion.find('data').text.split()[:3])
            distortionResults.append(distortionResult)
        for intrinsic in tree.findall('Intrinsics'):
            rawIntrinsics = [float(x) for x in intrinsic.find('data').text.split()]
            # rawIntrinsics will be a flattened matrix of the form:
            # | alphaU    0     u0 |
            # |   0     alphaV  v0 |
            # |   0       0      1 |
            # We want u0, v0, alphaU, alphaV - so we want indices 2, 5, 0, 4
            intrinsicsResults.append(
                (rawIntrinsics[2], rawIntrinsics[5], rawIntrinsics[0], rawIntrinsics[4])
            )
    from pprint import pprint
    pprint(distortionResults)
    pprint(intrinsicsResults)
    return averageTuples(distortionResults), averageTuples(intrinsicsResults)
def load_skos_vocab(self, fname):
    """Import a vocabulary into the DB from xml file fname in SKOS format"""
    doc = ElementTree()
    goto = '/'
    try:
        doc.parse(fname)
    except IOError:
        # In case I passed in a string, like in the upload script, instead of a file object
        f = NamedTemporaryFile(delete=False)
        f.write(fname)
        f.close()
        doc.parse(f.name)
        os.unlink(f.name)
    except TypeError:
        self.message(40, 'Sorry, that wasn\'t a SKOS RDF file.')
        goto = '/vocabularies/load-skos'
    if doc.getroot().tag != TAG('rdf:RDF'):
        self.message(40, "We need a SKOS RDF file. Try again.")
        goto = '/vocabularies/load-skos'
    for vocab in doc.findall('.//' + TAG('skos:ConceptScheme')):
        vocab = self.load_vocab_instance(vocab)
        goto = vocab.get_absolute_url()
    for concept in doc.findall('.//' + TAG('skos:Concept')):
        self.load_concept_instance(concept)
    return goto
def modify_serverxml(serverxml, port, docbase):
    try:
        tree = ElementTree()
        tree.parse(serverxml)
        serverElem = tree.findall(".")
        for s in serverElem:
            s.set("port", str(int(port) + 1))
        connectors = tree.findall("Service/Connector")
        for c in connectors:
            if c.get("port") == "8080":
                c.set("port", port)
                continue
            if c.get("port") == "8009":
                c.set("port", str(int(port) + 2))
                continue
        hosts = tree.findall("Service/Engine/Host")
        for h in hosts:
            if h.get("name") == "localhost":
                context_properties = {'docBase': docbase, 'path': '', 'reloadable': 'false'}
                context = Element("Context", context_properties)
                h.append(context)
                break
        tree.write(serverxml, encoding='utf-8', xml_declaration=True)
        return 0
    except:
        return 1
def lvl(self):
    lvl = dict()
    xmltree = ElementTree()
    xmltree.parse(self.level.path)
    lvl.update(self._get_attributes_data(xmltree.getroot()))
    layers = list()
    for layer_node in xmltree.findall('layer'):
        layer = dict()
        layer.update(self._get_attributes_data(layer_node))
        data = list()
        for tile_node in layer_node.findall('data/tile'):
            data.append(self._normalize_type_attribute(tile_node.get('gid')))
        layer["data"] = data
        layer["type"] = "tilelayer"
        layers.append(layer)
    for objectgroup_node in xmltree.findall('objectgroup'):
        objectgroup = dict()
        objectgroup.update(self._get_attributes_data(objectgroup_node))
        objects = list()
        for object in objectgroup_node.findall('object'):
            objects.append(self._get_attributes_data(object))
        objectgroup["objects"] = objects
        objectgroup["type"] = "objectgroup"
        layers.append(objectgroup)
    lvl["layers"] = layers
    return lvl
def parse_file(filename):
    tree = ElementTree()
    tree.parse(filename)
    # config_element = tree.find("config")
    classname = tree.getroot().attrib["name"]
    include = tree.findtext("include")

    enums = {}
    for enum in tree.findall("enums/enum"):
        # Element.getiterator() was removed in Python 3.9; iter() is the replacement.
        elements = [element.attrib["name"] for element in enum.iter("element")]
        enums[enum.attrib["name"]] = elements

    values = []
    for v_elem in tree.findall('values/value'):
        v = {}
        v['name'] = v_elem.attrib['name']
        v['type'] = v_elem.attrib['type']
        v['default'] = v_elem.attrib['default']
        v['comment'] = v_elem.text
        values.append(v)

    global_vars = []
    for g_elem in tree.findall('global'):
        global_vars.append(g_elem.attrib['name'])

    return {"classname": classname, "include": include, "enums": enums,
            "values": values, "filename": filename, "globals": global_vars}
def parse_xml(root: et.ElementTree):
    """Parse the XML tree to return an equivalent dict."""
    parsed = {}
    for value in [
            'title', 'slug', 'date', 'category', 'subcategory', 'question',
            'answer', 'difficulty'
    ]:
        parsed[value] = root.find(value).text
    parsed['interviews'] = [x.text for x in root.findall('interview')]
    parsed['links'] = {x.get('name'): x.text for x in root.findall('link')}
    parsed['include'] = bool(root.find('include').text)
    # I'm not 100% sure whether this is not just a jupyter-book issue
    parsed['answer'] = parsed['answer'].replace(
        r"\\", r"\\\\\\\\")  # Fixes align newlines.
    parsed['question'] = parsed['question'].replace(
        r"\\\\", r"\\\\\\\\")  # Fixes align newlines.
    if parsed['answer'].strip('\n') == '':
        parsed['answer'] = None
    return parsed
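# A minimal usage sketch for parse_xml above, assuming the ElementTree module
# is imported as `et` (as the type hint suggests) and that every mandatory
# child element is present. The field values below are made up for the demo;
# passing the root Element works because only find/findall are used.
import xml.etree.ElementTree as et

sample = et.fromstring("""
<entry>
  <title>Binary search</title><slug>binary-search</slug><date>2021-01-01</date>
  <category>algorithms</category><subcategory>search</subcategory>
  <question>How does binary search work?</question>
  <answer>Halve the search interval each step.</answer>
  <difficulty>easy</difficulty><include>yes</include>
  <interview>Acme</interview>
  <link name="wiki">https://en.wikipedia.org/wiki/Binary_search_algorithm</link>
</entry>
""")
print(parse_xml(sample)['title'])  # -> 'Binary search'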
class xmler: def __init__(self, xml, xmlcol): self.xml = xml self.xmlcol = xmlcol if self.xml is None: self.root = None elif self.xmlcol in ('InterConnectResponse', 'InterConnectRequest', 'Request', 'Response', 'RawSoapResponse', 'RawSoapRequest'): self.root = ElementTree(fromstring(xml)).getroot() self.ns = None else: raise TypeError('This XML type might not be supported') # self.attrs = [(elem.tag) if self.root is not None else '' for elem in self.root.iter()] # self.attrs = attr_sorter(self.root, self.root_ns, LOB, PL_Request) def find_xpath(self, xpath): def reconfigure_xpath(xpath): if xpath[0] != '.': xpath = ".//ns:" + xpath return xpath elif xpath[0] == '.': if xpath[0:3] != r'.//': raise ValueError(r'Non-singular Xpaths must start .//') else: split = xpath.split('//') xpath = '.' for chunk in split[1:]: xpath = xpath + '//ns:' + chunk i = xpath.find('[') if i > 0: xpath = xpath[0:i + 1] + 'ns:' + xpath[i + 1:] return xpath else: return xpath if self.root is not None: if self.xmlcol in ('InterConnectResponse', 'InterConnectRequest', 'Request', 'Response'): list1 = [ elem.text for elem in self.root.findall( reconfigure_xpath(xpath), namespaces={ 'ns': 'http://xml.equifax.com/XMLSchema/InterConnect' }) ] elif self.xmlcol in ('RawSoapResponse', 'RawSoapRequest'): list1 = [elem.text for elem in self.root.findall(xpath)] shorten = lambda list1: list1 if len(list1) > 1 else (list1[ 0] if len(list1) == 1 else None) return shorten(list1) elif self.root is None: return None
def _parse_non_trade_operations(self, xml_tree: ET.ElementTree): bonds_redemption = {} for rec in xml_tree.findall('spot_non_trade_security_operations/item'): f = rec.attrib if 'Снятие ЦБ с учета. Погашение облигаций' in f['comment']: ticker = self._tickers.get(grn=f['grn_code']) key = (ticker, _parse_datetime(f['operation_date'])) assert key not in bonds_redemption qnty = float(f['quantity']) assert float(int(qnty)) == qnty bonds_redemption[key] = int(qnty) elif '(Конвертация ЦБ)' in f['comment']: self._parse_cb_convertation(f) else: # print(f) # exit(1) pass for rec in xml_tree.findall('spot_non_trade_money_operations/item'): f = rec.attrib comment = f['comment'] if any(comment.startswith(p) for p in ('Поставлены на торги средства клиента', 'Перевод денежных средств с клиента')): self._deposits_and_withdrawals.append(( _parse_datetime(f['operation_date']), Money(f['amount'], Currency.parse(f['currency_code'])), )) continue if comment.startswith('Выплата дохода клиент'): self._parse_money_payment(f, bonds_redemption) continue known_prefixes = [ 'Комиссия ', 'Вознаграждение ', 'Ежегодная комиссия за', 'Возмещение за депозитарные услуги', 'Депозитарная комиссия за операции', 'Удержан налог на доход по дивидендам', 'Налог на доход за', 'Удержан налог на доход с клиента ', 'Перечисление дохода по акциям', 'Возврат ошибочно удержанного налога с клиента', 'Возврат излишне удержанного налога с клиента', 'Проценты по предоставленным займам ЦБ', 'Списаны средства клиента', ] if any(comment.startswith(p) for p in known_prefixes): continue raise Exception(f'unsupported description {f}') assert not bonds_redemption, 'not empty'
def main(source, s2):
    PATTERN = re.compile('^\d+$')
    digits = {}
    with open(s2) as c:
        xml = ElementTree(file=c)
        for currency in xml.findall('.//ISO_CURRENCY'):
            code = currency.find('ALPHABETIC_CODE').text
            digit = currency.find('MINOR_UNIT').text
            if code is not None and digit is not None:
                if PATTERN.match(digit):
                    digits[code] = int(digit)
    xml = None
    with open(source) as c:
        xml = ElementTree(file=c)
        max_len = 0
        currencies = []
        for currency in xml.findall('.//currency'):
            symbol = None
            code = currency.get('type')
            elm = currency.find('symbol')
            digit = -1
            if elm is not None:
                symbol = unicode(elm.text)
                if max_len < len(symbol):
                    max_len = len(symbol)
            else:
                symbol = u""
            elm2 = currency.find('displayName')
            name = ""
            if elm2 is not None:
                t = elm2.text
                # TODO(Constellation) find a better method...
                # Check the display-name text itself; `name` is still empty at this point.
                if is_ascii(unicode(t)):
                    name = '"' + t.encode('utf-8') + '"'
                else:
                    name = 'NULL'
            if code in digits:
                digit = digits[code]
            currencies.append((code, symbol, name, digit))
    print (HEADER % (
        max_len,
        len(currencies),
        ', // NOLINT\n'.join([
            ' { "' + c[0] + '", ' + c[2] + ', ' + str(c[3]) + ', { ' +
            str(len(c[1])) + 'U, ' + dump_line_currency(c[1]) + ' } }'
            for c in currencies]) + ' // NOLINT',
        ',\n'.join([' CURRENCY_' + c[0].upper() for c in currencies])
    )).strip()
def failures(test_summary):
    doc = ElementTree(file=test_summary)
    failures = [
        TestFailure(_testname(t), t.find('./failure').text)
        for t in doc.findall('.//testcase[failure]')
    ]
    errors = [
        TestFailure(_testname(t), t.find('./error').text)
        for t in doc.findall('.//testcase[error]')
    ]
    return failures + errors
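# A hedged sketch of how failures() above might be driven on a JUnit-style
# report. TestFailure and _testname are defined elsewhere in the original
# module; the stand-ins below are hypothetical, only so the example runs.
import collections
import io


def _testname(t):  # assumed helper; the real one lives elsewhere in the module
    return t.get("classname", "") + "." + t.get("name", "")


TestFailure = collections.namedtuple("TestFailure", "name message")  # assumed shape

report = io.StringIO("""
<testsuite>
  <testcase classname="pkg.TestMath" name="test_add"/>
  <testcase classname="pkg.TestMath" name="test_div">
    <failure>ZeroDivisionError</failure>
  </testcase>
</testsuite>
""")
print(failures(report))
# -> [TestFailure(name='pkg.TestMath.test_div', message='ZeroDivisionError')]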
def get_dependencies(stack_folder):
    # get the stack dependencies
    print("Get the dependencies of stack in folder %s" % stack_folder)
    try:
        print("Parsing stack.xml...")
        root = ElementTree(None, os.path.join(stack_folder, 'stack.xml'))
        stack_dependencies = [d.text for d in root.findall('depends')]
        system_dependencies = [d.text for d in root.findall('build_depends')]
        print("Stack Dependencies: %s" % ' '.join(stack_dependencies))
        print("System Dependencies: %s" % ' '.join(system_dependencies))
        return stack_dependencies + system_dependencies
    except Exception:
        raise BuildException("Failed to parse stack.xml of stack in folder %s" % stack_folder)
def __init__(self, *args, **kwargs):
    super(UIMenu, self).__init__(*args, **kwargs)
    doc = ElementTree(file=get_ui_path(self.__class__, **kwargs))

    # The action text is separate from the addaction tags in the ui file, so
    # load each action text into a dictionary to make it accessible from
    # the traverse function.
    actions = {}
    # './/action' is the supported spelling; a leading '//' raises a FutureWarning.
    for act in doc.findall('.//action'):
        actions[act.attrib['name']] = act.find('property/string').text

    # Parse the entire ui file, looking specifically for menus and actions.
    # This also generates the Maya menu bar.
    self.traverse(doc.findall('*'), actions)
def _create_ncx(self): """ Create and old style TOC file ('ncx' file): ``toc.ncx``. To be used for reading systems that cannot handle EPUB3 specific TOC. """ # noinspection PyPep8Naming,PyPep8 def set_nav_point(parent, href, label, the_index): the_navPoint = SubElement(parent, "{http://www.daisy.org/z3986/2005/ncx/}navPoint") the_navPoint.set("id", "nav%s" % the_index) the_navPoint.set("playOrder", "%s" % the_index) the_navPoint.set("class", "h1") the_navLabel = SubElement(the_navPoint, "{http://www.daisy.org/z3986/2005/ncx/}navLabel") the_txt = SubElement(the_navLabel, "{http://www.daisy.org/z3986/2005/ncx/}text") the_txt.text = label content = SubElement(the_navPoint, "{http://www.daisy.org/z3986/2005/ncx/}content") content.set("src", href) return the_navPoint # The doc title is also set in the TOC if exists ET.register_namespace('', "http://www.daisy.org/z3986/2005/ncx/") ncx = ElementTree(ET.fromstring(TOC)) # Set the title title = ncx.findall(".//{http://www.daisy.org/z3986/2005/ncx/}docTitle")[0] txt = SubElement(title, "{http://www.daisy.org/z3986/2005/ncx/}text") txt.text = self.document.title # Set the authors authors = ncx.findall(".//{http://www.daisy.org/z3986/2005/ncx/}docAuthor")[0] txt = SubElement(authors, "{http://www.daisy.org/z3986/2005/ncx/}text") txt.text = self.document.editors # Set the book ID meta_id = ncx.findall(".//{http://www.daisy.org/z3986/2005/ncx/}meta[@name='dtb:uid']")[0] meta_id.set('content', self.document.dated_uri) navMap = ncx.findall(".//{http://www.daisy.org/z3986/2005/ncx/}navMap")[0] # One element must be removed from the general template to_remove = ncx.findall(".//{http://www.daisy.org/z3986/2005/ncx/}navPoint[@id='toc']")[0] navMap.remove(to_remove) index = 2 for toc_entry in self.document.toc: set_nav_point(navMap, toc_entry.href, toc_entry.label, index) index += 1 self.book.write_element('toc.ncx', ncx)
def on_loadWeightsButton_clicked(self):
    for mesh in selected():
        sc = mel.findRelatedSkinCluster(mesh)
        if not sc:
            continue
        sc = ls(sc)[0]
        doc = ElementTree(file=path + mesh.nodeName().replace('|', '').replace(':', '.') + '.xml')
        # './/...' is the supported spelling; a leading '//' raises a FutureWarning.
        influences = [inf.attrib['name'] for inf in doc.findall('.//Influence')]
        for i, vtx in enumerate(doc.findall('.//Vertex')):
            weights = [(influences[int(w.attrib['influence'])], float(w.attrib['value']))
                       for w in vtx.findall('Weight')]
            skinPercent(sc, '%s.vtx[%d]' % (mesh, i), transformValue=weights)
def stats(file):
    try:
        doc = ElementTree(file=open(file, 'r'))
        return ' <td> %s </td><td> %s </td><td> %s </td><td> %s </td> ' % (
            "<font color=\"red\"><b>FAIL</b></font>"
            if [e.findtext('.') for e in doc.findall('./failed')][0].strip() == "true" else " OK ",
            [e.findtext('.') for e in doc.findall('./errorTotal')][0].strip(),
            [e.findtext('.') for e in doc.findall('./warningsTotal')][0].strip(),
            [e.findtext('.') for e in doc.findall('./yikesTotal')][0].strip())
    except Exception as inst:
        return str(inst)
def _create_nav(self): """ Create a new style TOC file ('nav' file): ``nav.xhtml``. """ full_nav = True if len(self.document.nav_toc) != 0 else False final_nav_header = NAV % (NAV_CSS_NO_NUMBERING if full_nav else NAV_CSS_NUMBERING) # Setting the default namespace; this is important when the file is generated ET.register_namespace('', "http://www.w3.org/1999/xhtml") ET.register_namespace('epub', "http://www.idpf.org/2007/ops") nav = ElementTree(ET.fromstring(final_nav_header)) # Set the title title = nav.findall(".//{http://www.w3.org/1999/xhtml}title")[0] title.text = self.document.title + " - Table of Contents" # Set the date date = nav.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='date']")[0] date.set("content", self.document.date.strftime(DATE_FORMAT_STRING)) # # The landmark part of the nav file has to be changed; there is no explicit cover page # li_landmark = nav.findall(".//{http://www.w3.org/1999/xhtml}a[@href='cover.xhtml']")[0] # li_landmark.set("href", "Overview.xhtml") navMap = nav.findall(".//{http://www.w3.org/1999/xhtml}nav[@id='toc']")[0] h2 = SubElement(navMap, "{http://www.w3.org/1999/xhtml}h2") h2.text = "Table of Contents" ol = SubElement(navMap, "{http://www.w3.org/1999/xhtml}ol") li = SubElement(ol, "{http://www.w3.org/1999/xhtml}li") a = SubElement(li, "{http://www.w3.org/1999/xhtml}a") a.set("href", "cover.xhtml") a.text = "Cover" a.set("class", "toc") if full_nav: for toc_entry in self.document.nav_toc: ol.append(toc_entry) else: for toc_entry in self.document.toc: li = SubElement(ol, "{http://www.w3.org/1999/xhtml}li") a = SubElement(li, "{http://www.w3.org/1999/xhtml}a") a.set("href", toc_entry.href) a.text = toc_entry.short_label a.set("class", "toc") self.book.write_element('nav.xhtml', nav)
def persistQueueingData(sim_exec_id):
    # work around for invalid xml generated by sumo version 0.17.1
    f = open(QUEUE_OUT_FILE, 'r')
    outfile = open(QUEUE_OUT_FILE + ".cleaned", 'w')
    for line in f:
        linestring = line.rstrip('\n')
        if linestring == "/>":
            outfile.write(" </lanes>\n")
        elif linestring != ">":
            outfile.write(line)
    outfile.flush()
    f.close()
    outfile.close()

    db = DBUtil().getDatabase()
    tree = ElementTree()
    tree.parse(QUEUE_OUT_FILE + ".cleaned")
    steps = tree.findall('data')
    for step in steps:
        lanes = step.findall('lanes/lane')
        if lanes is not None:
            for lane in lanes:
                db.simulation_queue_data.insert({
                    "sim_exec_id": sim_exec_id,
                    "lane_id": lane.attrib['id'],
                    "timestep": float(step.attrib['timestep']),
                    "queueingtime": float(lane.attrib['queueing_time']),
                    # store in feet instead of meters
                    "queueinglength": convertMeterToFeet(float(lane.attrib['queueing_length']))
                })
def xml_pase(xmlFileName):
    tree = ElementTree()
    tree.parse(xmlFileName)
    for child in tree.getroot():
        print(child.tag, child.attrib)
    neList = tree.findall("NE")
    print(neList)
    for s in neList:
        print(s.tag, s.attrib)
        chassislist = list(s)
        for row in chassislist:
            print("for row in chassislist:%s" % row.attrib['Value'])
            print(row.attrib)
            cfgMode = row.get('ConfigMode')
            if cfgMode:
                print("cfgMode is %s\n" % cfgMode)
            elem = list(row)[0]
            print(elem, elem.tag, list(row))
            vmlist = list(elem)
            for vm in vmlist:
                print("vm is %s" % vm)
                print(vm.tag, vm.attrib)
                print(vm.items())  # returns a (key, value) sequence
                print("list vm:%s" % vm)
                for tam in list(vm):
                    print(tam.tag, tam.attrib)
def parse_shed_tool_conf(file):
    """
    Parses the xml in a shed_tool_conf.xml file and returns a dictionary in
    the following format:
    {
        section_id: {
            "<repository_name>|<installed_changeset_revision>": {
                'name': ...,
                'owner': ...,
                'revision': ...,
                'tool_shed_url': ...,
            }
        }
    }
    """
    sections = defaultdict(lambda: {})
    doc = ElementTree(file=file)
    # './/section' is the supported spelling; a leading '//' raises a FutureWarning.
    for section in doc.findall(".//section"):
        for tool in section.findall('tool'):
            sections[section.get('id')][
                tool.find('repository_name').text + '|' +
                tool.find('installed_changeset_revision').text] = {
                    'name': tool.find('repository_name').text,
                    'owner': tool.find('repository_owner').text,
                    'revision': tool.find('installed_changeset_revision').text,
                    'tool_shed_url': 'https://' + tool.find('tool_shed').text}
    return sections
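# A minimal usage sketch for parse_shed_tool_conf above, assuming the layout of
# a Galaxy-style shed_tool_conf.xml; the repository details are made up.
import io

sample_conf = io.StringIO("""
<toolbox>
  <section id="filters" name="Filters">
    <tool file="x/y/tool.xml" guid="example">
      <repository_name>seqtk</repository_name>
      <repository_owner>iuc</repository_owner>
      <installed_changeset_revision>abc123</installed_changeset_revision>
      <tool_shed>toolshed.g2.bx.psu.edu</tool_shed>
    </tool>
  </section>
</toolbox>
""")
print(dict(parse_shed_tool_conf(sample_conf)))
# {'filters': {'seqtk|abc123': {'name': 'seqtk', 'owner': 'iuc',
#   'revision': 'abc123', 'tool_shed_url': 'https://toolshed.g2.bx.psu.edu'}}}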
def list_qkan_layers(qgs_template: str = None) -> Dict[str, List]:
    """Dictionary with the names of all QKan layers and, for each, a list of:
    table name, geometry column, SQL where clause, group name.

    The listing is read from the QKan project template file.
    """
    if not qgs_template:
        return {}

    # templateDir = os.path.join(pluginDirectory('qkan'), "templates")
    # qgsTemplate = os.path.join(templateDir, 'Projekt.qgs')

    qgsxml = ElementTree()
    qgsxml.parse(qgs_template)
    tag_group = "layer-tree-group/layer-tree-group"
    qgs_groups = qgsxml.findall(tag_group)
    qkan_layers = {}
    for group in qgs_groups:
        group_name = group.attrib["name"]
        group_layers = group.findall("layer-tree-layer")
        for layer in group_layers:
            layer_name = layer.attrib["name"]
            layer_source = layer.attrib["source"]
            dbname, table, geom, sql = get_qkanlayer_attributes(layer_source)
            qkan_layers[layer_name] = [table, geom, sql, group_name]
    logger.debug("qkan_layers: \n{}".format(qkan_layers))
    return qkan_layers
def parliamentary_seat_leaders(self, seat_initials): """Fetch parliamentary seat leaders. List all leaders and vice-leaders of a specific paliamentary seat. Args: seat_initials (str): Parliamentary seat initials identifier. Returns: dict: A dictionary with two keys: 'lider' and 'vice_lider', where 'lider' is a dictionary and 'vice_lider' a list of dictionaries. Both dictionaries contains deputies informations. For example:: {'lider': {'ideCadastro': 74558, 'nome': 'GIVALDO CARIMBÃO', 'partido': 'PHS', 'uf': 'AL'}, 'vice_lider': [{'ideCadastro': 178929, 'nome': 'DIEGO GARCIA', 'partido': 'PHS', 'uf': 'PR'}, ...]} """ xml_response = self._get('ObterLideresBancadas') element_tree = ElementTree(fromstring(xml_response)) seats = element_tree.findall('bancada') for seat in seats: # There is better ways to do this, but we want to support python2.6 if seat.attrib.get('sigla') == seat_initials: parliamentary_seat = seat dict_response = self._make_dict_from_tree(parliamentary_seat) return self._safe(dict_response['bancada'])
def parse_repomd(repo):
    filename = os.path.join(repo, "repodata", "repomd.xml")
    toret = {}
    tree = ElementTree()
    tree.parse(open(filename, 'r'))
    for data in tree.findall(XMLREPONS + "data"):
        filetype = data.get('type')
        f_location = data.find(XMLREPONS + "location")
        if f_location is not None:
            filepath = f_location.get('href')
        else:
            raise TypeError("Bad repomd: Failed to get location of '" + filetype + "'")
        # Check checksum for file
        f_checksum = data.find(XMLREPONS + "checksum")
        if f_checksum is not None:
            checksumtype = f_checksum.get('type')
            checksum = f_checksum.text
            if not check_file(os.path.join(repo, filepath), checksumtype, checksum):
                raise TypeError("Bad repo: Wrong checksum for file '" +
                                os.path.join(repo, filepath) + "', expected '" + checksum + "'")
        # Append file info
        toret[filetype] = filepath
    return toret
def vm_read_config(self):
    """
    This method parses the libvirt xml config file and fills the
    cfg_details dictionary. This method returns the dictionary or
    raises an exception if the xml is not valid.
    """
    domain = ET().parse(self.cfg_file)
    vm_type = domain.get('type')
    self.cfg_details['vm_type'] = HVM_LIBVIRT_NAMEMAP[vm_type]
    self.cfg_details['vm_type_str'] = vm_type
    self.cfg_details['displayName'] = domain.find('name').text
    self.cfg_details['memsize'] = int(domain.find('memory').text) >> 10
    primary_disk_list = []
    for disk in domain.findall('devices/disk'):
        disk_details = self.get_disk_details(disk)
        if disk.get('type') == 'file' and \
                disk.get('device') == 'disk' and \
                disk_details['dev'] in ('sda', 'hda', 'vda') and \
                disk_details['bus'] in ('ide', 'scsi', 'virtio'):
            primary_disk_list.append(os.path.basename(disk_details['file']))
            break
    self.cfg_details['primary_disk'] = primary_disk_list
    self.cfg_details['primary_disk_str'] = ','.join(primary_disk_list)
    if not self.cfg_details:
        raise config_file_invalid()
    else:
        return self.cfg_details
def getWorkflow(styleneRUNS, setName):
    '''
    Retrieve workflow id for train and test. Example:
        <run>
            <type>TRAINING</type>
            <run-number>1</run-number>
            <set-name>fold-0</set-name>
            <date>6/22/11 11:26 AM</date>
            <workflow-map>stylenerun/data//Workflow_5e1cefa6-b730-425c-99fc-7e2cc0e1d9bd_TRAINING_1_fold-0</workflow-map>
            <url />
        </run>
        <run>
            <type>TEST</type>
            <run-number>1</run-number>
            <set-name>fold-0</set-name>
            <date>6/22/11 11:27 AM</date>
            <workflow-map>stylenerun/data//Workflow_2c1f86e0-bb05-4ab8-a6df-171c32d6def5_TEST_1_fold-0</workflow-map>
            <url></url>
        </run>
    '''
    tree = ElementTree()
    tree.parse(styleneRUNS)
    runs = tree.findall('run')
    # zip() returns an iterator on Python 3, so materialise it before indexing.
    trainflow, testflow = list(zip(runs[::2], runs[1::2]))[0]
    assert testflow.find('type').text.lower() in ['test', 'testing']
    assert trainflow.find('set-name').text == testflow.find('set-name').text == setName
    trainflow, testflow = trainflow.find('workflow-map').text, testflow.find('workflow-map').text
    return (trainflow, testflow)
def parse_entry_element(self, entry: ET.ElementTree) -> dict: """Converts the XML entry element into a python dictionary. Arguments: ---- entry {ET.ElementTree} -- An entry element, that contains filing information. Returns: ---- dict -- A dictionary version of the entry element. """ entry_element_dict = {} replace_tag = self.entries_namespace['atom_with_quote'] for entry in entry.findall("./", namespaces=self.entries_namespace): for element in entry.iter(): name = element.tag.replace(replace_tag, '') if element.text: name = name.replace('-', '_') entry_element_dict[name] = element.text.strip() if element.attrib: for key, value in element.attrib.items(): key = key.replace('-', '_') entry_element_dict[name + "_{}".format(key)] = value return entry_element_dict
def parse_svg_fonts(): print 'Parsing svg fonts...', data = [] flen = len(fonts) for i, f in enumerate(fonts): font_filename = f.get('rescaled', f['orig']) basename = os.path.splitext(font_filename)[0] svg_filename = '%s.svg' % basename svg_path = '%s/%s' % (outfont_dir, svg_filename) tree = ElementTree() tree.parse(svg_path) root_tag = tree.getroot().tag if root_tag[0] == '{': namespace = '{%s}' % root_tag[1:].split('}')[0] else: namespace = '' el = tree.find('{ns}defs/{ns}font'.format(ns=namespace)) fontname = el.get('id', 'unknown') if len(el) else 'unknown' el = tree.findall('{ns}defs/{ns}font/{ns}glyph'.format(ns=namespace)) glyph_codes = filter(lambda x: x.get('unicode', None) and x.get("d", None), el) glyph_codes = map(lambda x: x.get('unicode', None) , glyph_codes) data.append({ 'font_id': fontname, 'basename': basename, 'glyph_codes': glyph_codes }) print 'done\n' return data
def build_app_info(self, project_dir): tiapp = ElementTree() tiapp.parse(open(os.path.join(project_dir, "build", "android", "bin", "assets", "tiapp.xml"), "r")) self.app_info = {} self.app_properties = {} for key in [ "id", "name", "version", "publisher", "url", "copyright", "description", "icon", "analytics", "guid", ]: el = tiapp.find(key) if el != None: self.app_info[key] = el.text for property_el in tiapp.findall("property"): name = property_el.get("name") type = property_el.get("type") value = property_el.text if name == None: continue if type == None: type = "string" if value == None: value = "" self.app_properties[name] = {"type": type, "value": value}
def ui_magic(object, ui_file, prefix): main_ui_filename = ui_file object.xml = gtk.Builder () object.xml.add_from_file (main_ui_filename) objects = object.xml.get_objects() for content in objects: try: if isinstance(content, gtk.Label): if content.get_label() != None and len(content.get_label()) > 0 : content.set_markup(_(content.get_label())) elif isinstance(content, gtk.Button): if content.get_label() != None and len(content.get_label()) > 0 : content.set_label(_(content.get_label())) else: if content.get_text() != None and len(content.get_text()) > 0 : content.set_text(_(content.get_text())) except AttributeError: pass # This is a workarround. For some reason obj.get_name don't return # the real name of the widget from xml.etree.ElementTree import ElementTree xml = ElementTree() xml.parse(main_ui_filename) for obj in xml.findall ('.//object'): try: if obj.attrib["id"].startswith(prefix) : widget = object.xml.get_object(obj.attrib["id"]) widget_name = obj.attrib["id"][len(prefix)+1:] exec ('object.%s = widget' % widget_name) except: print "Something fails at ui_magic"
def test_separate_timer_test_case(self):
    """Check that the elapsed time for each test is set separately.

    This test encodes a bug in which the elapsed time of the most
    recently run test was reported as the elapsed time for each test.
    """
    # reset runner to record elapsed times
    self.runner = xmlrunner.XMLTestRunner(output=self.stream,
                                          stream=self.fake_stream,
                                          elapsed_times=True)
    self.runner.run(unittest.makeSuite(testcases.SeparateTimerTestCase))
    f = StringIO(self.stream.getvalue())
    try:
        tree = ElementTree(file=f)
        (first, second) = tree.findall('testcase')
        # allow 25ms beyond the sleep() time for garbage collection
        self.assertEqual('test_run_for_100ms', first.attrib['name'])
        first_time = float(first.attrib['time'])
        self.assertTrue(0.100 <= first_time < 0.125,
                        'expected about 0.1s. actual: %ss' % first_time)
        self.assertEqual('test_run_for_50ms', second.attrib['name'])
        second_time = float(second.attrib['time'])
        self.assertTrue(0.050 <= second_time < 0.075,
                        'expected about 0.05s. actual: %ss' % second_time)
    finally:
        f.close()
def replace_rank_text_with_attribute(tree: ET.ElementTree) -> None:
    """Replace `rank` element text with a `value` attribute."""
    for country in tree.findall("country"):
        rank_elem = country.find("rank")
        assert rank_elem is not None
        rank_elem.set("value", rank_elem.text if rank_elem.text else "")
        rank_elem.text = None

    expected = """\
<data>
    <country name="Liechtenstein">
        <rank value="1"/>
        <year>2008</year>
        <gdppc>141100</gdppc>
    </country>
    <country name="Monaco">
        <rank value="2"/>
    </country>
    <country name="Panama">
        <rank value="68"/>
        <year>2011</year>
        <gdppc>13600</gdppc>
    </country>
    <country name="Malaysia">
        <rank value="69"/>
    </country>
</data>"""
    assert_xml_equal(tree, expected)
def __init__(self): root_path = Defaults.get_system_root_path() self.products_metadata = os.sep.join([root_path, 'etc', 'products.d']) self.prod_filenames = glob.glob( os.path.join(self.products_metadata, '*.prod')) base_product_files = [] xml = ElementTree() for prod_filename in self.prod_filenames: try: xml.parse(prod_filename) register_sections = xml.findall('register') for register in register_sections: flavor = register.findall('flavor') if not flavor: base_product_files.append(prod_filename) except Exception as issue: message = \ 'Parsing XML file {0} failed with: {1}'.format( prod_filename, issue ) log.warning(message) if len(base_product_files) != 1: if not base_product_files: message = 'There is no baseproduct' else: message = ('Found multiple product definitions ' 'without element <flavor>: \n{0}'.format( '\n'.join(base_product_files))) log.error(message) raise DistMigrationSUSEBaseProductException(message) self.base_product = base_product_files[0]
def getSchemaList(self): # If answer file exists, see if it contains values we can use. # Check for XML answer file first self.optioncmds = [] self.schemas = [] if os.path.exists("C:\AnswerFile\MACHINE_ANSWER_FILE.xml"): tree = ElementTree(file="C:\AnswerFile\MACHINE_ANSWER_FILE.xml") for keynode in tree.findall(".//Group[@Name='DBScripts']/Key"): if keynode.attrib.get("Name").endswith("_SCHEMA"): self.schemas.append(keynode.attrib.get("Name").replace("_SCHEMA", "")) self.optioncmds.append("options." + keynode.attrib.get("Name") + "=\"" + "".join(keynode.itertext()) + "\"") elif os.path.exists("C:\AnswerFile\MACHINE_ANSWER_FILE.ini"): fi = open("C:\AnswerFile\MACHINE_ANSWER_FILE.ini", "r") ini_lines = fi.readlines() for ini_line in ini_lines: if not ini_line.startswith(";"): if ini_line.startswith("["): if ini_line.find("]") > -1: section = ini_line.split("[")[1].split("]")[0] else: if len(ini_line.split("=")) > 1 and section == "DBScripts": ini_line = ini_line.replace("\n", "") keyname = ini_line.split("=")[0] keyvalue = ini_line.partition("=")[2] if keyname.endswith("_SCHEMA"): self.schemas.append(keyname.replace("_SCHEMA", "")) self.optioncmds.append("options." + keyname + "= \"" + keyvalue + "\"") fi.close()
def _getTVDBThumbnail(self): import os, time if self.id: # check if the file already exists if os.path.isfile(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml'): # if it is older than config['cacherenew'] days, delete the files and download again if os.path.getctime(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml') < time.time()-(Config['cacherenew']*86400): os.remove(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml') if not os.path.isfile(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml'): URL('http://www.thetvdb.com/api/'+Config['tvdbapikey']+'/series/'+self.id+'/all/'+Config['tvdblang']+'.xml').download(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml') from xml.etree.ElementTree import ElementTree tree = ElementTree() try: tree.parse(Config['tmpdir']+self.id+'-'+Config['tvdblang']+'.xml') if Config['posterforpilot'] == True and self.season == 1 and self.episode == 1: series = tree.find('Series') if series.find('poster').text: self.thumbnail = 'http://www.thetvdb.com/banners/'+series.find('poster').text return True for episode in tree.findall('Episode'): if int(episode.find('SeasonNumber').text) == self.season and int(episode.find('EpisodeNumber').text) == self.episode: if episode.find('filename').text: self.thumbnail = 'http://www.thetvdb.com/banners/'+episode.find('filename').text return True except: pass return False
def _remove_old_items(channel: ElementTree) -> None:
    items = channel.findall("item")
    num_to_delete = max(0, len(items) - MAX_KEEP_ITEMS)
    elements_to_delete = items[:num_to_delete]
    for element in elements_to_delete:
        channel.remove(element)
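# A minimal sketch exercising _remove_old_items above, assuming MAX_KEEP_ITEMS
# is the module-level cap it reads (set to 3 here purely for the demo) and that
# the channel is passed as a plain Element holding <item> children.
from xml.etree.ElementTree import Element, SubElement

MAX_KEEP_ITEMS = 3
channel = Element("channel")
for title in ("a", "b", "c", "d", "e"):
    SubElement(channel, "item").set("title", title)

_remove_old_items(channel)
print([item.get("title") for item in channel.findall("item")])  # -> ['c', 'd', 'e']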
class Topology:
    def __init__(self):
        self.tree = ElementTree()
        self.tree.parse('topology.xml')
        self.nodes = self.tree.findall('nodes/node')

    def getHost(self):
        hostList = []
        for node in self.nodes:
            if node.get('type') == 'host':
                name = node.find('name').text
                hostList.append(name)
        return hostList

    def getOpenflowSwitch(self):
        openflowSwitchList = []
        for node in self.nodes:
            if node.get('type') == 'openflowSwitch':
                name = node.find('name').text
                openflowSwitchList.append(name)
        return openflowSwitchList

    def getOpenflowSwitchInterface(self, switchName):
        interfaceList = []
        for node in self.nodes:
            if node.get('type') == 'openflowSwitch' \
                    and node.find('name').text == switchName:
                interfaces = node.findall('interfaces/interface')
                for interface in interfaces:
                    name = interface.find('name').text
                    interfaceList.append(name)
        return interfaceList
def print_error_reports_from_report_file(file_path):
    tree = ElementTree()
    try:
        tree.parse(file_path)
    except Exception:
        print("-" * 50)
        print("Error parsing {0!s}".format(file_path))
        f = open(file_path, "r")
        print(f.read())
        print("-" * 50)
        return
    testcases = tree.findall(".//testcase")
    for testcase in testcases:
        error = testcase.find("error")
        if error is not None:
            print_detail_information(testcase, error)
        fail = testcase.find("fail")
        if fail is not None:
            print_detail_information(testcase, fail)
        failure = testcase.find("failure")
        if failure is not None:
            print_detail_information(testcase, failure)
def updatetrafficlights(self): tree = ElementTree() tree.parse(self.trafficsignaldataxml) #Get traffic light states #format: ['202305458', 'grygrygrygrygrygry'], ['202305472', 'gGGGgGGGGgGGGgGGGG']]} trafficlights = self.getTrafficLightValues() for trafficlight in trafficlights: trafficlightID = trafficlight[0] linklights = list(trafficlight[1]) for link_index in range(len(linklights)): items = tree.findall('trafficlight') for item in items: # Remove letter g from links purestr = str(item.attrib['link_index']).replace("g", "") link_indexes = purestr.split("!") # if the traffic light id matches if str(item.attrib['intersection_id']) == trafficlightID: # if link index is in the link_indexes if str(link_index) in link_indexes: item.attrib['state'] = getTrafficLightState(linklights[link_index]) #logging.info({"id":trafficlight,"intersection id":trafficlightID,"link index":link_index, "state":getTrafficLightState(linklights[link_index])}) tree.write(self.trafficsignaldataxml)
def parse_test_objects(category, feature_name, percentile, trial, paths, feature_paths):
    info_file = "/Users/isa/Experiments/bof_bmvc12/trial_" + str(trial) + "/bof_category_test_info.xml"
    info_tree = ElementTree()
    info_tree.parse(info_file)
    scene_elms = info_tree.findall('scene')
    print('Found: ' + str(len(scene_elms)) + ' scenes')
    for scene in scene_elms:
        site_name = scene.get('site_name')
        obj_elms = scene.findall('object')
        if obj_elms is None:
            print("Invalid scene info file: No objects element")
            sys.exit(-1)
        print('Found: ' + str(len(obj_elms)) + ' objects')
        for elm in obj_elms:
            class_name = elm.get('class_name')
            if class_name == category:
                mesh_name = elm.get('ply_name')
                ply_path = ("/data/helicopter_providence_3_12/" + site_name +
                            "/objects_with_aux/" + category + "_" + str(percentile) +
                            "/" + mesh_name + ".ply")
                feature_path = ("/Users/isa/Experiments/shape_features_bmvc12/" + site_name +
                                "/" + feature_name + "/" + category + "_" + str(percentile) +
                                "/" + mesh_name + ".npy")
                paths.append(ply_path)
                feature_paths.append(feature_path)
def import_opml(opml_file): tree = ElementTree() tree.parse(opml_file) outlines = tree.findall(".//outline") tag = None # TODO: fix this for all opml formats for o in outlines: xmlurl = None try: xmlurl = o.attrib['xmlUrl'] except: tag = o.attrib['text'] if xmlurl: try: # print "inserting ", tag, o.attrib['xmlUrl'], o.attrib['htmlUrl'], o.attrib['text'], tag f = { '_id': str(uuid.uuid1()), 'title': o.attrib['text'], 'url': o.attrib['xmlUrl'], 'web_url': o.attrib['htmlUrl'], 'tag': tag, } db.feeds.update({'url': f['url']}, f, True) except: pass
def build_app_info(self, project_dir): tiapp = ElementTree() assets_tiappxml = os.path.join(project_dir, 'build', 'android', 'bin', 'assets', 'tiapp.xml') self.app_info = {'fullscreen': 'false', 'navbar-hidden': 'false'} self.app_properties = {} if not os.path.exists(assets_tiappxml): shutil.copy(os.path.join(project_dir, 'tiapp.xml'), assets_tiappxml) tiapp.parse(open(assets_tiappxml, 'r')) for key in [ 'id', 'name', 'version', 'publisher', 'url', 'copyright', 'description', 'icon', 'analytics', 'guid', 'navbar-hidden', 'fullscreen' ]: el = tiapp.find(key) if el != None: self.app_info[key] = el.text for property_el in tiapp.findall("property"): name = property_el.get("name") type = property_el.get("type") value = property_el.text if name == None: continue if type == None: type = "string" if value == None: value = "" self.app_properties[name] = {"type": type, "value": value}
def getWeatherData(self, location): """ """ ## if cached yymmddhh = datetime.datetime.now().replace(tzinfo=UTC).astimezone(JST).strftime('%Y%m%d%H') key = str(yymmddhh + "_" + location) mc = Caching.MemcacheStore() data = mc.get(key) if data: return data data = [] patt = re.compile(u"^.* \[ ([0-9]+).* \] ([0-9.]+).*$") cur = self._rdb.cursor(MySQLdb.cursors.DictCursor) cur.execute("""SELECT sht.prefid AS prefid FROM area a JOIN area_has_state ahs ON (a.areaid=ahs.areaid) JOIN state s ON (s.stateid=ahs.stateid) JOIN state_has_tenkijpPref sht ON (s.stateid=sht.stateid) WHERE a.location=%s""", (location,)) row = cur.fetchone() cur.close() ## temperature fh = urllib2.urlopen(WEATHERAPI % row['prefid']) rss = ElementTree(file=fh) items = rss.findall('.//item') for item in items: title = item.find('title').text data.append({'title':title}) mc.set(key, data) return data
def create_json_items_from_embark_xml( embark_xml_filename, pnx_output_directory='pnx', csv_output_root_directory='mellon_input_directory'): """Create JSON representation of each item from embark xml file.""" try: embark_xml_doc = ElementTree(file=embark_xml_filename) except OSError: print('Unable to open the file you specified. Please try again.') raise else: emb_ark_field_definitions = read_embark_fields_json_file() xpath_of_embark_item = get_item_xpath(emb_ark_field_definitions) fields_definition = get_fields_definition(emb_ark_field_definitions) # Loop through each EmbArk record, processing each individually for xml_of_embark_item in embark_xml_doc.findall(xpath_of_embark_item): json_of_embark_item = {} try: embark_instance = ParseEmbarkXml(fields_definition) json_of_embark_item = embark_instance.parse_embark_record( xml_of_embark_item) except ValueError: print('EmbArk Item didn\'t process as expected.') # We will need to add some logging here raise else: mellon_input_directory = csv_output_root_directory + '/' + embark_instance.id write_json_output(mellon_input_directory, embark_instance.id + '.json', json_of_embark_item) create_pnx_from_json_and_write_file(pnx_output_directory, json_of_embark_item) write_main_csv.write_main_csv(mellon_input_directory, json_of_embark_item)
def persistInductionData(sim_exec_id):
    db = DBUtil().getDatabase()
    tree = ElementTree()
    tree.parse(INDUCTION_OUT_FILE)
    intervals = tree.findall('interval')
    for interval in intervals:
        vehiclecount = int(interval.attrib['nVehContrib'])
        if vehiclecount != 0:
            # valid data
            xmlloop_id = interval.attrib['id']
            intervals_with_type = interval.findall('typedInterval')
            for interval_with_type in intervals_with_type:
                type_vehicle_count = int(interval_with_type.attrib['nVehContrib'])
                if type_vehicle_count != 0:
                    db.simulation_induction_data.insert({
                        "sim_exec_id": sim_exec_id,
                        "induction_id": interval_with_type.attrib['id'],
                        "begintime": float(interval_with_type.attrib['begin']),
                        "endtime": float(interval_with_type.attrib['end']),
                        "vehicletype": interval_with_type.attrib['type'],
                        "count": type_vehicle_count
                    })
def __ReadDatabase(self):
    tree = ElementTree()
    tree.parse(self.__database)
    nodes = tree.findall(".//*")
    for node in nodes:
        f = File(node.text, node.attrib["crc"])
        self.__crcDatabase[f.FileName] = f
def check_xml(fname):
    """
    Check if a file has the format of xml

    @type fname: str
    @param fname: path of the file to be checked

    @rtype: bool
    @return: whether the file passed the xml check
    """
    if not check_common(fname, ".xml", maxsize=10485760):  # 10M = 1024*1024*10 = 10485760
        return False
    xmltree = ElementTree()
    try:
        xmltree.parse(fname)
    except Exception:
        error("Failed to parse the xml file.")
        error("The input XML file %s has a wrong format, please check it again." % fname)
        return False
    for pos in xmltree.findall("motif"):
        # get key and set empty element
        key = pos.get('id')
        if not key:
            error("No 'id' found for node, not an xml for motif information?")
            return False
    return True