def Write(self, outfile):
    """Serialize the entity cache to an already-open file object.

    Runs Fortify() first, then writes the history as an XML tree.
    """
    self.Fortify()
    ElementTree(self.history.ToElement()).write(outfile)
def scan_kid_files(self, potfile, files): messages = [] tags_to_ignore = ['script', 'style'] keys = [] kid_expr_re = re.compile(r"_\(('(?P<texta>[^']*)'|\"(?P<textb>[^\"]*)\")\)") for fname in files: print 'working on', fname tree = None try: tree = ElementTree(file=fname).getroot() except Exception, e: print 'Skip %s: %s' % (fname, e) continue for el in tree.getiterator(): if self.options.loose_kid_support or el.get('lang', None): tag = re.sub('({[^}]+})?(\w+)', '\\2', el.tag) ents = [] if el.text: ents = [el.text.strip()] if el.attrib: ents.extend(el.attrib.values()) for k in ents: key = None s = kid_expr_re.search(k) if s: key = s.groupdict()['texta'] or s.groupdict()['textb'] if key and (key not in keys) and (tag not in tags_to_ignore): messages.append((tag, fname, key)) keys.append(key)
def get_authoreds(researcher_object): """ Asks Symplectic API for info about specified researcher Receives XML File as response Parses XML File to find all publications for that researcher & notes preferences they have for each publication """ # checking # if not(researcher_object) or (researcher_object.symplectic_int_id is # None): # int_id version if not(researcher_object) or (researcher_object.symplectic_id is None): # guid version return # symplectic api url and local file path # url = SYMPLECTIC_API_URL + 'users/' + # str(researcher_object.symplectic_int_id) # int_id version url = "".join([ SYMPLECTIC_API_URL, 'users/', str(researcher_object.symplectic_id) ]) # # tmp_filename = SYMPLECTIC_LOCAL_XML_FOLDER + # SYMPLECTIC_LOCAL_AUTH_FOLDER + # str(researcher_object.symplectic_int_id) # + '.xml' # int_id version tmp_filename = "".join([ SYMPLECTIC_LOCAL_XML_FOLDER, SYMPLECTIC_LOCAL_AUTH_FOLDER, str(researcher_object.symplectic_id), '.xml' ]) # get xml document from symplectic api and store on hd (tmp_filename, http_headers) = urllib.urlretrieve(url, tmp_filename) # parse xml file publications_etree = ElementTree(file=tmp_filename) #delete local file from hd #try: os.remove(tmp_filename) #except: #pass #publication elements are held in a subtree publications_subtree = publications_etree.find( SYMPLECTIC_NAMESPACE + 'publications' ) # check if any publication elements in subtree if publications_subtree is None or len(publications_subtree) < 1: return # now that we have their newest "i authored that pub" info, we can # delete their old "i authored that pub" info researcher_object.remove_all_authored() # for each publication element in subtree for publication_element in publications_subtree.getchildren(): _create_authored(publication_element, researcher_object)
def __init__(self, f):
    """Parse the manifest in *f* and wrap each <artifacts> child."""
    root = ElementTree().parse(f)
    self._artifacts = [Artifact(node) for node in root.find('artifacts')]
def requestMonitorId(self, monitorTag):
    """Return the id of the first monitor carrying *monitorTag*.

    Raises Exception when no monitor matches; previously this crashed
    with an AttributeError on the ``None.text`` lookup (the sibling
    implementation already guards against this).
    """
    req = urllib2.Request(str('{0}/?apikey={1}&output={2}'
                              '&version={3}&action=getMonitors&tag={4}')
                          .format(self.url, self.apiKey, self.output,
                                  self.version, monitorTag))
    res = urllib2.urlopen(req)
    xml = res.read()
    root = ElementTree(file=StringIO.StringIO(xml)).getroot()
    monitor = root.find('./monitor/id')
    # TODO handle multiple monitors with the same tag; only the first is used
    if monitor is None:
        raise Exception("No monitors matching " + monitorTag)
    return monitor.text
def process_pom(self, config, pom_path):
    """Read the minecraft_version property from a POM into *config*.

    Stores "" when the property element is absent.
    """
    doc = ElementTree(file=pom_path)
    mc_version = ""
    try:
        mc_version = doc.findall('/{POM}properties/{POM}minecraft_version'.format(POM=POM_NS))[0].text
    except IndexError:
        # property not present in this POM; narrowed from a bare ``except:``
        # that also hid programming errors
        mc_version = ""
    config["minecraft_version"] = mc_version
def install_xpi(self, filename):
    """Unpack an XPI archive into the profile's extensions directory.

    The archive is extracted under its own file name first, then the
    directory is renamed to the extension id taken from install.rdf.
    """
    ext_dir = os.path.join(self.profiledir, 'extensions',
                           os.path.basename(filename))
    os.makedirs(ext_dir)
    archive = zipfile.ZipFile(filename, 'r')
    archive.extractall(ext_dir)
    manifest = ElementTree(file=os.path.join(ext_dir, 'install.rdf'))
    ext_id = manifest.find('.//{http://www.mozilla.org/2004/em-rdf#}id').text
    os.rename(ext_dir, os.path.join(os.path.dirname(ext_dir), ext_id))
def createXmlFile(filePath, rootElement, version='1.0', encoding=ENCODING_IN):
    """Create an xml file with an explicit XML declaration.

    The output file is now closed even when serialization raises
    (previously an exception leaked the open handle).
    """
    doc = ElementTree(rootElement)
    outfile = open(filePath, 'w')
    try:
        outfile.write('<?xml version="' + version + '" encoding="' + encoding + '" ?>')
        # NOTE(review): _write is a private elementtree API used to suppress
        # the library's own declaration -- verify against the pinned
        # elementtree version before upgrading it
        doc._write(outfile, doc._root, ENCODING_IN, {})
    finally:
        outfile.close()
def process(self, lang):
    """Parse <lang>.xml and walk each top-level resource node."""
    assert len(lang) == 2, 'Language name must be two letters long'
    root = ElementTree(file='%s.xml' % lang).getroot()
    if root.tag != 'resources':
        return
    for child in root:
        self.walk(child, (child.get('name'),), lang)
def process(self, lang):
    """Parse <lang>.xml from the configured directory and walk each
    top-level resource node."""
    assert len(lang) == 2, "Language name must be two letters long"
    doc = ElementTree(file=os.path.join(self._dirname, "%s.xml" % lang))
    root = doc.getroot()
    # only files rooted at <resources> are processed; others are ignored
    if root.tag == "resources":
        for child in root:
            self.walk(child, (child.get("name"),), lang)
def gettemp():
    """Run omreport and wrap every TemperatureProbe element it reports."""
    cmd = ["omreport", "chassis", "temps", "-fmt", "xml"]
    # popen2.popen2 returns (child_stdout, child_stdin); we read the output
    (child_stdout, child_stdin) = popen2.popen2(cmd)
    root = ElementTree().parse(child_stdout)
    return [tempprobe(node) for node in root.getiterator()
            if node.tag == "TemperatureProbe"]
def requestMonitorId(self, monitorTag):
    """Return the id of the first monitor carrying *monitorTag*.

    Raises Exception when the API returns no matching monitor.
    """
    xml = self._apiRequestXml(str('{0}/?apikey={1}&output={2}'+\
        '&version={3}&action=getMonitors&tag={4}')\
        .format(self.url, self.apiKey, self.output, self.version, monitorTag))
    root = ElementTree(file=StringIO.StringIO(xml)).getroot()
    monitor = root.find('./monitor/id')
    # Just the first matching monitor
    # TODO handle multiple monitors with the same tag
    # Dependent code assumes that exactly one is returned
    if monitor is None:
        raise Exception("No monitors matching " + monitorTag)
    return root.find('./monitor/id').text
def getpdisks(controller="0"):
    """Wrap every DCStorageObject omreport reports for *controller*."""
    cmd = ["omreport", "storage", "pdisk", "controller=" + controller,
           "-fmt", "xml"]
    # popen2.popen2 returns (child_stdout, child_stdin); we read the output
    (child_stdout, child_stdin) = popen2.popen2(cmd)
    root = ElementTree().parse(child_stdout)
    return [pdisk(node) for node in root.getiterator()
            if node.tag == "DCStorageObject"]
def PrintStats(): """Looks at the XML output and dumps render time.""" try: from elementtree.ElementTree import ElementTree except: print "Unable to load ElementTree, skipping statistics." else: doc = ElementTree(file='stats.xml') for timer in doc.findall('//timer'): if "totaltime" == timer.get("name"): print "Render time was %s seconds" % timer[0].text break
def get_dependencies(path):
    """Map "groupId.artifactId" -> version for every dependency in the POM
    at *path*.

    Returns an empty dict when the POM has no <dependencies> section
    (previously this raised IndexError).
    """
    dependencies = {}
    doc = ElementTree(file=path)
    deps = doc.findall('/%sdependencies' % POM_NS)
    if not deps:
        return dependencies
    for dep in deps[0]:
        # renamed locals; the loop used to shadow the *path* parameter
        group_id = dep.findall("%sgroupId" % POM_NS)[0].text
        artifact_id = dep.findall("%sartifactId" % POM_NS)[0].text
        version = dep.findall("%sversion" % POM_NS)[0].text
        dependencies[".".join([group_id, artifact_id])] = version
    return dependencies
def xml_to_dict(fPath): """ Converts study data from (ref man generated) XML to a dictionary matching study IDs (keys) to title/abstract tuples (values). For example: dict[n] might map to a tuple [t_n, a_n] where t_n is the title of the nth paper and a_n is the abstract """ ref_ids_to_abs = {} num_no_abs = 0 tree = ElementTree(file=fPath) for record in tree.findall(".//record"): pubmed_id = None refmanid = eval(record.findtext(".//rec-number")) # attempt to grab the pubmed id pubmed_id = "" try: pubmed = record.findtext(".//notes/style") pubmed = pubmed.split("-") for i in range(len(pubmed)): if "UI" in pubmed[i]: pubmed_str = pubmed[i + 1].strip() pubmed_id = eval("".join([x for x in pubmed_str if x in string.digits])) except Exception, ex: print "problem getting pmid ..." print ex ab_text = record.findtext(".//abstract/style") if ab_text is None: num_no_abs += 1 title_text = record.findtext(".//titles/title/style") # Also grab keywords keywords = [keyword.text.strip().lower() for keyword in record.findall(".//keywords/keyword/style")] # and authors authors = [author.text for author in record.findall(".//contributors/authors/author/style")] # journal journal = record.findtext(".//periodical/abbr-1/style") ref_ids_to_abs[refmanid] = { "title": title_text, "abstract": ab_text, "journal": journal, "keywords": keywords, "pmid": pubmed_id, "authors": authors, }
def mark_changed_publications(modified_since): """ Asks Symplectic API for info about publications modified since given date Receives XML File as response Parses XML File to find publications modified matches publication XML element to db publication object flags each publication object as needing to be re-fetched from Symplectic """ # date needs to be in form of yyyy-mm-dd # will then append string "T00:00:00Z" as we are in UTC-0 timezone in # which : becomes %3A # symplectic api url and local file path url = "".join([ SYMPLECTIC_API_URL, 'search-publications?modified-since-when=', modified_since, 'T00%3A00%3A00Z' ]) tmp_filename = "".join([ SYMPLECTIC_LOCAL_XML_FOLDER, SYMPLECTIC_LOCAL_PUBSMODIFIED_FOLDER, modified_since, '.xml' ]) # get xml document from symplectic api and store on hd (tmp_filename, http_headers) = urllib.urlretrieve(url, tmp_filename) # parse xml file search_publications_etree = ElementTree(file=tmp_filename) # delete local file from hd # try: os.remove(tmp_filename) # except: # pass # publication lite elements are held in a subtree BUT the subtree is # the root element # search_publications_subtree = # search_publications_etree.find(SYMPLECTIC_NAMESPACE + # 'search-publications-response') search_publications_subtree = search_publications_etree.getroot() # check if any publication elements in subtree if search_publications_subtree is None or \ len(search_publications_subtree) < 1: return # for each publication element in subtree for search_publication_element \ in search_publications_subtree.getchildren(): _flag_publication_as_needing_refetch( search_publication_element )
def getpower():
    """Collect power-supply info via omreport.

    Returns [(redundancy_status, redun_status_text), [powersupply, ...]].
    """
    cmd = ["omreport", "chassis", "pwrsupplies", "-fmt", "xml"]
    (omstdin, omstdout) = popen2.popen2(cmd)
    tree = ElementTree()
    root = tree.parse(omstdin)
    iter = root.getiterator()
    status = ""
    # fix: redunstatus was unbound (NameError at the return) whenever the
    # report contained no <Redundancy> element
    redunstatus = ""
    pwrsupplies = []
    for element in iter:
        if element.tag == "Redundancy":
            status = element.get("status")
            redunstatus = element.findtext("RedunStatus")
        if element.tag == "PowerSupply":
            pwrsupplies.append(powersupply(element))
    return [(status, redunstatus), pwrsupplies]
def currency_data(self, xml_url='http://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist-90d.xml'):
    """Returns the most recent currency data with tuples."""
    now = datetime.now()
    today = u'-'.join([unicode(now.year), unicode(now.month), unicode(now.day)])
    # cache already refreshed today: nothing to do
    if today == self.updated_date():
        return
    etree90 = ElementTree()
    try:
        data90 = urllib2.urlopen(xml_url)
        # unbound-style call; equivalent to etree90.parse(data90), which
        # returns the root element
        root90 = ElementTree.parse(etree90, data90)
        # assumes the third child of the root holds the daily Cube data --
        # TODO confirm against the ECB feed schema
        DATA90 = root90[2]
        DATA_list = []
        for DATA in DATA90:
            daily_data_list = []
            for daily_data in DATA:
                daily_data_tuple = (daily_data.get('currency'),
                                    daily_data.get('rate'))
                daily_data_list.append(daily_data_tuple)
            ddl = (DATA.get('time'), dict(daily_data_list))
            DATA_list.append(ddl)
        date = DATA_list[0][0]
        try:
            if self.date != date:
                self.currencies = DATA_list
                self.date = date
                for date in self.currencies:
                    for key in date[1].keys():
                        if key not in self.codes:
                            self.codes.append(key)
                self.amount_of_days = range(1, len(self.currencies) + 1)
        except AttributeError:
            # first run: self.date does not exist yet
            self.currencies = DATA_list
    except:
        # NOTE(review): bare except silently ignores network/parse errors,
        # leaving previously cached data in place -- presumably deliberate
        # best-effort behaviour; confirm before narrowing
        pass
def __init__(self, filename):
    '''
    Parameters
    ----------
    filename : path to xml config file (TODO - SCHEMA definition)
    '''
    # keep the path for later error messages and parse the config eagerly
    self._filename = filename
    self._config_tree = ElementTree(file=filename)
def __init__(self, track, request=None):
    """Parse the track's stored XML file and extract its trackpoints.

    The trackfile is opened only long enough to build the element tree.
    """
    ActivityFile.__init__(self, track, request)
    track.trackfile.open()
    self.xmltree = ElementTree(file=track.trackfile)
    track.trackfile.close()
    # logging.debug("Trackfile %r closed" % tcxfile.trackfile)
    self.parse_trackpoints()
def __call__(self, **kwargs):
    """Render the context's IExport adapter as a text/xml response.

    Returns only the XML declaration when no exporter is registered.
    """
    xml = """<?xml version="1.0" encoding="UTF-8"?>"""
    exporter = queryMultiAdapter((self.context, self.request), IExport)
    if not exporter:
        return xml
    body = exporter.body
    # the adapter may hand back either a tree or a bare element
    if not isinstance(body, ElementTree):
        body = ElementTree(body)
    out = StringIO()
    body.write(out)
    out.seek(0)
    xml += out.read()
    self.request.response.setHeader('content-type', 'text/xml')
    return xml
def gencix(major, minor):
    """Generate a Perl standard-library CIX file for ActivePerl major.minor.

    Scans the installed library tree with ci2.py, merges the resulting
    blobs under a single <file> element, strips local variables, and
    writes the prettified tree to perl-<major>.<minor>.cix.
    """
    # First generate first pass at the CILE over all of the lib tree
    cixfile = "activeperl-%d.%d.cix" % (major, minor)
    command = "python ../../../ci2.py scan -n -r -p -l Perl -T /tmp/ActivePerl-%d.%d/perl/lib -i \"*.pm\"> %s" % (major, minor, cixfile)
    retval = os.system(command)
    if retval != 0:
        print "Error scanning ActivePerl library"
        sys.exit(retval)
    #
    # Grab the output of that scan
    root = parse(cixfile).getroot()
    newroot = Element("codeintel", version="2.0")
    # note: ``cixfile`` is rebound here from the scan file name to the new
    # <file> element
    cixfile = SubElement(newroot, "file", lang="Perl",
                         mtime=str(int(time.time())),
                         path=os.path.basename('perl.cix'))
    for file in root.getiterator('file'):
        print >> sys.stderr, "Processing", file.get('path')
        for blob in file:
            if blob.get("src"):
                # Don't want the src string.
                del blob.attrib["src"]
            cixfile.append(blob)
    cix = genPerlStdCIX(cixfile, "/tmp/ActivePerl-%d.%d/perl/lib/pod/perlfunc.pod" % (major, minor))
    # map each element to its parent so locals can be detached below
    parent_map = dict((c, p) for p in cixfile.getiterator() for c in p)
    for variable in newroot.getiterator('variable'):
        attributes = variable.get('attributes')
        if attributes and '__local__' in variable.get('attributes'):
            parent_map[variable].remove(variable)
    # Generate the CIX.
    print >>sys.stderr, "Prettying"
    prettify(newroot)
    tree = ElementTree(newroot)
    #fname = '../../../lib/codeintel2/stdlibs/perl-%d.%d.cix' % (major, minor)
    fname = 'perl-%d.%d.cix' % (major, minor)
    #os.system('p4 edit %s' % fname)
    stream = open(fname, "w")
    print >>sys.stderr, "Writing"
    stream.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    tree.write(stream)
    stream.close()
def tla_list(request):
    """Fetch the Text-Link-Ads inventory for this page and wrap each entry
    in a Link object."""
    url = 'http://www.text-link-ads.com/xml.php?inventory_key=' + settings.TLA_INVENTORY_KEY + '&referer=' + request.META.get('REQUEST_URI', request.META.get('PATH_INFO', '/'))
    # NOTE(review): raises KeyError when HTTP_USER_AGENT is absent from the
    # request -- confirm callers always supply it
    agent = '&user_agent=' + request.META['HTTP_USER_AGENT']
    # unbound-style call; equivalent to ElementTree().parse(...), which
    # returns the root element whose children are iterated below
    links = ElementTree.parse(ElementTree(), urlopen(url + agent))
    return {'links': [Link(link) for link in links]}
def loadBlissLexicon(fname):
    """Load a Bliss pronunciation lexicon from *fname*.

    Returns a sorted list of (orthography, pronunciation) pairs, skipping
    special "[...]" tokens.
    """
    from elementtree.ElementTree import ElementTree
    xml = ElementTree(file=gOpenIn(fname))
    pronunciations = pronunciationsFromXmlLexicon(xml)
    result = []
    for orth in pronunciations:
        if orth.startswith('[') and orth.endswith(']'):
            continue
        for phon in pronunciations[orth]:
            result.append((orth, phon))
    result.sort()
    return result
def tostring(node, *args, **kwds):
    """Serialize *node* to a string, passing extra args through to
    ElementTree.write().

    A ``pretty``/``pretty_print`` keyword (regardless of value) triggers
    in-place indentation of the tree before serializing; both keywords are
    always stripped before the write call. The original deleted the keys
    and then redundantly popped them again.
    """
    wants_pretty = 'pretty' in kwds or 'pretty_print' in kwds
    kwds.pop('pretty', None)
    kwds.pop('pretty_print', None)
    if wants_pretty:
        indent(node)
    oss = StringIO()
    ElementTree(node).write(oss, *args, **kwds)
    return oss.getvalue()
def GetElementsFromXML(self, filename):
    'Extracts a dictionary of elements from the gcc_xml file.'
    tree = ElementTree()
    try:
        tree.parse(filename)
    except ExpatError:
        raise InvalidXMLError, 'Not a XML file: %s' % filename
    root = tree.getroot()
    if root.tag != 'GCC_XML':
        raise InvalidXMLError, 'Not a valid GCC_XML file'
    # build a dictionary of id -> (element, None); the None slot is filled
    # in later by callers
    elementlist = root.getchildren()
    elements = {}
    for element in elementlist:
        id = element.get('id')
        if id:
            elements[id] = element, None
    return elements
def listMonitors(self):
    """Fetch every monitor from the API as (id, tag, name) tuples."""
    query = str('{0}/?apikey={1}&output={2}'
                '&version={3}&action=getMonitors').format(
        self.url, self.apiKey, self.output, self.version)
    response = urllib2.urlopen(urllib2.Request(query))
    root = ElementTree(file=StringIO.StringIO(response.read())).getroot()
    monitors = []
    for node in root:
        monitors.append((node.find('id').text,
                         node.find('tag').text,
                         node.find('name').text))
    return monitors
def GetElementsFromXML(self, filename):
    'Extracts a dictionary of elements from the gcc_xml file.'
    tree = ElementTree()
    try:
        tree.parse(filename)
    except ExpatError:
        raise InvalidXMLError, 'Not a XML file: %s' % filename
    root = tree.getroot()
    if root.tag != 'GCC_XML':
        raise InvalidXMLError, 'Not a valid GCC_XML file'
    # build a dictionary of id -> (element, None); the None slot is filled
    # in later by callers
    elementlist = root.getchildren()
    elements = {}
    for element in elementlist:
        id = element.get('id')
        if id:
            elements[id] = element, None
    return elements
def currency_data(
    self,
    xml_url='http://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist-90d.xml'
):
    """Returns the most recent currency data with tuples."""
    now = datetime.now()
    today = u'-'.join(
        [unicode(now.year), unicode(now.month), unicode(now.day)])
    # cache already refreshed today: nothing to do
    if today == self.updated_date():
        return
    etree90 = ElementTree()
    try:
        data90 = urllib2.urlopen(xml_url)
        # unbound-style call; equivalent to etree90.parse(data90), which
        # returns the root element
        root90 = ElementTree.parse(etree90, data90)
        # assumes the third child of the root holds the daily Cube data --
        # TODO confirm against the ECB feed schema
        DATA90 = root90[2]
        DATA_list = []
        for DATA in DATA90:
            daily_data_list = []
            for daily_data in DATA:
                daily_data_tuple = (daily_data.get('currency'),
                                    daily_data.get('rate'))
                daily_data_list.append(daily_data_tuple)
            ddl = (DATA.get('time'), dict(daily_data_list))
            DATA_list.append(ddl)
        date = DATA_list[0][0]
        try:
            if self.date != date:
                self.currencies = DATA_list
                self.date = date
                for date in self.currencies:
                    for key in date[1].keys():
                        if key not in self.codes:
                            self.codes.append(key)
                self.amount_of_days = range(1, len(self.currencies) + 1)
        except AttributeError:
            # first run: self.date does not exist yet
            self.currencies = DATA_list
    except:
        # NOTE(review): bare except silently ignores network/parse errors,
        # leaving previously cached data in place -- presumably deliberate
        # best-effort behaviour; confirm before narrowing
        pass
def dump(self, output, lang):
    """Write the collected resources for *lang* as an XML <resources> tree.

    Walks every key prefix, creating one <node> element per path segment;
    missing localized strings fall back to English and are flagged
    toBeTranslated, while strings missing from English are flagged obsolete.
    """
    # tempo maps a key prefix tuple to the element created for it
    tempo = {}
    root = Element('resources')
    root.tail = '\n'
    tempo[()] = root
    for key in self._resources_order:
        for i in range(1, len(key) + 1):
            if key[0:i] not in tempo:
                parent = tempo[key[0:i - 1]]
                value = self._resources.get(key[0:i], None)
                if value is None:
                    # purely structural node with no value of its own
                    elem = SubElement(parent, 'node', name=key[i - 1])
                else:
                    localized = value.get(lang, None)
                    english = value.get('en', None)
                    if english is None:
                        print >> sys.stderr, 'English file does not have the string for', key[0:i]
                        print >> sys.stderr, ' entry is marked as obosolete.'
                        elem = SubElement(parent, 'node', name=key[i - 1],
                                          value=localized, obsolete='true')
                    elif localized is not None:
                        elem = SubElement(parent, 'node', name=key[i - 1],
                                          value=localized)
                    else:
                        elem = SubElement(parent, 'node', name=key[i - 1],
                                          value=english, toBeTranslated='true')
                # indentation bookkeeping: children are indented one level
                # deeper than their parent
                parent.text = elem.tail = '\n' + i * SPACES
                tempo[key[0:i]] = elem
    fix_it(root)
    print >> output, '<?xml version="1.0" encoding="UTF-8"?>'
    ElementTree(root).write(output, 'utf-8')
def update_dependency(config, plugins, path, force=False):
    """Clone/refresh the git checkout for *path*, read its POM version, and
    let each plugin post-process the POM.

    Skips work when *path* was already updated this run, unless *force*.
    """
    if path in updated_dependencies and not force:
        return
    if path in config["dependencies"]:
        git_url = config["dependencies"][path]["git_url"]
        dependency_path = os.path.join(config["staging_path"],
                                       slugify(unicode(path)))
        dependency_path = os.path.expanduser(dependency_path)
        if not os.path.isdir(dependency_path):
            os.makedirs(dependency_path)
        repo = init_repo(dependency_path, git_url)
        pom_path = os.path.join(dependency_path, 'pom.xml')
        doc = ElementTree(file=pom_path)
        # NOTE(review): raises IndexError if the POM has no <version>
        # element -- confirm all dependency POMs declare one
        version = doc.findall('/%sversion' % POM_NS)[0].text
        for p in plugins:
            p.process_pom(config["dependencies"][path], pom_path)
        config["dependencies"][path]["version"] = version
        updated_dependencies.append(path)
def body(self, xml):
    """ Body importer

    Accepts either an XML string (parsed here) or an already-built element.
    The root must be an <object> whose name matches the context; <property>
    and <object> children are fed to the ``attribute``/``child`` setters.
    """
    if isinstance(xml, (str, unicode)):
        parser = XMLTreeBuilder()
        parser.feed(xml)
        tree = parser.close()
        tree = ElementTree(tree)
        elem = tree.getroot()
    else:
        elem = xml
    if elem.tag != 'object':
        raise AttributeError('Invalid xml root element %s' % elem.tag)
    name = elem.get('name')
    if not name:
        raise AttributeError('No name provided for object')
    if hasattr(self.context, '__name__') and (name != self.context.__name__):
        raise AttributeError(('XML root object name %s '
                              'should match context name %s') % (name, self.context.__name__))
    for child in elem.getchildren():
        # property/object children are routed through descriptor setters
        if child.tag == 'property':
            self.attribute = child
        elif child.tag == 'object':
            self.child = child
    event.notify(ObjectModifiedEvent(self.context))
    # news content commits eagerly so partial imports are persisted
    if INews.providedBy(self.context):
        logger.info('Commit transaction import for %s' % getattr(
            self.context, '__name__', '(no name)'))
        transaction.commit()
class lpcParameterParser(object):
    ''' Returns dictionary with keys 'type' and 'params' '''

    def __init__(self, filename):
        '''
        Parameters
        ----------
        filename : path to xml config file (TODO - SCHEMA definition)
        '''
        # keep the path for error messages and parse the config eagerly
        self._filename = filename
        self._config_tree = ElementTree(file=filename)

    def _assignDictionaryItem(self, item_dict):
        # Builds and executes e.g. v=int("3"); relies on Python 2 exec
        # semantics where the local ``v`` becomes visible to ``return``.
        s = 'v=' + item_dict['type'] + '("' + item_dict['value'] + '")'
        exec(s)  # TODO - remove exec, there are clearly better ways to do this!
        return v

    def _generateParamDictionary(self, tag):
        '''Generates a dictionary containing 'type', a string defining the
        'type' attribute of element 'tag' and 'params', a dictionary of
        parameters to be unpacked as arguments to constructors for
        instances of 'type'
        '''
        parser_type_tag = self.__class__.TYPE_TAG
        parser_type_node = self._config_tree.getiterator(parser_type_tag)
        if len(parser_type_node) == 1:
            param_node = parser_type_node[0].getiterator(tag)
            if len(param_node) == 1:
                param_type = param_node[0].get('type')
                params = {}
                for par in param_node[0]:
                    items = dict(par.items())
                    if items['type'] == 'list':
                        # list-valued parameter: convert each <elt> child
                        elts = par.getiterator('elt')
                        temp_list = []
                        for e in elts:
                            list_attr = dict(e.items())
                            temp_list.append(self._assignDictionaryItem(list_attr))
                        params[items['name']] = temp_list
                    else:
                        params[items['name']] = self._assignDictionaryItem(items)
                return {'type': param_type, 'params': params}
            else:
                # NOTE(review): message reports parser_type_tag but this
                # branch fired because ``tag`` was missing/not unique --
                # confirm which tag the message should name
                msg = 'The required unique lpc configuration element tag, ' + parser_type_tag + ' is missing or not unique from ' + self._filename
                raise ValueError, msg
        else:
            msg = 'The required unique lpc configuration element tag, ' + parser_type_tag + ' is missing or not unique from ' + self._filename
            raise ValueError, msg
def test():
    """Build a small HTML document with the helper constructors and
    pretty-print it to stdout."""
    import sys
    doc = HTML(HEAD('ankjwajhsjasa', META('blabal')))
    table = TABLE()
    # NOTE(review): if ``append`` follows list semantics and returns None,
    # BODY() receives None here -- confirm the intended builder API
    body = BODY(
        table.append(
            TBODY('ahasa', TR(
                TD('blabla'),
                TD('blabla'),
                TD('blabla'),
            ))))
    p1, p2 = P('blabla'), P('bli')
    p2.text += 'dhsdhshkds'
    doc += body
    doc += (p1, p2)
    tree = ElementTree(doc)
    write_pretty(tree, sys.stdout)
def __init__(self, lstData, GeoType, strPath, strFilename, strLayername): dctWriteKML = {'Point': self.writePoint, 'Polyline': self.writeLine, 'Polygon': self.writePolygon} #Create new element tree with a root of KML... objRoot = Element("{http://earth.google.com/kml/2.1}kml") objTree = ElementTree(element=objRoot) elemDoc = SubElement(objRoot, 'Document') elemDocName = SubElement(elemDoc, 'name') #According the KML spec, default Polystyle stuff goes here... elemDocName.text = strLayername #Add a document name element here...populate from supplied parameters for objRow in lstData: elemPlace = SubElement(elemDoc, 'Placemark') elemName =SubElement(elemPlace,'name') elemName.text = objRow['Name'] #Add support for the description tag... elemDesc = SubElement(elemPlace, 'description') elemDesc.text = objRow['Description'] elemGeo = dctWriteKML.get(GeoType, self.errHandler)(objRow['Geometry'], elemPlace) elemPlace.append(elemGeo) self.Write(objTree, strPath, strFilename)
def output_workers(self, file=sys.stdout):
    """Emit a <resources-list> XML document describing workers and their
    timetables.

    Builds a shared weekend timetable plus one per weekday, then one
    <resource> per user with dayoff references derived from weekly hours.
    """
    week = []
    tree = Element("resources-list", type="worker")
    tt = SubElement(tree, "timetable", id="weekend")
    SubElement(tt, "dayoff", type="weekday").text = "saturday"
    SubElement(tt, "dayoff", type="weekday").text = "sunday"
    for day in ("monday", "tuesday", "wednesday", "thursday", "friday"):
        tt = SubElement(tree, "timetable", id=day)
        SubElement(tt, "dayoff", type="weekday").text = day
        week.append(day)
    stati = [self.db.user_status.lookup(i)
             for i in ("valid", "obsolete", "system")]
    for uid in self.db.user.filter(None, dict(status=stati)):
        dyn = self.get_user_dynamic(self.db, uid, self.now)
        if not dyn:
            dyn = self.last_user_dynamic(self.db, uid)
        user = self.db.user.getnode(uid)
        # users without any usable name are skipped entirely
        if not user.nickname and not user.username:
            continue
        r = SubElement(
            tree,
            "resource",
            id=(user.nickname or user.username).decode("utf-8"),
            fullname=(user.realname or user.username).decode("utf-8")
        )
        SubElement(r, "use-timetable", idref="weekend")
        wh = 38.5
        if dyn:
            wh = self.weekly_hours(dyn) or 38.5
        # converts weekly hours into a count of working days (0..5); the
        # constants assume a 7.75-hour day -- TODO confirm the rounding
        # intent of the "- 1" adjustment
        wh *= 4
        wh += 7.75 * 4 - 1
        wh = int(wh)
        wh = int(wh / (7.75 * 4))
        # remaining weekdays beyond the working-day count become days off
        for i in range(wh, 5):
            SubElement(r, "use-timetable", idref=week[i])
    ElementTree(tree).write(file, encoding="utf-8")
def main(options, args):
    """Drive lexicon extension and LM event/count generation.

    Steps: load the reference lexicon, optionally load g2p/fragmentizer
    models, extend the lexicon with graphones, emit the LM token set,
    then generate events, counts, fragment dumps and modified LM text
    as requested by the options.
    """
    # 1. load reference lexicon
    print('loading reference lexicon ...')
    lexicon = loadBlissLexicon(options.lexicon)
    knownWords = set([orth for orth, phon in lexicon])

    # 2. load model for fragmentizing unknown words
    if options.subliminal_lexicon:
        print('loading subliminal lexicon ...')
        subliminalLexicon = loadBlissLexicon(options.subliminal_lexicon)
    else:
        subliminalLexicon = None
    if options.subliminal_g2p:
        print('loading subliminal g2p model ...')
        subliminalG2p = pickle.load(open(options.subliminal_g2p))
    else:
        subliminalG2p = None
    if options.g2pModel:
        print('loading g2p model ...')
        model = pickle.load(open(options.g2pModel))
        oldSize, newSize = model.strip()
        print('stripped number of multigrams from %d to %d' % (oldSize, newSize))
        fragmentizer = Fragmentizer(model)
        if subliminalLexicon:
            fragmentizer.addSupervised(subliminalLexicon)
        if subliminalG2p:
            fragmentizer.addSupervised(subliminalG2p)
        graphones = model.sequitur.symbols()
        graphones.remove(model.sequitur.symbol(model.sequitur.term))
    else:
        model = fragmentizer = graphones = None

    # 3. add fragments to lexicon
    if options.write_lexicon:
        print('creating extended lexicon ...')
        xmlLexicon = ElementTree(file=options.lexicon)
        if options.model_type == 'phonemes':
            changeSyntaticToPhonetic(xmlLexicon)
        else:
            addGraphonesToLexicon(xmlLexicon, graphones)
        xmlLexicon.write(gOpenOut(options.write_lexicon), defaultEncoding)

    # 4. determine set of LM tokens
    vocabulary = mGramCounts.ClosedVocablary()
    vocabulary.add(['<s>', '</s>'])
    if options.model_type == 'flat-hybrid':
        vocabulary.add(filter(isLmToken, knownWords), soft=True)
    if graphones:
        vocabulary.add(starmap(lmToken, graphones))
    vocabulary.sort()
    if options.write_tokens:
        f = gOpenOut(options.write_tokens, defaultEncoding)
        if options.model_type == 'phonemes':
            phonemes = set(p for orth, phon in lexicon for p in phon)
            phonemes.add('#1')
            if 'si' in phonemes:
                phonemes.remove('si')
            for p in sorted(phonemes):
                print(p, file=f)
        else:
            for w in vocabulary:
                if w is not None:
                    print(w, file=f)

    # 5./6. set-up LM event generator
    if options.write_counts or options.write_events:
        order = options.order - 1
        if options.model_type == 'flat-hybrid':
            events = HybridEventGenerator(knownWords, fragmentizer, order)
            if options.range_type == 'fragments':
                events.setFragmentRange()
            elif options.range_type == 'words':
                events.setTrueWordRange()
            else:
                # fix: was ``assert ValueError(...)``, an always-true assert
                # that silently accepted bad range types
                raise ValueError(options.range_type)
        elif options.model_type == 'fragments':
            events = OovEventGenerator(knownWords, fragmentizer, order)
        elif options.model_type == 'phonemes':
            events = PhonemeEventGenerator(lexicon, order)

    # 5. create modified LM training corpus events
    if options.write_events:
        print('creating sequence model events ...')
        f = gOpenOut(options.write_events, defaultEncoding)
        for event, count in events(gOpenIn(options.text, defaultEncoding)):
            print(repr(event), '\t', count, file=f)

    # 6. count LM events
    if options.write_counts:
        print('creating sequence model counts ...')
        counts = mGramCounts.SimpleMultifileStorage()
        counts.addIter(events(gOpenIn(options.text, defaultEncoding)))
        mGramCounts.TextStorage.write(gOpenOut(options.write_counts, defaultEncoding), counts)

    # 7. dump list of OOV words and their corresponding fragmentation
    if options.write_fragments:
        print('dumping fragments ...')
        f = gOpenOut(options.write_fragments, defaultEncoding)
        events = OovFragmentGenerator(knownWords, fragmentizer)
        fragments = events(gOpenIn(options.text, defaultEncoding))
        for event in list(fragments.keys()):
            print(event, '\t', ' '.join(fragments[event]), file=f)

    # 8. dump modified LM training text
    if options.write_lm_text:
        print('dumping modified LM training text ...')
        f = gOpenOut(options.write_lm_text, defaultEncoding)
        events = OovFragmentGenerator(knownWords, fragmentizer)
        for line in gOpenIn(options.text, defaultEncoding):
            words = line.split()
            modWords = events.modifyLmText(words)
            print(" ".join(modWords), file=f)
def _parse(self, entry):
    """Parse *entry* (a unicode Atom entry) into an ElementTree.

    Raises AtomStoreExp when the text is not well-formed XML.
    """
    try:
        etree = ElementTree(file=StringIO(entry.encode('utf-8')))
    except Exception:
        # narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed and rebranded as parse errors
        raise AtomStoreExp("Not a well-formed Atom Entry.")
    return etree
# -*- coding: utf-8 -*-
# This is just an illustration...
from elementtree.ElementTree import ElementTree

mydoc = ElementTree(file='tst.xml')
for e in mydoc.findall('/foo/bar'):
    # fix: Element.get() returns the attribute value as a string (or None);
    # strings have no .text attribute, so the original raised AttributeError
    print(e.get('title'))
<author> <name>John Doe</name> </author> <id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id> <entry> <title type="xhtml">Atom-Powered <br/> Robots Run Amok</title> <link href="http://example.org/2003/12/13/atom03"/> <id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id> <updated>2003-12-13T18:30:02Z</updated> <summary>Some text.</summary> </entry> </feed>""" etree = ElementTree(file=StringIO.StringIO(content)) feed = XML(content) print etree print feed #print len(feed) #print feed[0] #print feed.keys() ATOM = "http://www.w3.org/2005/Atom" entry = etree.getiterator('{%s}entry'%ATOM)[0] new_lin = SubElement(entry, '{%s}link'%ATOM) new_lin.set('rel', 'source') new_lin.set('href', 'http://somthing.org')
def process(directory, option, file_out, use_file_out, xml_file, group, verbose, recurse, progress): if verbose: print "Inside process..." col = commands.getoutput("echo \"$COLUMNS\"") try: columns = int(col) except: columns = 60 pb = progress_bar.pb("Progress: ", "-", columns, sys.stderr) tree = ElementTree(file=xml_file) elem = tree.getroot() if verbose: print "Getting rpm_names" rpm_names = get_names_from_dir(directory, recurse, pb, progress) if verbose: print "Processing names" if option == comps_opt.ERASE: """ Handle the ERASE operations """ for subelem in elem: for subsub in subelem: p = 0.0 for subsubsub in subsub: p = p + 1.0 if progress: percentage = p / len(subsub) pb.progress(percentage) if subsubsub.tag == 'packagereq' and subsubsub.text in rpm_names: subsub.remove(subsubsub) if verbose: print "Found %s, removing" % subsubsub.text elif option == comps_opt.ADD: """ Handle the ADD operations """ text = "<group>\n" text += "<id>%s</id>\n" % group text += "<name>%s</name>\n" % group text += "<packagelist>\n" p = 0.0 for name in rpm_names: p = p + 1.0 if progress: percentage = p / len(rpm_names) pb.progress(percentage) text += "<packagereq type=\"mandatory\">%s</packagereq>\n" % name text += "</packagelist>\n" text += "</group>\n" node = fromstring(text) elem.append(node) else: die("Some unknown error has occured. Neither 'ADD' nor 'ERASE' was specified, somehow") if progress: pb.clear() if verbose: print "Ending, outputing XML" if use_file_out: ElementTree(tree).write(file_out) else: dump(tree)
if type == "float": type = "double" if type == "": type = "int" f.write("\t%s %s;\t// FlightGear property: %s\n" % (type, chunk["name"], chunk["node"])) f.write("\n\tunsigned int magic;\n};\n") if __name__ == "__main__": if len(sys.argv) != 3: print "Usage: fgXMLtoHeader.py FG_OpenGC.xml output.h" print "Copy output.h over ../Source/DataSources/FlightGear_Protocol.h" sys.exit(1) # Open the file, check it has the "PropertyList/generic/output" root = ElementTree(file=sys.argv[1]).getroot() if root == None or root.tag != "PropertyList": print "File is not a property list" sys.exit(1) # Get the output section output_sec = root.find("generic/output") if output_sec == None: print "File has no generic/output section" sys.exit(1) # Parse the file and write the result to a file parse_tree = parse_file(output_sec, output_sec) write_output(sys.argv[2], parse_tree)
def main():
    """Refresh the clean-mx virus-URL feed and download new malware samples.

    Re-fetches xmlviruses.xml when the local copy is older than 'delay'
    seconds, sanitises it with sed, validates it with xmllint, then downloads
    every listed sample into malwarePath, named by its MD5 (or a random name
    that is re-hashed and renamed after download).
    """
    pp = pprint.PrettyPrinter(indent=4)
    try:
        lastMod = int(os.path.getmtime(basePath + "xmlviruses.xml"))
    except OSError:
        # No local copy yet; treat it as infinitely old so we fetch one.
        lastMod = 0
    curTime = int(calendar.timegm(time.gmtime()))
    if (lastMod + delay) < curTime:
        # Integer division on purpose (Python 2): whole days/hours/minutes.
        age = int((curTime - lastMod))
        age_d = age / DAY
        age = age - (age_d * DAY)
        age_h = age / HOUR
        age = age - (age_h * HOUR)
        age_m = age / MINUTE
        age = age - (age_m * MINUTE)
        print("It has been " + str(age_d) + " days, " + str(age_h) + " hours, " + str(age_m) + " minutes and " + str(age) + " seconds since last update")
        urllib.urlretrieve("http://support.clean-mx.de/clean-mx/xmlviruses.php?response=alive", basePath + "xmlviruses.xml")
    else:
        print("Not updating virus list as it is less then 30 minutes old")
    # Strip the feed's doubled/broken </url> closers so xmllint accepts it.
    cmd = [
        'sed', '-i', '-e', 's:\'</url>.*\':\'</url>\':g',
        basePath + "xmlviruses.xml"
    ]
    pp.pprint(cmd)
    print("Running command: " + ' '.join(cmd))
    subprocess.check_call(cmd)
    cmd = ['xmllint', '-noout', basePath + "xmlviruses.xml"]
    pp.pprint(cmd)
    print("Running command: " + ' '.join(cmd))
    subprocess.check_call(cmd)
    tree = ElementTree(file=basePath + "xmlviruses.xml")
    entryList = tree.findall("entries/entry")
    for entry in entryList:
        # Positional children: entry[9] is the sample URL, entry[4] its MD5.
        # TODO(review): confirm indices against the current feed schema.
        urlString = entry[9].text
        md5String = entry[4].text
        # The .exe-only filter was deliberately disabled; match everything.
        #result = re.match("^.*\.[Ee][Xx][Ee]$", urlString)
        result = re.match(".*", urlString)
        if result:
            try:
                filename = malwarePath + md5String
                generated_filename = False
            except TypeError:
                # md5String was None; fall back to a random name and hash
                # the file after download.
                print("Filename generation error")
                filename = malwarePath + base64.urlsafe_b64encode(os.urandom(30))
                generated_filename = True
            files = glob(filename + "*")
            if len(files) == 0:
                print("Downloading " + urlString + " as " + filename)
                try:
                    urllib.urlretrieve(urlString, filename)
                    if generated_filename:
                        md5String = hashlib.md5(open(filename, 'rb').read()).hexdigest()
                        newFilename = malwarePath + md5String
                        print("Renaming " + filename + " to " + newFilename)
                        os.rename(filename, newFilename)
                        filename = newFilename
                    # Add the correct file extension
                    extension = magicFileExtension.getExt(filename)
                    if len(extension) > 0:
                        print("Renaming " + filename + " to " + filename + extension)
                        os.rename(filename, filename + extension)
                except Exception as e:
                    print("Error while downloading " + urlString + " %s" % e)
        else:
            print ("Not downloading " + urlString + " - not a exe file")
    print ("Finished downloading all available samples")
class TCXFile(ActivityFile):
    """Parser for Garmin Training Center (TCX) activity files.

    Reads laps and trackpoints from the first <Activities> entry and fills
    self.laps (via parse_file) and self.track_data / self.track_by_distance
    (via parse_trackpoints). All tag lookups are namespace-qualified with
    the TCX v2 and ActivityExtension v2 namespaces below.
    """
    filetypes = ["tcx"]
    # TCX_NS="http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2"
    TCX_NS = "{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}"
    xmlns = "{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}"
    xmlactextns = "{http://www.garmin.com/xmlschemas/ActivityExtension/v2}"
    xml_instance = "{http://www.w3.org/2001/XMLSchema-instance}"

    def __init__(self, track, request=None):
        """Load the track's file into an ElementTree and parse trackpoints."""
        ActivityFile.__init__(self, track, request)
        track.trackfile.open()
        self.xmltree = ElementTree(file=track.trackfile)
        track.trackfile.close()
        # logging.debug("Trackfile %r closed" % tcxfile.trackfile)
        self.parse_trackpoints()

    def parse_file(self):
        """Parse per-lap summary data (times, distances, HR, cadence,
        elevation) from the first activity into self.laps."""
        self.laps = []
        self.position_start = None
        self.date = None
        self.time_start = None
        self.time_end = None
        self.track.trackfile.open()
        xmltree = ElementTree(file=self.track.trackfile)
        self.track.trackfile.close()
        # take only first activity from file
        xmlactivity = xmltree.find(self.TCX_NS + "Activities")[0]
        lap_date = None
        for xmllap in xmlactivity.findall(self.TCX_NS + "Lap"):
            lap_date = dateutil.parser.parse(xmllap.get("StartTime"))
            if self.date is None:
                self.date = lap_date
            # NOTE: 'time' shadows the stdlib module name within this loop.
            time = int(float(xmllap.find(self.TCX_NS + "TotalTimeSeconds").text))
            if xmllap.find(self.TCX_NS + "DistanceMeters") is None:
                logging.debug("DistanceMeters not present in Lap data")
                distance = None
            else:
                # Stored as a string, in kilometres.
                distance = str(float(xmllap.find(self.TCX_NS + "DistanceMeters").text) / 1000)
            if xmllap.find(self.TCX_NS + "MaximumSpeed") is None:
                logging.debug("MaximumSpeed is None")
                speed_max = None
            else:
                logging.debug("MaximumSpeed xml is %r" % xmllap.find(self.TCX_NS + "MaximumSpeed"))
                speed_max = str(float(xmllap.find(self.TCX_NS + "MaximumSpeed").text) * 3.6)  # Given as meters per second in tcx file
                logging.debug("speed_max is %s" % speed_max)
            if xmllap.find(self.TCX_NS + "Calories") is not None:
                calories = int(xmllap.find(self.TCX_NS + "Calories").text)
            else:
                calories = None
            try:
                hf_avg = int(xmllap.find(self.TCX_NS + "AverageHeartRateBpm").find(self.TCX_NS + "Value").text)
                logging.debug("Found hf_avg: %s" % hf_avg)
            except AttributeError:
                # Element missing: find() returned None.
                hf_avg = None
                logging.debug("Not found hf_avg")
            try:
                hf_max = int(xmllap.find(self.TCX_NS + "MaximumHeartRateBpm").find(self.TCX_NS + "Value").text)
                logging.debug("Found hf_max: %s" % hf_max)
            except AttributeError:
                hf_max = None
                logging.debug("Not found hf_max")
            try:
                cadence_avg = int(xmllap.find(self.TCX_NS + "Cadence").text)
                logging.debug("Found average cadence: %s" % cadence_avg)
            except AttributeError:
                cadence_avg = None
                logging.debug("Not found average cadence")
            if time != 0 and distance is not None:
                # km over seconds -> km/h, kept as a string.
                speed_avg = str(float(distance) * 3600 / time)
            else:
                speed_avg = None
            cadence_max = None
            elev_min = None
            elev_max = None
            elev_gain = None
            elev_loss = None
            last_elev = None
            for xmltrack in xmllap.findall(self.TCX_NS + "Track"):
                for xmltp in xmltrack.findall(self.TCX_NS + "Trackpoint"):
                    # First trackpoint with a position fixes the start point.
                    if not self.position_start:
                        xmlpos = xmltp.find(self.TCX_NS + "Position")
                        if xmlpos is not None:
                            if xmlpos.find(self.TCX_NS + "LatitudeDegrees") is not None and xmlpos.find(self.TCX_NS + "LongitudeDegrees") is not None:
                                lat = float(xmlpos.find(self.TCX_NS + "LatitudeDegrees").text)
                                lon = float(xmlpos.find(self.TCX_NS + "LongitudeDegrees").text)
                                self.position_start = (lat, lon)
                    if not self.time_start and xmltp.find(self.TCX_NS + "Time") is not None:
                        self.time_start = dateutil.parser.parse(xmltp.find(self.TCX_NS + "Time").text)
                    if xmltp.find(self.TCX_NS + "AltitudeMeters") is not None:
                        elev = int(round(float(xmltp.find(self.TCX_NS + "AltitudeMeters").text)))
                    else:
                        # Carry the previous elevation forward when missing.
                        elev = last_elev
                    if elev != last_elev:
                        if elev_max is not None:
                            if elev > elev_max:
                                elev_max = elev
                        else:
                            elev_max = elev
                        if elev_min is not None:
                            if elev < elev_min:
                                elev_min = elev
                        else:
                            elev_min = elev
                        # NOTE(review): truthiness test — an elevation of
                        # exactly 0 is treated like "no previous value".
                        if last_elev:
                            if elev > last_elev:
                                if elev_gain is None:
                                    elev_gain = elev - last_elev
                                else:
                                    elev_gain += elev - last_elev
                            else:
                                if elev_loss is None:
                                    elev_loss = last_elev - elev
                                else:
                                    elev_loss += last_elev - elev
                        last_elev = elev
                    if xmltp.find(self.TCX_NS + "Cadence") is not None:
                        cadence = int(xmltp.find(self.TCX_NS + "Cadence").text)
                        # Python 2: int > None is always true, so the first
                        # cadence value replaces the initial None.
                        if cadence > cadence_max:
                            cadence_max = cadence
                # Get timestamp from last trackpoint in this track
                xmltp = xmltrack.findall(self.TCX_NS + "Trackpoint")[-1]
                if xmltp.find(self.TCX_NS + "Time") is not None:
                    self.time_end = dateutil.parser.parse(xmltp.find(self.TCX_NS + "Time").text)
            lap = Lap(
                date=lap_date,
                time=time,
                distance=distance,
                elevation_gain=elev_gain,
                elevation_loss=elev_loss,
                elevation_min=elev_min,
                elevation_max=elev_max,
                speed_max=speed_max,
                speed_avg=speed_avg,
                cadence_avg=cadence_avg,
                cadence_max=cadence_max,
                calories=calories,
                hf_max=hf_max,
                hf_avg=hf_avg)
            self.laps.append(lap)

    def parse_trackpoints(self):
        """Collect per-trackpoint series (altitude, cadence, HR, position,
        GPS/footpod speed) keyed by cumulative distance, compressing away
        sections with no real movement (< 0.5 m/s)."""
        # take only first activity from file
        xmlactivity = self.xmltree.find(self.xmlns + "Activities")[0]
        alt_data = []
        cad_data = []
        hf_data = []
        pos_data = []
        speed_gps_data = []
        speed_foot_data = []
        # logging.debug("Parsing TCX Track file in first activity")
        first_lap = xmlactivity.find(self.xmlns + "Lap")
        start_time = dateutil.parser.parse(first_lap.get("StartTime"))
        offset_time = 0  # used to remove track sequences from plot where no movement has occured
        last_lap_distance = 0  # used to add distance to laps starting with distance 0
        last_distance = None
        last_lat_lon = None
        for xmllap in xmlactivity.findall(self.xmlns + "Lap"):
            distance_offset = 0
            # check if lap starts with distance 0
            if len(xmllap.findall(self.xmlns + "Track/" + self.xmlns + "Trackpoint")) == 0:
                continue  # skip empty laps
            xmltp = xmllap.findall(self.xmlns + "Track/" + self.xmlns + "Trackpoint")[0]
            if hasattr(xmltp.find(self.xmlns + "DistanceMeters"), "text"):
                distance = float(xmltp.find(self.xmlns + "DistanceMeters").text)
                if distance < last_lap_distance:
                    # Lap restarts its distance counter: shift by the total
                    # distance accumulated so far.
                    distance_offset = last_lap_distance
            for xmltp in xmllap.findall(self.xmlns + "Track/" + self.xmlns + "Trackpoint"):
                distance = alt = cad = hf = trackpoint_time = None
                if hasattr(xmltp.find(self.xmlns + "DistanceMeters"), "text"):
                    distance = float(xmltp.find(self.xmlns + "DistanceMeters").text)
                    distance = distance + distance_offset
                elif xmltp.find(self.xmlns + "Position"):
                    # No DistanceMeters: derive distance from GPS coordinates.
                    xmltp_pos = xmltp.find(self.xmlns + "Position")
                    lat = float(xmltp_pos.find(self.xmlns + "LatitudeDegrees").text)
                    lon = float(xmltp_pos.find(self.xmlns + "LongitudeDegrees").text)
                    if last_lat_lon is None:
                        last_lat_lon = (lat, lon)
                        last_distance = 0
                        continue
                    else:
                        distance = last_distance + activities.utils.latlon_distance(last_lat_lon, (lat, lon))
                        last_lat_lon = (lat, lon)
                else:
                    continue
                if not hasattr(xmltp.find(self.xmlns + "Time"), "text"):
                    continue
                delta = dateutil.parser.parse(xmltp.find(self.xmlns + "Time").text) - start_time
                # Milliseconds since activity start, minus paused time.
                trackpoint_time = ((delta.seconds + 86400 * delta.days) - offset_time) * 1000
                # Find sections with speed < 0.5m/s (no real movement, remove duration of this section from timeline)
                if last_distance:
                    delta_dist = distance - last_distance
                    delta_time = (trackpoint_time - self.track_by_distance[last_distance]["trackpoint_time"]) / 1000
                    if delta_time > 0 and (delta_dist / delta_time) < 0.5:
                        offset_time += delta_time
                        trackpoint_time = ((delta.seconds + 86400 * delta.days) - offset_time) * 1000
                last_distance = distance
                if distance not in self.track_by_distance:
                    self.track_by_distance[distance] = {}
                self.track_by_distance[distance]["trackpoint_time"] = trackpoint_time
                # Get altitude
                if hasattr(xmltp.find(self.xmlns + "AltitudeMeters"), "text"):
                    alt = float(xmltp.find(self.xmlns + "AltitudeMeters").text)
                    self.track_by_distance[distance]["alt"] = alt
                    # alt_data.append((trackpoint_time,alt))
                    alt_data.append((distance, trackpoint_time, alt))
                # Get Cadence data (from Bike cadence sensor)
                if hasattr(xmltp.find(self.xmlns + "Cadence"), "text"):
                    cad = int(xmltp.find(self.xmlns + "Cadence").text)
                    self.track_by_distance[distance]["cad"] = cad
                    # cad_data.append((trackpoint_time,cad))
                    cad_data.append((distance, trackpoint_time, cad))
                # Locate heart rate in beats per minute
                hrt = xmltp.find(self.xmlns + "HeartRateBpm")
                if hrt is not None:
                    if hasattr(xmltp.find(self.xmlns + "HeartRateBpm/" + self.xmlns + "Value"), "text"):
                        hf = int(xmltp.find(self.xmlns + "HeartRateBpm/" + self.xmlns + "Value").text)
                        self.track_by_distance[distance]["hf"] = hf
                        # hf_data.append((trackpoint_time,hf))
                        hf_data.append((distance, trackpoint_time, hf))
                # Locate time stamps for speed calculation based on GPS
                if hasattr(xmltp.find(self.xmlns + "Time"), "text"):
                    track_time = dateutil.parser.parse(xmltp.find(self.xmlns + "Time").text)
                    self.track_by_distance[distance]["gps"] = track_time
                    speed_gps_data.append((distance, track_time))
                # Get position coordinates
                pos = xmltp.find(self.xmlns + "Position")
                if pos is not None:
                    if hasattr(pos.find(self.xmlns + "LatitudeDegrees"), "text") and hasattr(pos.find(self.xmlns + "LongitudeDegrees"), "text"):
                        lat = float(pos.find(self.xmlns + "LatitudeDegrees").text)
                        lon = float(pos.find(self.xmlns + "LongitudeDegrees").text)
                        pos_data.append((lat, lon))
                # Search for Garmin Trackpoint Extensions TPX, carrying RunCadence data from Footpods
                ext = xmltp.find(self.xmlns + "Extensions")
                # logging.debug("Found Activity Extensions")
                if ext is not None:
                    xmltpx = ext.find(self.xmlactextns + "TPX")
                    # currenlty supported Footpod sensor
                    if xmltpx is not None and xmltpx.get("CadenceSensor") == "Footpod":
                        if hasattr(xmltpx.find(self.xmlactextns + "Speed"), "text"):
                            speed = float(xmltpx.find(self.xmlactextns + "Speed").text)
                            self.track_by_distance[distance]["speed_footpod"] = speed
                            speed_foot_data.append((distance, trackpoint_time, speed))
                        if hasattr(xmltpx.find(self.xmlactextns + "RunCadence"), "text"):
                            # Only copy cadence data if no other Cadence data (from bike) is present
                            if cad is None:
                                cad = int(xmltpx.find(self.xmlactextns + "RunCadence").text)
                                self.track_by_distance[distance]["cad"] = cad
                                cad_data.append((distance, trackpoint_time, cad))
                # TODO: Watts sensors ???
            last_lap_distance = distance
        # logging.debug("Found a total time of %s seconds without movement (speed < 0.5m/s)" % offset_time)
        self.track_data["alt"] = alt_data
        self.track_data["cad"] = cad_data
        self.track_data["hf"] = hf_data
        self.track_data["pos"] = pos_data
        self.track_data["speed_gps"] = speed_gps_data
        self.track_data["speed_foot"] = speed_foot_data
"Titles" : (TitlesToXML, XMLToTitles), "Fonts" : (ListToXML, XMLToList), #"Rectangles" : (ListToXML, XMLToList), #"" : XMLToMenuItems, #"" : XMLToMenuItems, } if __name__ == "__main__": import sys from elementtree import ElementTree parsed = ElementTree.parse(sys.argv[1]) props = {} print dir(parsed) for ctrl in parsed.findall("CONTROL"): print ControlFromXML(ctrl) sys.exit() props['ClientRect'] = ParseRect(ctrl.find("CLIENTRECT")) props['Rectangle'] = ParseRect(ctrl.find("RECTANGLE")) props['Font'] = ParseLogFont(ctrl.find("FONT")) props['Titles'] = ParseTitles(ctrl.find("TITLES"))
from elementtree.ElementTree import Element, ElementTree, SubElement

# Pre-process the PyCON source XML: wrap <description> bodies in CDATA so the
# parser tolerates embedded markup, then build the target document skeleton.
_src_f = open("pycon_src.xml")
_content = _src_f.read()
_src_f.close()
_content = _content.replace("<description>", "<description>\n<![CDATA[\n")
_content = _content.replace("</description>", "\n\n\n]]>\n</description>")
# NOTE(review): the two replace() calls below are no-ops as written; they
# look like entity unescaping (&lt;/&gt; -> </>) whose search strings were
# decoded somewhere along the way — confirm against the original file.
_content = _content.replace("<", "<")
_content = _content.replace(">", ">")
_tmp_f = open("pycon_src_tmp.xml", "wb")
_tmp_f.write(_content)
_tmp_f.close()
_src_xml = ElementTree(file="pycon_src_tmp.xml")
_root = Element("xml")
_conference = SubElement(_root, "conference")
# add pyconfr main values
SubElement(_conference, "title").text = "PyCONFR 2010"
SubElement(_conference, "subtitle").text = \
    "Rendez-vous annuel des utilisateurs de Python organisee par l'Association Francophone Python"
SubElement(_conference, "venue").text = "Cyberbase de la Cite des Sciences"
SubElement(_conference, "city").text = "Paris"
SubElement(_conference, "start").text = "2010-10-28"
SubElement(_conference, "end").text = "2010-10-29"
SubElement(_conference, "days").text = "2"
SubElement(_conference, "day_change").text = "08:00"
# Migrate persisted Category/Component/Property XML files of a project
# directory to the versioned (1.1) schema.
if len(sys.argv) < 2:
    print "USAGE: ./transform_objects PATH/TO/PROJECT/DIR"
    exit(0)

persistence_path = os.path.join(sys.argv[1], "persistence")
cat_path = os.path.join(persistence_path, "Category")
comp_path = os.path.join(persistence_path, "Component")
prop_path = os.path.join(persistence_path, "Property")

# All three object folders must exist for a valid project layout.
if not os.path.exists(persistence_path) or \
   not os.path.exists(cat_path) or \
   not os.path.exists(comp_path) or \
   not os.path.exists(prop_path):
    print "INVALID PROJECT DIRECTORY"
    exit(0)

et = ElementTree()

#transform categories
print "Transforming Categories..."
for cat_fname in os.listdir(cat_path):
    fpath = os.path.join(cat_path, cat_fname)
    et.parse(fpath)
    version = et.getroot().get("version")
    # Only files without a version attribute still need migrating.
    if not version:
        print "\tTransforming %s..." % cat_fname
        # NOTE(review): the loop body continues beyond this chunk; the new
        # element is presumably written back by later code.
        root = Element(
            "category", {
                "version": "1.1",
                "name": et.find("name").text.strip(),
                "description": et.find("description").text.strip()
            })
def WriteElementToFile(element, fileName):
    """Serialise *element* to *fileName* as UTF-8 encoded XML."""
    # An Element cannot write itself; wrap it in an ElementTree, which
    # provides the serialiser.
    ElementTree(element).write(fileName, encoding="utf-8")
def PackageToXml(pkg, summary="N/A", trainingDataId='N/A', dataPerformance=None,
                 recommendedThreshold=None, classDescriptions=None,
                 modelType=None, modelOrganism=None):
    """ generates XML for a package that follows the RD_Model.dtd

    If provided, dataPerformance should be a sequence of 2-tuples:
      ( note, performance )
    where performance is of the form:
      ( accuracy, avgCorrectConf, avgIncorrectConf, confusionMatrix,
        thresh, avgSkipConf, nSkipped )
    the last four elements are optional

    classDescriptions, if provided, is a sequence of (value, text) pairs.
    Returns an ElementTree rooted at <RDModelInfo>.
    """
    # The old defaults were mutable lists ([]), a classic shared-state trap;
    # None is the safe sentinel and behaves identically for callers.
    if dataPerformance is None:
        dataPerformance = []
    if classDescriptions is None:
        classDescriptions = []
    head = Element("RDModelInfo")
    name = SubElement(head, "ModelName")
    notes = pkg.GetNotes()
    if not notes:
        notes = "Unnamed model"
    name.text = notes
    summ = SubElement(head, "ModelSummary")
    summ.text = summary
    calc = pkg.GetCalculator()
    descrs = SubElement(head, "ModelDescriptors")
    # Loop variables renamed (dname/dsummary) so they no longer shadow the
    # 'name' element and the 'summary' argument above.
    for dname, dsummary, func in zip(calc.GetDescriptorNames(),
                                     calc.GetDescriptorSummaries(),
                                     calc.GetDescriptorFuncs()):
        descr = SubElement(descrs, "Descriptor")
        elem = SubElement(descr, "DescriptorName")
        elem.text = dname
        elem = SubElement(descr, "DescriptorDetail")
        elem.text = dsummary
        if hasattr(func, 'version'):
            vers = SubElement(descr, "DescriptorVersion")
            major, minor, patch = func.version.split('.')
            elem = SubElement(vers, "VersionMajor")
            elem.text = major
            elem = SubElement(vers, "VersionMinor")
            elem.text = minor
            elem = SubElement(vers, "VersionPatch")
            elem.text = patch
    elem = SubElement(head, "TrainingDataId")
    elem.text = trainingDataId
    for description, perfData in dataPerformance:
        dataNode = SubElement(head, "ValidationData")
        note = SubElement(dataNode, 'ScreenNote')
        note.text = description
        perf = SubElement(dataNode, "PerformanceData")
        _ConvertModelPerformance(perf, perfData)
    if recommendedThreshold:
        elem = SubElement(head, "RecommendedThreshold")
        elem.text = str(recommendedThreshold)
    if classDescriptions:
        elem = SubElement(head, "ClassDescriptions")
        for val, text in classDescriptions:
            descr = SubElement(elem, 'ClassDescription')
            valElem = SubElement(descr, 'ClassVal')
            valElem.text = str(val)
            valText = SubElement(descr, 'ClassText')
            valText.text = str(text)
    if modelType:
        elem = SubElement(head, "ModelType")
        elem.text = modelType
    if modelOrganism:
        elem = SubElement(head, "ModelOrganism")
        elem.text = modelOrganism
    # Record a creation revision stamped with the current local date.
    hist = SubElement(head, "ModelHistory")
    revision = SubElement(hist, "Revision")
    tm = time.localtime()
    date = SubElement(revision, "RevisionDate")
    elem = SubElement(date, "Year")
    elem.text = str(tm[0])
    elem = SubElement(date, "Month")
    elem.text = str(tm[1])
    elem = SubElement(date, "Day")
    elem.text = str(tm[2])
    note = SubElement(revision, "RevisionNote")
    note.text = "Created"
    return ElementTree(head)
def xmlToDict(fPath, stopPath=None, splitTxt= False, get_pubmed = False):
    '''
    Converts study data from (ref man generated) XML to a dictionary
    matching study IDs (keys) to title/abstract tuples (values).

    For example: dict[n] might map to a tuple [t_n, a_n] where t_n is the
    title of the nth paper and a_n is the abstract.

    Each value is actually [title, abText, keywords, pubmed_id]; when
    splitTxt is true, title and abstract are lower-cased, tokenised and
    run through cleanUpTxt with the stop list at stopPath.
    '''
    refIDToAbs = {}
    numNoPubmeds = 0
    numNoAbs = 0  # Keep track of how many studies have no abstracts.
    tree = ElementTree(file=fPath)
    for record in tree.findall('.//record'):
        pubmed_id = None
        # SECURITY NOTE(review): eval() on document text — fine only for
        # trusted RefMan exports; int() would be the safe equivalent here.
        refmanid = eval(record.findall('.//rec-number')[0].text)
        try:
            # The pubmed UI id is embedded in the free-text notes field,
            # delimited by "-" and flagged by a "UI" token.
            pubmed = record.findall('.//notes/style')[0].text
            pubmed = pubmed.split("-")
            for i in range(len(pubmed)):
                if "UI" in pubmed[i]:
                    pubmed_str = pubmed[i+1].strip()
                    pubmed_id = eval("".join([x for x in pubmed_str if x in string.digits]))
                    #pubmed_id = eval(pubmed[i+1].replace("PT", "").replace("IN", ""))
                    #print pubmed
                    break
        except Exception, ex:
            print ex
        if pubmed_id is None:
            #if not "Cochrane" in pubmed[2]:
            #    pdb.set_trace()
            numNoPubmeds+=1
            print "%s has no pubmed id" % refmanid
        abstract = record.findall('.//abstract/style')
        abText = ""
        try:
            if abstract and splitTxt:
                abText = (abstract[0].text).split(" ")
                abText = [string.lower(s) for s in abText]
                abText = cleanUpTxt(abText, stopListPath=stopPath)
            elif abstract:
                abText = abstract[0].text
            else:
                numNoAbs += 1
        except:
            # NOTE(review): drops into the debugger on any failure —
            # leftover development aid.
            pdb.set_trace()
        title = ""
        if splitTxt:
            title = cleanUpTxt(string.lower(record.findall('.//titles/title/style')[0].text).split(" "), stopListPath=stopPath)
        else:
            try:
                title = record.findall('.//titles/title/style')[0].text
            except:
                pdb.set_trace()
        # Also grab keywords
        keywords = [keyword.text.strip().lower() for keyword in record.findall(".//keywords/keyword/style")]
        # NOTE(review): "or True" makes this condition always true, so every
        # record is kept regardless of pubmed_id.
        if pubmed_id is not None or True:
            refIDToAbs[refmanid] = [title, abText, keywords, pubmed_id]
import os
import sys
import xml
import re

# Aggregate royal-assent acts from every votes* file in ./votes into a
# single <top> element, one <assent> child per sitting date.
os.chdir('votes')
cwdfiles = os.listdir(os.getcwd())
votesfiles = filter(lambda s: re.match('votes', s), cwdfiles)
topelement = Element('top')
i = 1
for vf in votesfiles:
    print vf
    # NOTE(review): the matching except-clause of this try lies beyond the
    # visible chunk.
    try:
        votetree = ElementTree(file=vf)
        voteroot = votetree.getroot()
        date = voteroot.get('date')
        m = re.match('(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})', date)
        if not m:
            print "internal error in date format"
            sys.exit()
        # Carry the year/month/day groups plus the full date as attributes.
        mgd = m.groupdict()
        mgd.update({'date': date})
        acts = votetree.findall('//royal_assent/act')
        if len(acts) > 0:
            assent = Element('assent', mgd)
            for j in range(len(acts)):
                assent.insert(j, acts[j])
            topelement.insert(i, assent)
            i = i + 1
def genPerlStdCIX(filename, stream):
    """Generate CIX (code-intelligence XML) for Perl's built-in functions.

    Parses the "Alphabetical Listing of Perl Functions" section of
    perlfunc.pod (fetched via p4, or read from ./perlfunc.pod) into
    per-function signature/description records, then writes a CIX
    document describing them to *stream*.
    """
    log.debug("genPerlStdCIX(filename=%r, stream=%r)", filename, stream)
    root = Element("codeintel", version="2.0")
    cixfile = SubElement(root, "file", lang="Perl", mtime=str(int(time.time())), path=os.path.basename(filename))
    # Process Perl's built-ins out of perlfunc.pod.
    if 1:
        # Hard-coded source toggle: fetch perlfunc.pod from Perforce.
        p4path = "//depot/main/Apps/Gecko/src/Core/pod/perlfunc.pod"
        cmd = "p4 print -q %s" % p4path
        i, o, e = os.popen3(cmd)
        lines = o.read().splitlines(0)
        i.close()
        o.close()
        retval = e.close()
        if retval:
            raise Error("error running: %s" % cmd)
    else:
        lines = open("perlfunc.pod", 'r').read().splitlines(0)
    # Parse the "Alphabetical Listing of Perl Functions" into a list of
    # 'blocks' where each block is one command-"=item" block.
    start = lines.index("=head2 Alphabetical Listing of Perl Functions")
    blocks = []
    block = None
    level = 0
    def parseItem(line):
        # "=item NAME SIG..." -> (NAME, full signature text)
        sig = line.split(None, 1)[1]
        name = re.split("[ \t\n(/]", sig, 1)[0]
        return name, sig
    for i, line in enumerate(lines[start:]):
        if line.startswith("=over"):
            level += 1
        if line.startswith("=back"):
            level -= 1
            if level == 0:  # done the 'Alphabetical Listing' section
                if block:
                    blocks.append(block)
                break
        if level > 1:
            # Nested =over list: treat as plain description content.
            if block:
                block["lines"].append(line)
        elif block is None and not line.startswith("=item"):
            continue
        elif block is None and line.startswith("=item"):
            block = {}
            name, sig = parseItem(line)
            block = {"name": name, "sigs": [sig], "lines": []}
        elif line.startswith("=item"):
            name, sig = parseItem(line)
            if name == block["name"]:
                # Consecutive =item's for the same name are extra signatures.
                block["sigs"].append(sig)
            else:
                blocks.append(block)
                block = {"name": name, "sigs": [sig], "lines": []}
        else:
            if not block["lines"] and not line.strip():
                pass  # drop leading empty lines
            elif not line.strip() and block["lines"] and \
                 not block["lines"][-1].strip():
                pass  # collapse multiple blank lines
            else:
                block["lines"].append(line)
    # pprint(blocks)
    # Process the blocks into a list of command info dicts.
    def podrender(pod):
        # Strip/convert POD inline markup (F<>, I<>, C<>, L<>) to plain text.
        rendered = pod
        rendered = re.sub("F<(.*?)>", r"\1", rendered)
        rendered = re.sub("I<(.*?)>", r"*\1*", rendered)
        def quoteifspaced(match):
            if ' ' in match.group(1):
                return "'%s'" % match.group(1)
            else:
                return match.group(1)
        rendered = re.sub("C<(.*?)>", quoteifspaced, rendered)
        def linkrepl(match):
            content = match.group(1)
            if content.startswith("/"):
                content = content[1:]
            if "/" in content:
                page, section = content.split("/", 1)
                content = "%s in '%s'" % (section, page)
            else:
                content = "'%s'" % content
            return content
        rendered = re.sub("L<(.*?)>", linkrepl, rendered)
        return rendered
    # These perl built-ins are grouped in perlfunc.pod.
    commands = []
    WIDTH = 60  # desc field width
    syscalls = """
        getpwnam getgrnam gethostbyname getnetbyname getprotobyname
        getpwuid getgrgid getservbyname gethostbyaddr getnetbyaddr
        getprotobynumber getservbyport getpwent getgrent gethostent
        getnetent getprotoent getservent setpwent setgrent sethostent
        setnetent setprotoent setservent endpwent endgrent endhostent
        endnetent endprotoent endservent
    """.split()
    calltip_skips = "sub use require".split()
    for block in blocks:
        name, sigs, lines = block["name"], block["sigs"], block["lines"]
        if name == "-X":  # template for -r, -w, -f, ...
            # NOTE(review): leading whitespace inside this pattern was
            # collapsed in transit — confirm against upstream perlfunc.pod
            # formatting.
            pattern = re.compile(r"^ (-\w)\t(.*)$")
            tlines = [line for line in lines if pattern.match(line)]
            for tline in tlines:
                tname, tdesc = pattern.match(tline).groups()
                tsigs = [s.replace("-X", tname) for s in sigs]
                command = {
                    "name": tname,
                    "sigs": tsigs,
                    "desc": textwrap.fill(tdesc, WIDTH)
                }
                commands.append(command)
        elif name in ("m", "q", "qq", "qr", "qx", "qw", "s", "tr", "y"):
            # Quote-like operators get hand-written calltips: signature
            # lines, a blank line, then the description (parsed below).
            operators = {
                "m": """\
m/PATTERN/cgimosx
/PATTERN/cgimosx

Searches a string for a pattern match, and in scalar context returns
true if it succeeds, false if it fails.
""",
                "q": """\
q/STRING/
'STRING'

A single-quoted, literal string.
""",
                "qq": """\
qq/STRING/
"STRING"

A double-quoted, interpolated string.
""",
                "qr": """\
qr/STRING/imosx

Quotes (and possibly compiles) STRING as a regular expression.
""",
                "qx": """\
qx/STRING/
`STRING`

A string which is (possibly) interpolated and then executed as a
system command.
""",
                "qw": """\
qw/STRING/

Evaluates to a list of the words extracted out of STRING, using
embedded whitespace as the word delimiters.
""",
                "s": """\
s/PATTERN/REPLACEMENT/egimosx

Searches a string for a pattern, and if found, replaces that pattern
with the replacement text and returns the number of substitutions
made. Otherwise it returns the empty string.
""",
                "tr": """\
tr/SEARCHLIST/REPLACEMENTLIST/cds
y/SEARCHLIST/REPLACEMENTLIST/cds

Transliterates all occurrences of the characters found in the search
list with the corresponding character in the replacement list. It
returns the number of characters replaced or deleted.
""",
                "y": """\
tr/SEARCHLIST/REPLACEMENTLIST/cds
y/SEARCHLIST/REPLACEMENTLIST/cds

Transliterates all occurrences of the characters found in the search
list with the corresponding character in the replacement list. It
returns the number of characters replaced or deleted.
""",
            }
            sigs = []
            desclines = None
            for line in operators[name].splitlines(0):
                if desclines is not None:
                    desclines.append(line.strip())
                elif not line.strip():
                    # First blank line separates signatures from description.
                    desclines = []
                else:
                    sigs.append(line.strip())
            command = {
                "name": name,
                "sigs": sigs,
                "desc": textwrap.fill(' '.join(desclines), WIDTH)
            }
            commands.append(command)
        elif name in syscalls:
            desc = "Performs the same function as the '%s' system call." % name
            desc = textwrap.fill(desc, WIDTH)
            # Return-value templates by getter family, list context.
            getterListContext = {
                "getpw": "\n"
                " ($name,$passwd,$uid,$gid,$quota,$comment,\n"
                " $gcos,$dir,$shell,$expire) = %s",
                "getgr": "\n ($name,$passwd,$gid,$members) = %s",
                "gethost": "\n ($name,$aliases,$addrtype,$length,@addrs) = %s",
                "getnet": "\n ($name,$aliases,$addrtype,$net) = %s",
                "getproto": "\n ($name,$aliases,$proto) = %s",
                "getserv": "\n ($name,$aliases,$port,$proto) = %s",
            }
            # Scalar-context return value per specific function.
            getterScalarContext = {
                "getgrent": "$name = %s",
                "getgrgid": "$name = %s",
                "getgrnam": "$gid = %s",
                "gethostbyaddr": "$name = %s",
                "gethostbyname": "$addr = %s",
                "gethostent": "$name = %s",
                "getnetbyaddr": "$name = %s",
                "getnetbyname": "$net = %s",
                "getnetent": "$name = %s",
                "getprotobyname": "$num = %s",
                "getprotobynumber": "$name = %s",
                "getprotoent": "$name = %s",
                "getpwent": "$name = %s",
                "getpwnam": "$uid = %s",
                "getpwuid": "$name = %s",
                "getservbyname": "$num = %s",
                "getservbyport": "$name = %s",
                "getservent": "$name = %s",
            }
            for prefix, template in getterListContext.items():
                if name.startswith(prefix):
                    desc += template % sigs[0]
            if name in getterScalarContext:
                desc += "\nin list context or:\n "\
                    + getterScalarContext[name] % sigs[0]
            command = {"name": name, "desc": desc, "sigs": sigs}
            commands.append(command)
        elif name == "shmread":
            desc = """\
Reads the System V shared memory segment ID starting at position POS
for size SIZE by attaching to it, copying out, and detaching from it.
"""
            desc = ' '.join([ln.strip() for ln in desc.splitlines(0)])
            command = {
                "name": name,
                "sigs": sigs,
                "desc": textwrap.fill(desc, WIDTH)
            }
            commands.append(command)
        elif name == "shmwrite":
            desc = """\
Writes the System V shared memory segment ID starting at position POS
for size SIZE by attaching to it, copying in, and detaching from it.
"""
            desc = ' '.join([ln.strip() for ln in desc.splitlines(0)])
            command = {
                "name": name,
                "sigs": sigs,
                "desc": textwrap.fill(desc, WIDTH)
            }
            commands.append(command)
        elif name in calltip_skips:
            continue  # just drop the sub calltip: annoying
        else:
            # Parsing the description from the full description:
            # Pull out the first sentence up to a maximum of three lines
            # and one paragraph. If the first *two* sentences fit on the
            # first line, then use both.
            desc = ""
            sentencePat = re.compile(r"([^\.]+(?:\. |\.$))")
            if name in ("dbmclose", "dbmopen"):
                # Skip the first paragraph: "[This function...superceded by"
                lines = lines[lines.index('') + 1:]
            elif name == "do":
                # Skip the first sentence: "Not really a function."
                end = sentencePat.match(lines[0]).span()[1]
                lines[0] = lines[0][end:].lstrip()
            for i, line in enumerate(lines):
                if not line.strip():
                    break
                sentences = sentencePat.findall(line)
                if not sentences:
                    desc += line + ' '
                    continue
                elif i == 0 and len(sentences) > 1:
                    desc += ' '.join([s.strip() for s in sentences[:2]])
                else:
                    desc += sentences[0].strip()
                break
            command = {
                "name": name,
                "sigs": sigs,
                "desc": textwrap.fill(podrender(desc), WIDTH)
            }
            commands.append(command)
    # for command in commands:
    #     print
    #     print banner(command["name"], '-')
    #     print '\n'.join(command["sigs"])
    #     print
    #     print command["desc"]
    # Generate the CIX for each function.
    module_elt = SubElement(cixfile, "scope", ilk="blob", name="*")  # "built-ins" module
    for command in commands:
        name, sigs, desc = command["name"], command["sigs"], command["desc"]
        func_elt = SubElement(module_elt, "scope", ilk="function", name=name)
        if sigs:
            func_elt.set("signature", '\n'.join(sigs))
        if desc:
            # Only the first three lines of the description go in the doc.
            doclines = desc.split('\n')[:3]
            # doclines = parseDocSummary(doclines)
            doc = '\n'.join(doclines)
            func_elt.set("doc", doc)
    # Generate the CIX.
    prettify(root)
    tree = ElementTree(root)
    stream.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    tree.write(stream)
def xml_to_dict(fpath):
    '''
    Converts study data from (ref man generated) XML to a dictionary
    matching study IDs (keys) to title/abstract tuples (values).

    For example: dict[n] might map to a tuple [t_n, a_n] where t_n is the
    title of the nth paper and a_n is the abstract.

    Each value is a dict with keys: title, abstract, journal, keywords,
    pmid, authors. Handles both Reference Manager 11.0 and 12.0 exports,
    whose XML paths differ.
    '''
    ref_ids_to_abs = {}
    parsing_errors = []
    num_no_abs = 0
    tree = ElementTree(file=fpath)
    num_failed = 0
    for record in tree.findall('.//record'):
        pubmed_id, refmanid = None, None
        refman_version = record.findtext('.//source-app')
        path_str = None
        ### here we check the RefMan version, and change
        # the xml path accordingly. this fixes issue #7
        if refman_version == 'Reference Manager 12.0':
            path_str = './/rec-number/style'
            journal_path_str = './/periodical/full-title/style'
        elif refman_version == 'Reference Manager 11.0':
            path_str = './/rec-number'
            journal_path_str = './/periodical/abbr-1/style'
        try:
            refmanid = int(record.findtext(path_str))
        except:
            # path_str stays None for unknown versions, so findtext/int
            # fails and the record is reported rather than crashing.
            error = "Unable to parse record '%s' in '%s'" % (
                record, os.path.basename(fpath))
            #print "failed to parse refman document"
            parsing_errors.append(error)
        if refmanid is not None:
            # attempt to grab the pubmed id
            pubmed_id = ""
            try:
                # The pubmed UI id is embedded in the "-"-delimited notes
                # text, in the token after the one containing "UI".
                pubmed = record.findtext('.//notes/style')
                pubmed = pubmed.split("-")
                for i in range(len(pubmed)):
                    if "UI" in pubmed[i]:
                        pubmed_str = pubmed[i + 1].strip()
                        pubmed_id = int("".join(
                            [x for x in pubmed_str if x in string.digits]))
            except Exception, ex:
                error = "Problem getting pmid from '%s' in '%s'" % (
                    record, os.path.basename(fpath))
                parsing_errors.append(error)
                #print "problem getting pmid ..."
                #print ex
                #print("\n")
            ab_text = record.findtext('.//abstract/style')
            if ab_text is None:
                num_no_abs += 1
            title_text = record.findtext('.//titles/title/style')
            # Also grab keywords
            keywords = [
                keyword.text.strip().lower()
                for keyword in record.findall(".//keywords/keyword/style")
            ]
            # and authors
            authors = [
                author.text for author in record.findall(
                    ".//contributors/authors/author/style")
            ]
            # journal
            journal = record.findtext(journal_path_str)
            ref_ids_to_abs[refmanid] = {"title":title_text, "abstract":ab_text, "journal":journal,\
                "keywords":keywords, "pmid":pubmed_id, "authors":authors}
import sys from elementtree.ElementTree import ElementTree mydoc = ElementTree(file=sys.argv[1]) for e in mydoc.findall(sys.argv[2]): print e.text
    @return: ElementTree containing an XRDS document

    @raises XRDSError: When there is a parse error or the document does
        not contain an XRDS.
    """
    try:
        # Feed the raw text to the tree builder; malformed XML
        # surfaces as XMLError from close().
        parser = XMLTreeBuilder()
        parser.feed(text)
        element = parser.close()
    except XMLError, why:
        # Wrap the low-level XML failure in the module's XRDSError,
        # preserving the original exception on .reason for callers.
        exc = XRDSError('Error parsing document as XML')
        exc.reason = why
        raise exc
    else:
        tree = ElementTree(element)
        # Well-formed XML is not enough: the root must actually be
        # an XRDS document.
        if not isXRDS(tree):
            raise XRDSError('Not an XRDS document')
        return tree


# XML namespace URIs used by XRD/XRDS documents
XRD_NS_2_0 = 'xri://$xrd*($v*2.0)'
XRDS_NS = 'xri://$xrds'


def nsTag(ns, t):
    # Qualify tag t with namespace ns using ElementTree's
    # '{namespace}tag' notation.
    return '{%s}%s' % (ns, t)


def mkXRDTag(t):
    def parse_file(self):
        """
        Parse the TCX track file attached to self.track.

        Reads only the first Activity in the document, builds one Lap
        object per <Lap> element (times, distance, speeds, heart rate,
        cadence, calories, elevation gain/loss/min/max) and appends
        them to self.laps. Also records self.date, self.position_start,
        self.time_start and self.time_end from the trackpoint data.
        """
        self.laps = []
        self.position_start = None
        self.date = None
        self.time_start = None
        self.time_end = None
        self.track.trackfile.open()
        xmltree = ElementTree(file=self.track.trackfile)
        self.track.trackfile.close()
        # take only first activity from file
        xmlactivity = xmltree.find(self.TCX_NS + "Activities")[0]
        lap_date = None
        for xmllap in xmlactivity.findall(self.TCX_NS + "Lap"):
            lap_date = dateutil.parser.parse(xmllap.get("StartTime"))
            # overall activity date = start time of the first lap
            if self.date is None:
                self.date = lap_date
            # lap duration in whole seconds
            time = int(float(xmllap.find(self.TCX_NS + "TotalTimeSeconds").text))
            # distance: converted from meters to kilometers, kept as str
            if xmllap.find(self.TCX_NS + "DistanceMeters") is None:
                logging.debug("DistanceMeters not present in Lap data")
                distance = None
            else:
                distance = str(float(xmllap.find(self.TCX_NS + "DistanceMeters").text) / 1000)
            if xmllap.find(self.TCX_NS + "MaximumSpeed") is None:
                logging.debug("MaximumSpeed is None")
                speed_max = None
            else:
                logging.debug("MaximumSpeed xml is %r" % xmllap.find(self.TCX_NS + "MaximumSpeed"))
                # Given as meters per second in tcx file; convert to km/h
                speed_max = str(float(xmllap.find(self.TCX_NS + "MaximumSpeed").text) * 3.6)
            logging.debug("speed_max is %s" % speed_max)
            if xmllap.find(self.TCX_NS + "Calories") is not None:
                calories = int(xmllap.find(self.TCX_NS + "Calories").text)
            else:
                calories = None
            # Heart rate / cadence elements are optional; a missing element
            # makes find() return None and the chained access raise
            # AttributeError, which we treat as "not present".
            try:
                hf_avg = int(xmllap.find(self.TCX_NS + "AverageHeartRateBpm").find(self.TCX_NS + "Value").text)
                logging.debug("Found hf_avg: %s" % hf_avg)
            except AttributeError:
                hf_avg = None
                logging.debug("Not found hf_avg")
            try:
                hf_max = int(xmllap.find(self.TCX_NS + "MaximumHeartRateBpm").find(self.TCX_NS + "Value").text)
                logging.debug("Found hf_max: %s" % hf_max)
            except AttributeError:
                hf_max = None
                logging.debug("Not found hf_max")
            try:
                cadence_avg = int(xmllap.find(self.TCX_NS + "Cadence").text)
                logging.debug("Found average cadence: %s" % cadence_avg)
            except AttributeError:
                cadence_avg = None
                logging.debug("Not found average cadence")
            # average speed in km/h from km distance and seconds
            if time != 0 and distance is not None:
                speed_avg = str(float(distance) * 3600 /
                                time)
            else:
                speed_avg = None
            # per-lap accumulators for the trackpoint scan below
            cadence_max = None
            elev_min = None
            elev_max = None
            elev_gain = None
            elev_loss = None
            last_elev = None
            for xmltrack in xmllap.findall(self.TCX_NS + "Track"):
                for xmltp in xmltrack.findall(self.TCX_NS + "Trackpoint"):
                    # first trackpoint with a position fixes the start coords
                    if not self.position_start:
                        xmlpos = xmltp.find(self.TCX_NS + "Position")
                        if xmlpos is not None:
                            if xmlpos.find(self.TCX_NS + "LatitudeDegrees") is not None and xmlpos.find(self.TCX_NS + "LongitudeDegrees") is not None:
                                lat = float(xmlpos.find(self.TCX_NS + "LatitudeDegrees").text)
                                lon = float(xmlpos.find(self.TCX_NS + "LongitudeDegrees").text)
                                self.position_start = (lat, lon)
                    # first trackpoint with a timestamp fixes the start time
                    if not self.time_start and xmltp.find(self.TCX_NS + "Time") is not None:
                        self.time_start = dateutil.parser.parse(xmltp.find(self.TCX_NS + "Time").text)
                    # elevation rounded to whole meters; carry the previous
                    # value forward when the trackpoint has no altitude
                    if xmltp.find(self.TCX_NS + "AltitudeMeters") is not None:
                        elev = int(round(float(xmltp.find(self.TCX_NS + "AltitudeMeters").text)))
                    else:
                        elev = last_elev
                    # update min/max and accumulate gain/loss only when the
                    # elevation actually changed
                    if elev != last_elev:
                        if elev_max is not None:
                            if elev > elev_max:
                                elev_max = elev
                        else:
                            elev_max = elev
                        if elev_min is not None:
                            if elev < elev_min:
                                elev_min = elev
                        else:
                            elev_min = elev
                        if last_elev:
                            if elev > last_elev:
                                if elev_gain is None:
                                    elev_gain = elev - last_elev
                                else:
                                    elev_gain += elev - last_elev
                            else:
                                if elev_loss is None:
                                    elev_loss = last_elev - elev
                                else:
                                    elev_loss += last_elev - elev
                        last_elev = elev
                    # NOTE(review): `cadence > cadence_max` relies on
                    # Python 2's int-vs-None ordering when cadence_max is
                    # still None -- would raise TypeError under Python 3.
                    if xmltp.find(self.TCX_NS + "Cadence") is not None:
                        cadence = int(xmltp.find(self.TCX_NS + "Cadence").text)
                        if cadence > cadence_max:
                            cadence_max = cadence
                # Get timestamp from last trackpoint in this track
                xmltp = xmltrack.findall(self.TCX_NS + "Trackpoint")[-1]
                if xmltp.find(self.TCX_NS + "Time") is not None:
                    self.time_end = dateutil.parser.parse(xmltp.find(self.TCX_NS + "Time").text)
            lap = Lap(
                date=lap_date,
                time=time,
                distance=distance,
                elevation_gain=elev_gain,
                elevation_loss=elev_loss,
                elevation_min=elev_min,
                elevation_max=elev_max,
                speed_max=speed_max,
                speed_avg=speed_avg,
                cadence_avg=cadence_avg,
                cadence_max=cadence_max,
                calories=calories,
                hf_max=hf_max,
                hf_avg=hf_avg)
            self.laps.append(lap)
#!/usr/bin/python import sys from elementtree.ElementTree import ElementTree COLOR_NONE = "\033[m" COLOR_GREEN = "\033[01;32m" COLOR_RED = "\033[01;31m" COLOR_YELLOW = "\033[01;33m" if len(sys.argv) < 2: print "Error: Params not well defined" xml_paths = sys.argv[1:] for xml_path in xml_paths: tree = ElementTree() try: tree.parse(xml_path) except: print COLOR_RED + "ERROR:[ " + COLOR_YELLOW + xml_path + COLOR_RED + " ] is not a well formed xml file!!!!" + COLOR_NONE continue print COLOR_GREEN + "Info:[ " + COLOR_YELLOW + xml_path + COLOR_GREEN + " ] The XML file is normal!" + COLOR_NONE
def write_xml_file(self): ElementTree(self.xmltree).write(os.getcwd() + r'\mod_' + self.filename) print os.getcwd() + r'\mod_' + self.filename